From efbc5dab19b8bb7d520eedf0fc7a77bc2ea3e026 Mon Sep 17 00:00:00 2001
From: Shahar Soel <4233843+bd82@users.noreply.github.com>
Date: Fri, 26 Feb 2021 20:34:39 +0200
Subject: [PATCH] build: replace tslint with eslint (#1382)

fixes #1271
---
 .eslintrc.js | 69 +++
 examples/grammars/css/css.js | 3 +
 examples/grammars/ecma5/ecma5_parser.js | 12 +-
 examples/grammars/graphql/graphql.js | 6 +-
 examples/grammars/tinyc/tinyc.js | 2 +-
 .../ecma6/ecma6_json.js | 2 +-
 examples/lexer/custom_errors/custom_errors.js | 4 +-
 .../keywords_vs_identifiers.js | 2 +-
 .../content_assist/content_assist_simple.js | 1 -
 .../parser/custom_errors/custom_errors.js | 3 +-
 .../step1_lexing/step1_lexing_spec.js | 1 -
 .../step2_parsing/step2_parsing_spec.js | 1 -
 .../step3_actions/step3_actions_spec.js | 1 -
 package.json | 16 +-
 .../diagrams/src/diagrams_builder.js | 2 +-
 packages/chevrotain/package.json | 4 +-
 .../chevrotain/scripts/update-api-docs.js | 5 +-
 packages/chevrotain/src/generate/generate.ts | 4 +-
 .../chevrotain/src/lang/lang_extensions.ts | 4 +-
 packages/chevrotain/src/parse/constants.ts | 2 +-
 .../chevrotain/src/parse/cst/cst_visitor.ts | 34 +-
 .../chevrotain/src/parse/errors_public.ts | 42 +-
 .../chevrotain/src/parse/grammar/checks.ts | 124 ++---
 .../chevrotain/src/parse/grammar/first.ts | 4 +-
 .../chevrotain/src/parse/grammar/follow.ts | 14 +-
 .../chevrotain/src/parse/grammar/gast/gast.ts | 2 +-
 .../src/parse/grammar/gast/gast_public.ts | 4 +-
 .../src/parse/grammar/interpreter.ts | 96 ++--
 .../chevrotain/src/parse/grammar/lookahead.ts | 97 ++--
 .../chevrotain/src/parse/grammar/resolver.ts | 6 +-
 packages/chevrotain/src/parse/grammar/rest.ts | 22 +-
 .../chevrotain/src/parse/parser/parser.ts | 14 +-
 .../src/parse/parser/traits/context_assist.ts | 10 +-
 .../src/parse/parser/traits/error_handler.ts | 24 +-
 .../src/parse/parser/traits/looksahead.ts | 2 +-
 .../src/parse/parser/traits/recognizer_api.ts | 6 +-
 .../parse/parser/traits/recognizer_engine.ts | 83 +--
 .../src/parse/parser/traits/recoverable.ts | 72 +--
 .../src/parse/parser/traits/tree_builder.ts | 6 +-
 packages/chevrotain/src/scan/lexer.ts | 104 ++--
 packages/chevrotain/src/scan/lexer_public.ts | 64 +--
 packages/chevrotain/src/scan/tokens.ts | 4 +-
 packages/chevrotain/src/scan/tokens_public.ts | 4 +-
 packages/chevrotain/src/utils/utils.ts | 90 ++--
 packages/chevrotain/test/all.ts | 2 +-
 .../backtracking/backtracking_parser_spec.ts | 14 +-
 .../test/full_flow/ecma_quirks/ecma_quirks.ts | 10 +-
 .../sql_statements/sql_recovery_parser.ts | 56 +-
 .../sql_statements/sql_recovery_spec.ts | 70 +--
 .../switch_case/switchcase_recovery_parser.ts | 16 +-
 .../switch_case/swithcase_recovery_spec.ts | 48 +-
 .../chevrotain/test/full_flow/parse_tree.ts | 2 +-
 packages/chevrotain/test/parse/cst_spec.ts | 124 ++---
 .../chevrotain/test/parse/cst_visitor_spec.ts | 30 +-
 .../chevrotain/test/parse/exceptions_spec.ts | 39 +-
 .../test/parse/grammar/checks_spec.ts | 84 +--
 .../test/parse/grammar/first_spec.ts | 28 +-
 .../test/parse/grammar/follow_spec.ts | 20 +-
 .../test/parse/grammar/gast_spec.ts | 78 +--
 .../test/parse/grammar/interperter_spec.ts | 170 +++---
 .../test/parse/grammar/lookahead_spec.ts | 170 +++---
 .../test/parse/grammar/resolver_spec.ts | 8 +-
 .../chevrotain/test/parse/grammar/samples.ts | 26 +-
 .../chevrotain/test/parse/predicate_spec.ts | 52 +-
 .../recognizer/recognizer_config_spec.ts | 4 +-
 .../parse/recognizer/rules_override_spec.ts | 8 +-
 .../test/parse/recognizer_lookahead_spec.ts | 284 +++++-----
 .../chevrotain/test/parse/recognizer_spec.ts | 182 ++++---
 .../test/scan/lexer_errors_public_spec.ts | 2 +-
 packages/chevrotain/test/scan/lexer_spec.ts | 335 ++++++------
 packages/chevrotain/test/scan/token_spec.ts | 40 +-
 packages/chevrotain/test/test.config.js | 2 +
 packages/chevrotain/test/text/range_spec.ts | 30 +-
 packages/chevrotain/test/utils/utils_spec.ts | 10 +-
 .../sanity/json_parser_spec.js | 2 +-
 tslint.json | 23 -
 yarn.lock | 508 ++++++++++++++++--
 77 files changed, 2023 insertions(+), 1526 deletions(-)
 create mode 100644 .eslintrc.js
 delete mode 100644 tslint.json

diff --git a/.eslintrc.js b/.eslintrc.js
new file mode 100644
index 000000000..16dcf198c
--- /dev/null
+++ b/.eslintrc.js
@@ -0,0 +1,69 @@
+module.exports = {
+  // Common settings for JS Files.
+  extends: ["plugin:eslint-comments/recommended", "prettier"],
+  env: {
+    es6: true,
+    commonjs: true,
+    mocha: true,
+    node: true
+  },
+  globals: {
+    expect: true,
+    define: true,
+    window: true
+  },
+  rules: {
+    "eslint-comments/require-description": ["error", { ignore: [] }]
+  },
+  overrides: [
+    {
+      // For pure-java script sub-packages and general scripts (in any package).
+      files: ["*.js"],
+      extends: ["eslint:recommended"],
+      excludedFiles: [
+        "**/vendor/**/*.js",
+        "**/diagrams/**/*.js",
+        "**/benchmark_web/**/*.js"
+      ],
+
+      parserOptions: {
+        // The `ecmaVersion` should align to the supported features of our target runtimes (browsers / nodejs / others)
+        // Consult with: https://kangax.github.io/compat-table/es2016plus/
+        ecmaVersion: 2017
+      }
+    },
+    {
+      // For sub-packages using TypeScript (libraries/VSCode Exts) && TypeScript definitions (d.ts)
+      files: ["*.ts"],
+      plugins: ["@typescript-eslint"],
+      parser: "@typescript-eslint/parser",
+      parserOptions: {
+        // project: ["./tsconfig.base.json", "./tsconfig.json"],
+      },
+      extends: [
+        "plugin:@typescript-eslint/eslint-recommended"
+        // "plugin:@typescript-eslint/recommended-requiring-type-checking",
+      ],
+      rules: {
+        "@typescript-eslint/no-use-before-define": [
+          "error",
+          // These can be safely used before they are defined due to function hoisting in EcmaScript
+          { functions: false, classes: false }
+        ]
+        // TODO: This rule configuration is very useful, attempt to apply it on the existing code base in the future
+        // "@typescript-eslint/ban-ts-comment": [
+        //   "error",
+        //   {
+        //     // We only allow ts-expect-error comments to enforce removal
+        //     // of outdated suppression comments when the underlying issue has been resolved.
+        //     // https://devblogs.microsoft.com/typescript/announcing-typescript-3-9/#what-about-ts-ignore
+        //     "ts-expect-error": "allow-with-description",
+        //     "ts-ignore": true,
+        //     "ts-nocheck": true,
+        //     "ts-check": true,
+        //   },
+        // ],
+      }
+    }
+  ]
+}
diff --git a/examples/grammars/css/css.js b/examples/grammars/css/css.js
index 2a45bfed2..d407aa4f0 100644
--- a/examples/grammars/css/css.js
+++ b/examples/grammars/css/css.js
@@ -45,6 +45,7 @@ FRAGMENT("spaces", "[ \\t\\r\\n\\f]+")
 FRAGMENT("ident", "-?{{nmstart}}{{nmchar}}*")
 FRAGMENT("num", "[0-9]+|[0-9]*\\.[0-9]+")
 
+/* eslint-disable no-unused-vars -- tokens are collected in our `createToken` wrapper */
 const Whitespace = createToken({
   name: "Whitespace",
   pattern: MAKE_PATTERN("{{spaces}}"),
@@ -244,6 +245,8 @@ const Ident = createToken({
 
 const Minus = createToken({ name: "Minus", pattern: /-/ })
 
+/* eslint-enable no-unused-vars -- tokens are collected in our `createToken` wrapper */
+
 const CssLexer = new Lexer(cssTokens)
 
 // ----------------- parser -----------------
diff --git a/examples/grammars/ecma5/ecma5_parser.js b/examples/grammars/ecma5/ecma5_parser.js
index 26f974188..f2e8365af 100644
--- a/examples/grammars/ecma5/ecma5_parser.js
+++ b/examples/grammars/ecma5/ecma5_parser.js
@@ -1,6 +1,11 @@
 "use strict"
 
-const { EmbeddedActionsParser, EOF, tokenMatcher } = require("chevrotain")
+const {
+  EmbeddedActionsParser,
+  EOF,
+  tokenMatcher,
+  MismatchedTokenException
+} = require("chevrotain")
 const tokens = require("./ecma5_tokens")
 // for conciseness
 const t = tokens
@@ -689,7 +694,7 @@ class ECMAScript5Parser extends EmbeddedActionsParser {
         // there is no danger of inRule recovery (single token insertion/deletion)
         // happening in this case because that type of recovery can only happen if CONSUME(...) was invoked.
         this.SAVE_ERROR(
-          new chevrotain.MismatchedTokenException(
+          new MismatchedTokenException(
             "Line Terminator not allowed before Expression in Throw Statement"
             // TODO: create line terminator token on the fly?
           )
         )
@@ -865,8 +870,9 @@ class ECMAScript5Parser extends EmbeddedActionsParser {
   // the "IN" is only allowed if x is a left hand side expression
   // https://www.ecma-international.org/ecma-262/5.1/index.html#sec-12.6
   // so this method must verify that the exp parameter fulfills this condition.
+  // eslint-disable-next-line no-unused-vars -- function signature
   canInComeAfterExp(exp) {
-    // TODO: temp implemntatoin, will always allow IN style iteration for now.
+    // TODO: temp implementation, will always allow IN style iteration for now.
     return true
   }
 
diff --git a/examples/grammars/graphql/graphql.js b/examples/grammars/graphql/graphql.js
index bce52101a..d0390c7d8 100644
--- a/examples/grammars/graphql/graphql.js
+++ b/examples/grammars/graphql/graphql.js
@@ -13,7 +13,7 @@
  */
 // wrapping in UMD to allow code to work both in node.js
 // and in the browser
-;(function (root, factory) {
+(function (root, factory) {
   if (typeof module === "object" && module.exports) {
     // Node. Does not work with strict CommonJS, but
     // only CommonJS-like environments that support module.exports,
@@ -84,6 +84,8 @@
     return newNotTokenCategory
   }
 
+  /* eslint-disable no-unused-vars -- tokens are collected in our `createToken` wrapper */
+
   // B1 - Ignored-Tokens
   // http://facebook.github.io/graphql/June2018/#sec-Appendix-Grammar-Summary.Ignored-Tokens
   const WhiteSpace = createToken({
@@ -116,6 +118,8 @@
     group: Lexer.SKIPPED
   })
 
+  /* eslint-enable no-unused-vars -- tokens are collected in our `createToken` wrapper */
+
   // B2 - Lexical Tokens
   // http://facebook.github.io/graphql/June2018/#sec-Appendix-Grammar-Summary.Lexical-Tokens
   // Punctuator
diff --git a/examples/grammars/tinyc/tinyc.js b/examples/grammars/tinyc/tinyc.js
index 5e52adbb6..09482250c 100644
--- a/examples/grammars/tinyc/tinyc.js
+++ b/examples/grammars/tinyc/tinyc.js
@@ -16,7 +16,7 @@ function createToken(options) {
   return newToken
 }
 
-const WhiteSpace = createToken({
+createToken({
   name: "WhiteSpace",
   pattern: /\s+/,
   group: Lexer.SKIPPED
diff --git a/examples/implementation_languages/ecma6/ecma6_json.js b/examples/implementation_languages/ecma6/ecma6_json.js
index acc9413d5..862c12f0f 100644
--- a/examples/implementation_languages/ecma6/ecma6_json.js
+++ b/examples/implementation_languages/ecma6/ecma6_json.js
@@ -47,7 +47,7 @@ const JsonLexer = new Lexer(allTokens)
 
 // ----------------- parser -----------------
 class JsonParserES6 extends CstParser {
-  constructor(input) {
+  constructor() {
     super(allTokens)
 
     // not mandatory, using $ (or any other sign) to reduce verbosity (this. this. this. this. .......)
diff --git a/examples/lexer/custom_errors/custom_errors.js b/examples/lexer/custom_errors/custom_errors.js
index b236d973e..a0460a1af 100644
--- a/examples/lexer/custom_errors/custom_errors.js
+++ b/examples/lexer/custom_errors/custom_errors.js
@@ -10,13 +10,15 @@ const Whitespace = createToken({
 })
 
 // A link to the detailed API for the ILexerErrorMessageProvider can be found here:
-// https://chevrotain.io/docs/features/custom_errors.htmlconst OyVeyErrorMessageProvider = {
+// https://chevrotain.io/docs/features/custom_errors.html
+const OyVeyErrorMessageProvider = {
   buildUnexpectedCharactersMessage(
     fullText,
     startOffset,
     length,
+    // eslint-disable-next-line no-unused-vars -- template
     line,
+    // eslint-disable-next-line no-unused-vars -- template
     column
   ) {
     return (
diff --git a/examples/lexer/keywords_vs_identifiers/keywords_vs_identifiers.js b/examples/lexer/keywords_vs_identifiers/keywords_vs_identifiers.js
index 6cbdb365d..fff32bdf2 100644
--- a/examples/lexer/keywords_vs_identifiers/keywords_vs_identifiers.js
+++ b/examples/lexer/keywords_vs_identifiers/keywords_vs_identifiers.js
@@ -39,7 +39,7 @@ const Whitespace = createToken({
   group: Lexer.SKIPPED
 })
 
-keywordsVsIdentifiersLexer = new Lexer([
+const keywordsVsIdentifiersLexer = new Lexer([
   Whitespace, // Whitespace is very common in most languages so placing it first generally speeds up the lexing.
   While,
   // the actual keywords (While/For/Do) must appear BEFORE the Identifier Token as they are all a valid prefix of it's PATTERN.
diff --git a/examples/parser/content_assist/content_assist_simple.js b/examples/parser/content_assist/content_assist_simple.js
index a3cde40b0..5b02df845 100644
--- a/examples/parser/content_assist/content_assist_simple.js
+++ b/examples/parser/content_assist/content_assist_simple.js
@@ -7,7 +7,6 @@
  *   "Public sta" --> ["static"]
 *   "call f" --> ["foo"] // assuming foo is in the symbol table.
*/ -const _ = require("lodash") const { createToken, Lexer, CstParser } = require("chevrotain") const A = createToken({ name: "A", pattern: /A/ }) diff --git a/examples/parser/custom_errors/custom_errors.js b/examples/parser/custom_errors/custom_errors.js index 155b77def..3bf91e336 100644 --- a/examples/parser/custom_errors/custom_errors.js +++ b/examples/parser/custom_errors/custom_errors.js @@ -56,7 +56,7 @@ const myErrorProvider = { // ----------------- parser ----------------- class CustomErrorsParser extends CstParser { - constructor(input) { + constructor() { super(allTokens, { // passing our custom error message provider errorMessageProvider: myErrorProvider @@ -124,6 +124,7 @@ function parseStartingWithRule(ruleName) { // setting a new input will RESET the parser instance's state. parser.input = lexResult.tokens // just invoke which ever rule you want as the start rule. its all just plain javascript... + // eslint-disable-next-line no-unused-vars -- template const cst = parser[ruleName]() // we are only interested in the errors in this scenario. diff --git a/examples/tutorial/step1_lexing/step1_lexing_spec.js b/examples/tutorial/step1_lexing/step1_lexing_spec.js index c24effadd..2a8c05410 100644 --- a/examples/tutorial/step1_lexing/step1_lexing_spec.js +++ b/examples/tutorial/step1_lexing/step1_lexing_spec.js @@ -1,6 +1,5 @@ "use strict" const expect = require("chai").expect -const _ = require("lodash") const tokenMatcher = require("chevrotain").tokenMatcher const lex = require("./step1_lexing").lex const tokenVocabulary = require("./step1_lexing").tokenVocabulary diff --git a/examples/tutorial/step2_parsing/step2_parsing_spec.js b/examples/tutorial/step2_parsing/step2_parsing_spec.js index d5d7f682b..a269c5fe9 100644 --- a/examples/tutorial/step2_parsing/step2_parsing_spec.js +++ b/examples/tutorial/step2_parsing/step2_parsing_spec.js @@ -1,6 +1,5 @@ "use strict" const expect = require("chai").expect -const _ = require("lodash") const parse = require("./step2_parsing").parse describe("Chevrotain Tutorial", () => { diff --git a/examples/tutorial/step3_actions/step3_actions_spec.js b/examples/tutorial/step3_actions/step3_actions_spec.js index ed4c58db2..c7551cd69 100644 --- a/examples/tutorial/step3_actions/step3_actions_spec.js +++ b/examples/tutorial/step3_actions/step3_actions_spec.js @@ -1,6 +1,5 @@ "use strict" const expect = require("chai").expect -const _ = require("lodash") const toAstVisitor = require("./step3a_actions_visitor").toAst const toAstEmbedded = require("./step3b_actions_embedded").toAst diff --git a/package.json b/package.json index ab9429301..f5f19235f 100644 --- a/package.json +++ b/package.json @@ -10,11 +10,13 @@ "scripts": { "lerna:version": "yarn && lerna version", "lerna:publish": "lerna publish from-git --yes --no-verify-access", - "ci": "npm-run-all format:validate build test", + "ci": "npm-run-all format:validate lint:validate build test", "build": "lerna run build", "test": "lerna run test", "format:fix": "prettier --ignore-path .gitignore --write \"**/*.@(ts|js|json|md|yml)\"", - "format:validate": "prettier --ignore-path .gitignore --check \"**/*.@(ts|js|json|md|yml)\"" + "format:validate": "prettier --ignore-path .gitignore --check \"**/*.@(ts|js|json|md|yml)\"", + "lint:fix": "eslint . --ext=js,ts --fix --max-warnings=0 --ignore-path=.gitignore", + "lint:validate": "eslint . 
   },
   "prettier": {
     "endOfLine": "lf",
@@ -30,6 +32,9 @@
   "lint-staged": {
     "*.{ts,js,json,md}": [
       "prettier --write"
+    ],
+    "*.{ts,js}": [
+      "eslint --fix --max-warnings=0 --ignore-pattern=!.*"
     ]
   },
   "config": {
@@ -54,6 +59,11 @@
     "shx": "^0.3.2",
     "cz-conventional-changelog": "3.3.0",
     "@commitlint/cli": "12.0.0",
-    "@commitlint/config-conventional": "12.0.0"
+    "@commitlint/config-conventional": "12.0.0",
+    "eslint": "7.18.0",
+    "@typescript-eslint/parser": "4.14.0",
+    "@typescript-eslint/eslint-plugin": "4.15.0",
+    "eslint-config-prettier": "7.2.0",
+    "eslint-plugin-eslint-comments": "3.2.0"
   }
 }
diff --git a/packages/chevrotain/diagrams/src/diagrams_builder.js b/packages/chevrotain/diagrams/src/diagrams_builder.js
index d0adc6ca2..73e81228b 100644
--- a/packages/chevrotain/diagrams/src/diagrams_builder.js
+++ b/packages/chevrotain/diagrams/src/diagrams_builder.js
@@ -20,7 +20,7 @@
   var Optional = railroad.Optional
   var OneOrMore = railroad.OneOrMore
   var ZeroOrMore = railroad.ZeroOrMore
-  var Terminal = railroad.Terminal
+  // var Terminal = railroad.Terminal
   var NonTerminal = railroad.NonTerminal
 
   /**
diff --git a/packages/chevrotain/package.json b/packages/chevrotain/package.json
index d1a3785cd..b1ffe69c1 100644
--- a/packages/chevrotain/package.json
+++ b/packages/chevrotain/package.json
@@ -45,7 +45,7 @@
   "homepage": "https://chevrotain.io/docs/",
   "scripts": {
     "---------- CI FLOWS --------": "",
-    "build": "npm-run-all clean lint compile build:esm dts api-site:build bundle",
+    "build": "npm-run-all clean compile build:esm dts api-site:build bundle",
     "build:esm": "npm-run-all clean:esm compile:esm",
     "test": "npm-run-all test:esm compile:def coverage check-coverage",
     "test:esm": "mocha \"./lib_esm/test/**/*spec.js\" --require esm",
@@ -58,7 +58,6 @@
     "---------- BUILD STEPS --------": "",
     "clean": "shx rm -rf coverage dev lib",
     "clean:esm": "shx rm -rf lib_esm",
-    "lint": "tslint --project tsconfig.json",
     "compile": "tsc && node ./scripts/fix-coverage-report.js",
     "compile:esm": "tsc --project tsconfig.esm.json",
     "compile:watch": "tsc -w",
@@ -99,7 +98,6 @@
     "sinon": "^9.0.0",
     "sinon-chai": "^3.0.0",
     "source-map-support": "0.5.19",
-    "tslint": "^6.0.0",
     "typedoc": "^0.20.28",
     "typescript": "4.2.2",
     "vuepress": "^1.4.1",
diff --git a/packages/chevrotain/scripts/update-api-docs.js b/packages/chevrotain/scripts/update-api-docs.js
index 26660dc47..f60380b98 100644
--- a/packages/chevrotain/scripts/update-api-docs.js
+++ b/packages/chevrotain/scripts/update-api-docs.js
@@ -1,8 +1,7 @@
-const _ = require("lodash")
 const fs = require("fs-extra")
 const path = require("path")
 
-pkgPath = path.join(__dirname, "../package.json")
+const pkgPath = path.join(__dirname, "../package.json")
 const pkg = fs.readJsonSync(pkgPath)
 
 console.log("updating api docs re-direct")
@@ -15,7 +14,7 @@ const newVersionApiDocsDir = path.join(
 )
 
 try {
-  stats = fs.lstatSync(newVersionApiDocsDir)
+  const stats = fs.lstatSync(newVersionApiDocsDir)
 
   if (stats.isDirectory()) {
     console.error("docs directory for " + noDotsVersion + " already exists")
diff --git a/packages/chevrotain/src/generate/generate.ts b/packages/chevrotain/src/generate/generate.ts
index 78c84fe6e..a68e8c527 100644
--- a/packages/chevrotain/src/generate/generate.ts
+++ b/packages/chevrotain/src/generate/generate.ts
@@ -59,7 +59,7 @@ return new ${options.name}(tokenVocabulary, config)
 export function genClass(options: { name: string; rules: Rule[] }): string {
   // TODO: how to pass the token vocabulary? Constructor? other?
-  let result = `
+  const result = `
 function ${options.name}(tokenVocabulary, config) {
   // invoke super constructor
   // No support for embedded actions currently, so we can 'hardcode'
@@ -85,7 +85,7 @@ ${options.name}.prototype.constructor = ${options.name}
 }
 
 export function genAllRules(rules: Rule[]): string {
-  let rulesText = map(rules, (currRule) => {
+  const rulesText = map(rules, (currRule) => {
     return genRule(currRule, 1)
   })
 
diff --git a/packages/chevrotain/src/lang/lang_extensions.ts b/packages/chevrotain/src/lang/lang_extensions.ts
index 0ac187d3d..b9536ab80 100644
--- a/packages/chevrotain/src/lang/lang_extensions.ts
+++ b/packages/chevrotain/src/lang/lang_extensions.ts
@@ -15,7 +15,7 @@ const NAME = "name"
 export function functionName(func: TokenType): string {
   // Engines that support Function.prototype.name OR the nth (n>1) time after
   // the name has been computed in the following else block.
-  let existingNameProp = (<any>func).name
+  const existingNameProp = (<any>func).name
   /* istanbul ignore else - too many hacks for IE/old versions of node.js here*/
   if (existingNameProp) {
     return existingNameProp
@@ -28,7 +28,7 @@ export function functionName(func: TokenType): string {
  * @returns {boolean} - has the property been successfully defined
  */
 export function defineNameProp(obj, nameValue): boolean {
-  let namePropDescriptor = Object.getOwnPropertyDescriptor(obj, NAME)
+  const namePropDescriptor = Object.getOwnPropertyDescriptor(obj, NAME)
   /* istanbul ignore else -> will only run in old versions of node.js */
   if (isUndefined(namePropDescriptor) || namePropDescriptor.configurable) {
     Object.defineProperty(obj, NAME, {
diff --git a/packages/chevrotain/src/parse/constants.ts b/packages/chevrotain/src/parse/constants.ts
index 83edc96ac..4365827ae 100644
--- a/packages/chevrotain/src/parse/constants.ts
+++ b/packages/chevrotain/src/parse/constants.ts
@@ -1,2 +1,2 @@
 // TODO: can this be removed? where is it used?
-export let IN = "_~IN~_" +export const IN = "_~IN~_" diff --git a/packages/chevrotain/src/parse/cst/cst_visitor.ts b/packages/chevrotain/src/parse/cst/cst_visitor.ts index 559942c72..8886a1df8 100644 --- a/packages/chevrotain/src/parse/cst/cst_visitor.ts +++ b/packages/chevrotain/src/parse/cst/cst_visitor.ts @@ -13,14 +13,14 @@ import { defineNameProp, functionName } from "../../lang/lang_extensions" import { ICstVisitor } from "../../../api" export function defaultVisit(ctx: any, param: IN): OUT { - let childrenNames = keys(ctx) - let childrenNamesLength = childrenNames.length + const childrenNames = keys(ctx) + const childrenNamesLength = childrenNames.length for (let i = 0; i < childrenNamesLength; i++) { - let currChildName = childrenNames[i] - let currChildArray = ctx[currChildName] - let currChildArrayLength = currChildArray.length + const currChildName = childrenNames[i] + const currChildArray = ctx[currChildName] + const currChildArrayLength = currChildArray.length for (let j = 0; j < currChildArrayLength; j++) { - let currChild: any = currChildArray[j] + const currChild: any = currChildArray[j] // distinction between Tokens Children and CstNode children if (currChild.tokenTypeIdx === undefined) { this[currChild.name](currChild.children, param) @@ -37,14 +37,14 @@ export function createBaseSemanticVisitorConstructor( ): { new (...args: any[]): ICstVisitor } { - let derivedConstructor: any = function () {} + const derivedConstructor: any = function () {} // can be overwritten according to: // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/ // name?redirectlocale=en-US&redirectslug=JavaScript%2FReference%2FGlobal_Objects%2FFunction%2Fname defineNameProp(derivedConstructor, grammarName + "BaseSemantics") - let semanticProto = { + const semanticProto = { visit: function (cstNode, param) { // enables writing more concise visitor methods when CstNode has only a single child if (isArray(cstNode)) { @@ -62,9 +62,9 @@ export function createBaseSemanticVisitorConstructor( }, validateVisitor: function () { - let semanticDefinitionErrors = validateVisitor(this, ruleNames) + const semanticDefinitionErrors = validateVisitor(this, ruleNames) if (!isEmpty(semanticDefinitionErrors)) { - let errorMessages = map( + const errorMessages = map( semanticDefinitionErrors, (currDefError) => currDefError.msg ) @@ -92,14 +92,14 @@ export function createBaseVisitorConstructorWithDefaults( ): { new (...args: any[]): ICstVisitor } { - let derivedConstructor: any = function () {} + const derivedConstructor: any = function () {} // can be overwritten according to: // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/ // name?redirectlocale=en-US&redirectslug=JavaScript%2FReference%2FGlobal_Objects%2FFunction%2Fname defineNameProp(derivedConstructor, grammarName + "BaseSemanticsWithDefaults") - let withDefaultsProto = Object.create(baseConstructor.prototype) + const withDefaultsProto = Object.create(baseConstructor.prototype) forEach(ruleNames, (ruleName) => { withDefaultsProto[ruleName] = defaultVisit }) @@ -125,8 +125,8 @@ export function validateVisitor( visitorInstance: Function, ruleNames: string[] ): IVisitorDefinitionError[] { - let missingErrors = validateMissingCstMethods(visitorInstance, ruleNames) - let redundantErrors = validateRedundantMethods(visitorInstance, ruleNames) + const missingErrors = validateMissingCstMethods(visitorInstance, ruleNames) + const redundantErrors = validateRedundantMethods(visitorInstance, 
 
   return missingErrors.concat(redundantErrors)
 }
 
@@ -135,7 +135,7 @@ export function validateMissingCstMethods(
   visitorInstance: Function,
   ruleNames: string[]
 ): IVisitorDefinitionError[] {
-  let errors: IVisitorDefinitionError[] = map(ruleNames, (currRuleName) => {
+  const errors: IVisitorDefinitionError[] = map(ruleNames, (currRuleName) => {
     if (!isFunction(visitorInstance[currRuleName])) {
       return {
         msg: `Missing visitor method: <${currRuleName}> on ${functionName(
@@ -155,9 +155,9 @@ export function validateRedundantMethods(
   visitorInstance: Function,
   ruleNames: string[]
 ): IVisitorDefinitionError[] {
-  let errors = []
+  const errors = []
 
-  for (let prop in visitorInstance) {
+  for (const prop in visitorInstance) {
     if (
       isFunction(visitorInstance[prop]) &&
       !contains(VALID_PROP_NAMES, prop) &&
diff --git a/packages/chevrotain/src/parse/errors_public.ts b/packages/chevrotain/src/parse/errors_public.ts
index 28f379805..15c1c5ebf 100644
--- a/packages/chevrotain/src/parse/errors_public.ts
+++ b/packages/chevrotain/src/parse/errors_public.ts
@@ -18,12 +18,12 @@ import {
 
 export const defaultParserErrorProvider: IParserErrorMessageProvider = {
   buildMismatchTokenMessage({ expected, actual, previous, ruleName }): string {
-    let hasLabel = hasTokenLabel(expected)
-    let expectedMsg = hasLabel
+    const hasLabel = hasTokenLabel(expected)
+    const expectedMsg = hasLabel
       ? `--> ${tokenLabel(expected)} <--`
       : `token of type --> ${expected.name} <--`
 
-    let msg = `Expecting ${expectedMsg} but found --> '${actual.image}' <--`
+    const msg = `Expecting ${expectedMsg} but found --> '${actual.image}' <--`
 
     return msg
   },
@@ -39,31 +39,31 @@ export const defaultParserErrorProvider: IParserErrorMessageProvider = {
     customUserDescription,
     ruleName
   }): string {
-    let errPrefix = "Expecting: "
+    const errPrefix = "Expecting: "
     // TODO: issue: No Viable Alternative Error may have incomplete details. #502
-    let actualText = first(actual).image
-    let errSuffix = "\nbut found: '" + actualText + "'"
+    const actualText = first(actual).image
+    const errSuffix = "\nbut found: '" + actualText + "'"
 
     if (customUserDescription) {
       return errPrefix + customUserDescription + errSuffix
     } else {
-      let allLookAheadPaths = reduce(
+      const allLookAheadPaths = reduce(
         expectedPathsPerAlt,
         (result, currAltPaths) => result.concat(currAltPaths),
         []
       )
-      let nextValidTokenSequences = map(
+      const nextValidTokenSequences = map(
        allLookAheadPaths,
        (currPath) =>
          `[${map(currPath, (currTokenType) => tokenLabel(currTokenType)).join(
            ", "
          )}]`
      )
-      let nextValidSequenceItems = map(
+      const nextValidSequenceItems = map(
        nextValidTokenSequences,
        (itemMsg, idx) => ` ${idx + 1}. ${itemMsg}`
      )
-      let calculatedDescription = `one of these possible Token sequences:\n${nextValidSequenceItems.join(
+      const calculatedDescription = `one of these possible Token sequences:\n${nextValidSequenceItems.join(
        "\n"
      )}`
 
@@ -77,22 +77,22 @@ export const defaultParserErrorProvider: IParserErrorMessageProvider = {
     customUserDescription,
     ruleName
   }): string {
-    let errPrefix = "Expecting: "
+    const errPrefix = "Expecting: "
     // TODO: issue: No Viable Alternative Error may have incomplete details. #502
-    let actualText = first(actual).image
-    let errSuffix = "\nbut found: '" + actualText + "'"
+    const actualText = first(actual).image
+    const errSuffix = "\nbut found: '" + actualText + "'"
 
     if (customUserDescription) {
       return errPrefix + customUserDescription + errSuffix
     } else {
-      let nextValidTokenSequences = map(
+      const nextValidTokenSequences = map(
        expectedIterationPaths,
        (currPath) =>
          `[${map(currPath, (currTokenType) => tokenLabel(currTokenType)).join(
            ","
          )}]`
      )
-      let calculatedDescription =
+      const calculatedDescription =
        `expecting at least one iteration which starts with one of these possible Token sequences::\n ` +
        `<${nextValidTokenSequences.join(" ,")}>`
 
@@ -140,7 +140,7 @@ export const defaultGrammarValidatorErrorProvider: IGrammarValidatorErrorMessage
     const duplicateProd = first(duplicateProds)
     const index = duplicateProd.idx
     const dslName = getProductionDslName(duplicateProd)
-    let extraArgument = getExtraProductionArgument(duplicateProd)
+    const extraArgument = getExtraProductionArgument(duplicateProd)
 
     const hasExplicitIndex = index > 0
     let msg = `->${dslName}${hasExplicitIndex ? index : ""}<- ${
@@ -199,10 +199,10 @@ export const defaultGrammarValidatorErrorProvider: IGrammarValidatorErrorMessage
     ambiguityIndices: number[]
     alternation: Alternation
   }): string {
-    let pathMsg = map(options.prefixPath, (currtok) =>
+    const pathMsg = map(options.prefixPath, (currtok) =>
       tokenLabel(currtok)
     ).join(", ")
-    let occurrence =
+    const occurrence =
       options.alternation.idx === 0 ? "" : options.alternation.idx
     let currMessage = `Ambiguous Alternatives Detected: <${options.ambiguityIndices.join(
@@ -275,14 +275,14 @@ export const defaultGrammarValidatorErrorProvider: IGrammarValidatorErrorMessage
     leftRecursionPath: Rule[]
   }): string {
     const ruleName = options.topLevelRule.name
-    let pathNames = utils.map(
+    const pathNames = utils.map(
       options.leftRecursionPath,
       (currRule) => currRule.name
     )
-    let leftRecursivePath = `${ruleName} --> ${pathNames
+    const leftRecursivePath = `${ruleName} --> ${pathNames
       .concat([ruleName])
       .join(" --> ")}`
-    let errMsg =
+    const errMsg =
       `Left Recursion found in grammar.\n` +
       `rule: <${ruleName}> can be invoked from itself (directly or indirectly)\n` +
      `without consuming any Tokens. The grammar path that causes this is: \n ${leftRecursivePath}\n` +
diff --git a/packages/chevrotain/src/parse/grammar/checks.ts b/packages/chevrotain/src/parse/grammar/checks.ts
index 63bff454e..2275d4d79 100644
--- a/packages/chevrotain/src/parse/grammar/checks.ts
+++ b/packages/chevrotain/src/parse/grammar/checks.ts
@@ -54,10 +54,10 @@ export function validateGrammar(
   errMsgProvider: IGrammarValidatorErrorMessageProvider,
   grammarName: string
 ): IParserDefinitionError[] {
-  let duplicateErrors: any = utils.map(topLevels, (currTopLevel) =>
+  const duplicateErrors: any = utils.map(topLevels, (currTopLevel) =>
     validateDuplicateProductions(currTopLevel, errMsgProvider)
   )
-  let leftRecursionErrors: any = utils.map(topLevels, (currTopRule) =>
+  const leftRecursionErrors: any = utils.map(topLevels, (currTopRule) =>
     validateNoLeftRecursion(currTopRule, currTopRule, errMsgProvider)
   )
 
@@ -86,7 +86,7 @@ export function validateGrammar(
     )
   }
 
-  let termsNamespaceConflictErrors = checkTerminalAndNoneTerminalsNameSpace(
+  const termsNamespaceConflictErrors = checkTerminalAndNoneTerminalsNameSpace(
     topLevels,
     tokenTypes,
     errMsgProvider
@@ -124,27 +124,27 @@ function validateDuplicateProductions(
   topLevelRule: Rule,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserDuplicatesDefinitionError[] {
-  let collectorVisitor = new OccurrenceValidationCollector()
+  const collectorVisitor = new OccurrenceValidationCollector()
   topLevelRule.accept(collectorVisitor)
-  let allRuleProductions = collectorVisitor.allProductions
+  const allRuleProductions = collectorVisitor.allProductions
 
-  let productionGroups = utils.groupBy(
+  const productionGroups = utils.groupBy(
     allRuleProductions,
     identifyProductionForDuplicates
   )
 
-  let duplicates: any = utils.pick(productionGroups, (currGroup) => {
+  const duplicates: any = utils.pick(productionGroups, (currGroup) => {
     return currGroup.length > 1
   })
 
-  let errors = utils.map(utils.values(duplicates), (currDuplicates: any) => {
-    let firstProd: any = utils.first(currDuplicates)
-    let msg = errMsgProvider.buildDuplicateFoundError(
+  const errors = utils.map(utils.values(duplicates), (currDuplicates: any) => {
+    const firstProd: any = utils.first(currDuplicates)
+    const msg = errMsgProvider.buildDuplicateFoundError(
      topLevelRule,
      currDuplicates
    )
-    let dslName = getProductionDslName(firstProd)
-    let defError: IParserDuplicatesDefinitionError = {
+    const dslName = getProductionDslName(firstProd)
+    const defError: IParserDuplicatesDefinitionError = {
      message: msg,
      type: ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS,
      ruleName: topLevelRule.name,
@@ -152,7 +152,7 @@ function validateDuplicateProductions(
       occurrence: firstProd.idx
     }
 
-    let param = getExtraProductionArgument(firstProd)
+    const param = getExtraProductionArgument(firstProd)
     if (param) {
       defError.parameter = param
     }
@@ -224,7 +224,7 @@ export function validateRuleDoesNotAlreadyExist(
   className,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserDefinitionError[] {
-  let errors = []
+  const errors = []
   const occurrences = reduce(
     allRules,
     (result, curRule) => {
@@ -258,7 +258,7 @@ export function validateRuleIsOverridden(
   definedRulesNames: string[],
   className
 ): IParserDefinitionError[] {
-  let errors = []
+  const errors = []
   let errMsg
 
   if (!utils.contains(definedRulesNames, ruleName)) {
@@ -281,13 +281,13 @@ export function validateNoLeftRecursion(
   errMsgProvider: IGrammarValidatorErrorMessageProvider,
   path: Rule[] = []
 ): IParserDefinitionError[] {
-  let errors = []
-  let nextNonTerminals = getFirstNoneTerminal(currRule.definition)
+  const errors = []
+  const nextNonTerminals = getFirstNoneTerminal(currRule.definition)
   if (utils.isEmpty(nextNonTerminals)) {
     return []
   } else {
-    let ruleName = topRule.name
-    let foundLeftRecursion = utils.contains(nextNonTerminals, topRule)
+    const ruleName = topRule.name
+    const foundLeftRecursion = utils.contains(nextNonTerminals, topRule)
     if (foundLeftRecursion) {
       errors.push({
         message: errMsgProvider.buildLeftRecursionError({
@@ -301,12 +301,12 @@ export function validateNoLeftRecursion(
 
     // we are only looking for cyclic paths leading back to the specific topRule
     // other cyclic paths are ignored, we still need this difference to avoid infinite loops...
-    let validNextSteps = utils.difference(
+    const validNextSteps = utils.difference(
       nextNonTerminals,
       path.concat([topRule])
     )
-    let errorsFromNextSteps = utils.map(validNextSteps, (currRefRule) => {
-      let newPath = utils.cloneArr(path)
+    const errorsFromNextSteps = utils.map(validNextSteps, (currRefRule) => {
+      const newPath = utils.cloneArr(path)
       newPath.push(currRefRule)
       return validateNoLeftRecursion(
         topRule,
@@ -325,7 +325,7 @@ export function getFirstNoneTerminal(definition: IProduction[]): Rule[] {
   if (utils.isEmpty(definition)) {
     return result
   }
-  let firstProd = utils.first(definition)
+  const firstProd = utils.first(definition)
 
   /* istanbul ignore else */
   if (firstProd instanceof NonTerminal) {
@@ -354,10 +354,10 @@ export function getFirstNoneTerminal(definition: IProduction[]): Rule[] {
     throw Error("non exhaustive match")
   }
 
-  let isFirstOptional = isOptionalProd(firstProd)
-  let hasMore = definition.length > 1
+  const isFirstOptional = isOptionalProd(firstProd)
+  const hasMore = definition.length > 1
   if (isFirstOptional && hasMore) {
-    let rest = utils.drop(definition)
+    const rest = utils.drop(definition)
     return result.concat(getFirstNoneTerminal(rest))
   } else {
     return result
@@ -376,15 +376,15 @@ export function validateEmptyOrAlternative(
   topLevelRule: Rule,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserEmptyAlternativeDefinitionError[] {
-  let orCollector = new OrCollector()
+  const orCollector = new OrCollector()
   topLevelRule.accept(orCollector)
-  let ors = orCollector.alternations
+  const ors = orCollector.alternations
 
-  let errors = utils.reduce(
+  const errors = utils.reduce(
     ors,
     (errors, currOr) => {
-      let exceptLast = utils.dropRight(currOr.definition)
-      let currErrors = utils.map(
+      const exceptLast = utils.dropRight(currOr.definition)
+      const currErrors = utils.map(
         exceptLast,
         (currAlternative: IProduction, currAltIdx) => {
           const possibleFirstInAlt = nextPossibleTokensAfter(
@@ -423,7 +423,7 @@ export function validateAmbiguousAlternationAlternatives(
   globalMaxLookahead: number,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserAmbiguousAlternativesDefinitionError[] {
-  let orCollector = new OrCollector()
+  const orCollector = new OrCollector()
   topLevelRule.accept(orCollector)
   let ors = orCollector.alternations
 
   // New Handling of ignoring ambiguities
   // - https://github.com/chevrotain/chevrotain/issues/869
   ors = reject(ors, (currOr) => currOr.ignoreAmbiguities === true)
 
-  let errors = utils.reduce(
+  const errors = utils.reduce(
     ors,
     (result, currOr: Alternation) => {
-      let currOccurrence = currOr.idx
+      const currOccurrence = currOr.idx
       const actualMaxLookahead = currOr.maxLookahead || globalMaxLookahead
-      let alternatives = getLookaheadPathsForOr(
+      const alternatives = getLookaheadPathsForOr(
         currOccurrence,
         topLevelRule,
         actualMaxLookahead,
         currOr
       )
-      let altsAmbiguityErrors = checkAlternativesAmbiguities(
+      const altsAmbiguityErrors = checkAlternativesAmbiguities(
         alternatives,
         currOr,
         topLevelRule,
         errMsgProvider
       )
-      let altsPrefixAmbiguityErrors = checkPrefixAlternativesAmbiguities(
+      const altsPrefixAmbiguityErrors = checkPrefixAlternativesAmbiguities(
         alternatives,
         currOr,
         topLevelRule,
@@ -489,11 +489,11 @@ export function validateTooManyAlts(
   topLevelRule: Rule,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserDefinitionError[] {
-  let orCollector = new OrCollector()
+  const orCollector = new OrCollector()
   topLevelRule.accept(orCollector)
-  let ors = orCollector.alternations
+  const ors = orCollector.alternations
 
-  let errors = utils.reduce(
+  const errors = utils.reduce(
     ors,
     (errors, currOr) => {
       if (currOr.definition.length > 255) {
@@ -520,22 +520,22 @@ export function validateSomeNonEmptyLookaheadPath(
   maxLookahead: number,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserDefinitionError[] {
-  let errors = []
+  const errors = []
   forEach(topLevelRules, (currTopRule) => {
-    let collectorVisitor = new RepetionCollector()
+    const collectorVisitor = new RepetionCollector()
     currTopRule.accept(collectorVisitor)
-    let allRuleProductions = collectorVisitor.allProductions
+    const allRuleProductions = collectorVisitor.allProductions
     forEach(allRuleProductions, (currProd) => {
-      let prodType = getProdType(currProd)
+      const prodType = getProdType(currProd)
       const actualMaxLookahead = currProd.maxLookahead || maxLookahead
-      let currOccurrence = currProd.idx
-      let paths = getLookaheadPathsForOptionalProd(
+      const currOccurrence = currProd.idx
+      const paths = getLookaheadPathsForOptionalProd(
        currOccurrence,
        currTopRule,
        prodType,
        actualMaxLookahead
      )
-      let pathsInsideProduction = paths[0]
+      const pathsInsideProduction = paths[0]
      if (isEmpty(flatten(pathsInsideProduction))) {
        const errMsg = errMsgProvider.buildEmptyRepetitionError({
          topLevelRule: currTopRule,
@@ -564,8 +564,8 @@ function checkAlternativesAmbiguities(
   rule: Rule,
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserAmbiguousAlternativesDefinitionError[] {
-  let foundAmbiguousPaths = []
-  let identicalAmbiguities = reduce(
+  const foundAmbiguousPaths = []
+  const identicalAmbiguities = reduce(
     alternatives,
     (result, currAlt, currAltIdx) => {
       // ignore (skip) ambiguities with this alternative
       if (currAlt.ignoreAmbiguities === true) {
         return result
       }
 
       forEach(currAlt, (currPath) => {
-        let altsCurrPathAppearsIn = [currAltIdx]
+        const altsCurrPathAppearsIn = [currAltIdx]
         forEach(alternatives, (currOtherAlt, currOtherAltIdx) => {
           if (
             currAltIdx !== currOtherAltIdx &&
@@ -602,8 +602,8 @@ function checkAlternativesAmbiguities(
     []
   )
 
-  let currErrors = utils.map(identicalAmbiguities, (currAmbDescriptor) => {
-    let ambgIndices = map(
+  const currErrors = utils.map(identicalAmbiguities, (currAmbDescriptor) => {
+    const ambgIndices = map(
       currAmbDescriptor.alts,
       (currAltIdx) => currAltIdx + 1
     )
@@ -636,10 +636,10 @@ export function checkPrefixAlternativesAmbiguities(
   let errors = []
 
   // flatten
-  let pathsAndIndices = reduce(
+  const pathsAndIndices = reduce(
     alternatives,
     (result, currAlt, idx) => {
-      let currPathsAndIdx = map(currAlt, (currPath) => {
+      const currPathsAndIdx = map(currAlt, (currPath) => {
         return { idx: idx, path: currPath }
       })
       return result.concat(currPathsAndIdx)
     },
     []
   )
 
   forEach(pathsAndIndices, (currPathAndIdx) => {
     const alternativeGast = alternation.definition[currPathAndIdx.idx]
     // ignore (skip) ambiguities with this alternative
     if (alternativeGast.ignoreAmbiguities === true) {
       return
     }
-    let targetIdx = currPathAndIdx.idx
-    let targetPath = currPathAndIdx.path
+    const targetIdx = currPathAndIdx.idx
+    const targetPath = currPathAndIdx.path
 
-    let prefixAmbiguitiesPathsAndIndices = findAll(
+    const prefixAmbiguitiesPathsAndIndices = findAll(
       pathsAndIndices,
       (searchPathAndIdx) => {
         // prefix ambiguity can only be created from lower idx (higher priority) path
@@ -672,10 +672,10 @@ export function checkPrefixAlternativesAmbiguities(
       }
     )
 
-    let currPathPrefixErrors = map(
+    const currPathPrefixErrors = map(
       prefixAmbiguitiesPathsAndIndices,
       (currAmbPathAndIdx) => {
-        let ambgIndices = [currAmbPathAndIdx.idx + 1, targetIdx + 1]
+        const ambgIndices = [currAmbPathAndIdx.idx + 1, targetIdx + 1]
         const occurrence = alternation.idx === 0 ? "" : alternation.idx
 
         const message = errMsgProvider.buildAlternationPrefixAmbiguityError({
@@ -704,14 +704,14 @@ function checkTerminalAndNoneTerminalsNameSpace(
   tokenTypes: TokenType[],
   errMsgProvider: IGrammarValidatorErrorMessageProvider
 ): IParserDefinitionError[] {
-  let errors = []
+  const errors = []
 
-  let tokenNames = map(tokenTypes, (currToken) => currToken.name)
+  const tokenNames = map(tokenTypes, (currToken) => currToken.name)
 
   forEach(topLevels, (currRule) => {
     const currRuleName = currRule.name
     if (contains(tokenNames, currRuleName)) {
-      let errMsg = errMsgProvider.buildNamespaceConflictError(currRule)
+      const errMsg = errMsgProvider.buildNamespaceConflictError(currRule)
 
       errors.push({
         message: errMsg,
diff --git a/packages/chevrotain/src/parse/grammar/first.ts b/packages/chevrotain/src/parse/grammar/first.ts
index bc24703d6..c96868156 100644
--- a/packages/chevrotain/src/parse/grammar/first.ts
+++ b/packages/chevrotain/src/parse/grammar/first.ts
@@ -28,7 +28,7 @@ export function first(prod: IProduction): TokenType[] {
 
 export function firstForSequence(prod: AbstractProduction): TokenType[] {
   let firstSet: TokenType[] = []
-  let seq = prod.definition
+  const seq = prod.definition
   let nextSubProdIdx = 0
   let hasInnerProdsRemaining = seq.length > nextSubProdIdx
   let currSubProd
@@ -47,7 +47,7 @@ export function firstForSequence(prod: AbstractProduction): TokenType[] {
 }
 
 export function firstForBranching(prod: AbstractProduction): TokenType[] {
-  let allAlternativesFirsts: TokenType[][] = map(
+  const allAlternativesFirsts: TokenType[][] = map(
     prod.definition,
     (innerProd) => {
       return first(innerProd)
diff --git a/packages/chevrotain/src/parse/grammar/follow.ts b/packages/chevrotain/src/parse/grammar/follow.ts
index f89aa0086..dbc5ef9a3 100644
--- a/packages/chevrotain/src/parse/grammar/follow.ts
+++ b/packages/chevrotain/src/parse/grammar/follow.ts
@@ -32,12 +32,12 @@ export class ResyncFollowsWalker extends RestWalker {
     currRest: IProduction[],
     prevRest: IProduction[]
   ): void {
-    let followName =
+    const followName =
       buildBetweenProdsFollowPrefix(refProd.referencedRule, refProd.idx) +
       this.topProd.name
-    let fullRest: IProduction[] = currRest.concat(prevRest)
-    let restProd = new Alternative({ definition: fullRest })
-    let t_in_topProd_follows = first(restProd)
+    const fullRest: IProduction[] = currRest.concat(prevRest)
+    const restProd = new Alternative({ definition: fullRest })
+    const t_in_topProd_follows = first(restProd)
     this.follows[followName] = t_in_topProd_follows
   }
 }
 
 export function computeAllProdsFollows(
   topProductions: Rule[]
 ): Record<string, TokenType[]> {
-  let reSyncFollows = {}
+  const reSyncFollows = {}
 
   forEach(topProductions, (topProd) => {
-    let currRefsFollow = new ResyncFollowsWalker(topProd).startWalking()
+    const currRefsFollow = new ResyncFollowsWalker(topProd).startWalking()
     assign(reSyncFollows, currRefsFollow)
   })
   return reSyncFollows
@@ -62,6 +62,6 @@ export function buildBetweenProdsFollowPrefix(
 }
 
 export function buildInProdFollowPrefix(terminal: Terminal): string {
-  let terminalName = terminal.terminalType.name
+  const terminalName = terminal.terminalType.name
   return terminalName + terminal.idx + IN
 }
diff --git a/packages/chevrotain/src/parse/grammar/gast/gast.ts b/packages/chevrotain/src/parse/grammar/gast/gast.ts
index 810abf971..89dcaff99 100644
--- a/packages/chevrotain/src/parse/grammar/gast/gast.ts
+++ b/packages/chevrotain/src/parse/grammar/gast/gast.ts
@@ -32,7 +32,7 @@ export function isOptionalProd(
   prod: IProduction,
   alreadyVisited: NonTerminal[] = []
 ): boolean {
-  let isDirectlyOptional =
+  const isDirectlyOptional =
     prod instanceof Option ||
     prod instanceof Repetition ||
     prod instanceof RepetitionWithSeparator
diff --git a/packages/chevrotain/src/parse/grammar/gast/gast_public.ts b/packages/chevrotain/src/parse/grammar/gast/gast_public.ts
index c015c5daf..7fd455108 100644
--- a/packages/chevrotain/src/parse/grammar/gast/gast_public.ts
+++ b/packages/chevrotain/src/parse/grammar/gast/gast_public.ts
@@ -343,14 +343,14 @@ export function serializeProduction(node: IProduction): ISerializedGast {
       definition: convertDefinition(node.definition)
     }
   } else if (node instanceof Terminal) {
-    let serializedTerminal = {
+    const serializedTerminal = {
       type: "Terminal",
       name: node.terminalType.name,
       label: tokenLabel(node.terminalType),
       idx: node.idx
     }
 
-    let pattern = node.terminalType.PATTERN
+    const pattern = node.terminalType.PATTERN
     if (node.terminalType.PATTERN) {
       serializedTerminal.pattern = isRegExp(pattern)
         ? (<any>pattern).source
diff --git a/packages/chevrotain/src/parse/grammar/interpreter.ts b/packages/chevrotain/src/parse/grammar/interpreter.ts
index fb88682d0..793e8cffb 100644
--- a/packages/chevrotain/src/parse/grammar/interpreter.ts
+++ b/packages/chevrotain/src/parse/grammar/interpreter.ts
@@ -84,7 +84,7 @@ export abstract class AbstractNextPossibleTokensWalker extends RestWalker {
       refProd.referencedRule.name === this.nextProductionName &&
       refProd.idx === this.nextProductionOccurrence
     ) {
-      let fullRest = currRest.concat(prevRest)
+      const fullRest = currRest.concat(prevRest)
       this.updateExpectedNext()
       this.walk(refProd.referencedRule, fullRest)
     }
@@ -126,8 +126,8 @@ export class NextAfterTokenWalker extends AbstractNextPossibleTokensWalker {
       terminal.idx === this.nextTerminalOccurrence &&
       !this.found
     ) {
-      let fullRest = currRest.concat(prevRest)
-      let restProd = new Alternative({ definition: fullRest })
+      const fullRest = currRest.concat(prevRest)
+      const restProd = new Alternative({ definition: fullRest })
       this.possibleTokTypes = first(restProd)
       this.found = true
     }
@@ -170,7 +170,7 @@ export class NextTerminalAfterManyWalker extends AbstractNextTerminalAfterProduc
     prevRest: IProduction[]
   ): void {
     if (manyProd.idx === this.occurrence) {
-      let firstAfterMany = _first(currRest.concat(prevRest))
+      const firstAfterMany = _first(currRest.concat(prevRest))
       this.result.isEndOfRule = firstAfterMany === undefined
       if (firstAfterMany instanceof Terminal) {
         this.result.token = firstAfterMany.terminalType
@@ -189,7 +189,7 @@ export class NextTerminalAfterManySepWalker extends AbstractNextTerminalAfterPro
     prevRest: IProduction[]
   ): void {
     if (manySepProd.idx === this.occurrence) {
-      let firstAfterManySep = _first(currRest.concat(prevRest))
+      const firstAfterManySep = _first(currRest.concat(prevRest))
       this.result.isEndOfRule = firstAfterManySep === undefined
       if (firstAfterManySep instanceof Terminal) {
         this.result.token = firstAfterManySep.terminalType
@@ -208,7 +208,7 @@ export class NextTerminalAfterAtLeastOneWalker extends AbstractNextTerminalAfter
     prevRest: IProduction[]
   ): void {
     if (atLeastOneProd.idx === this.occurrence) {
-      let firstAfterAtLeastOne = _first(currRest.concat(prevRest))
+      const firstAfterAtLeastOne = _first(currRest.concat(prevRest))
       this.result.isEndOfRule = firstAfterAtLeastOne === undefined
       if (firstAfterAtLeastOne instanceof Terminal) {
         this.result.token = firstAfterAtLeastOne.terminalType
@@ -228,7 +228,9 @@ export class NextTerminalAfterAtLeastOneSepWalker extends AbstractNextTerminalAf
     prevRest: IProduction[]
   ): void {
     if (atleastOneSepProd.idx === this.occurrence) {
-      let firstAfterfirstAfterAtLeastOneSep = _first(currRest.concat(prevRest))
+      const firstAfterfirstAfterAtLeastOneSep = _first(
+        currRest.concat(prevRest)
+      )
       this.result.isEndOfRule = firstAfterfirstAfterAtLeastOneSep === undefined
       if (firstAfterfirstAfterAtLeastOneSep instanceof Terminal) {
         this.result.token = firstAfterfirstAfterAtLeastOneSep.terminalType
@@ -262,7 +264,7 @@ export function possiblePathsFrom(
 
   // TODO: avoid inner funcs
   function getAlternativesForProd(definition: IProduction[]) {
-    let alternatives = possiblePathsFrom(
+    const alternatives = possiblePathsFrom(
       remainingPathWith(definition),
       maxLength,
       currPath
@@ -278,7 +280,7 @@ export function possiblePathsFrom(
    * the optional production.
    */
  while (currPath.length < maxLength && i < targetDef.length) {
-    let prod = targetDef[i]
+    const prod = targetDef[i]
 
    /* istanbul ignore else */
    if (prod instanceof Alternative) {
@@ -366,11 +368,11 @@ export function nextPossibleTokensAfter(
   let foundCompletePath = false
 
   const tokenVectorLength = tokenVector.length
-  let minimalAlternativesIndex = tokenVectorLength - maxLookAhead - 1
+  const minimalAlternativesIndex = tokenVectorLength - maxLookAhead - 1
 
-  let result: ISyntacticContentAssistPath[] = []
+  const result: ISyntacticContentAssistPath[] = []
 
-  let possiblePaths: IPathToExamine[] = []
+  const possiblePaths: IPathToExamine[] = []
   possiblePaths.push({
     idx: -1,
     def: initialDef,
@@ -379,7 +381,7 @@ export function nextPossibleTokensAfter(
   })
 
   while (!isEmpty(possiblePaths)) {
-    let currPath = possiblePaths.pop()
+    const currPath = possiblePaths.pop()
 
     // skip alternatives if no more results can be found (assuming deterministic grammar with fixed lookahead)
     if (currPath === EXIT_ALTERNATIVE) {
@@ -393,20 +395,20 @@ export function nextPossibleTokensAfter(
       continue
     }
 
-    let currDef = currPath.def
-    let currIdx = currPath.idx
-    let currRuleStack = currPath.ruleStack
-    let currOccurrenceStack = currPath.occurrenceStack
+    const currDef = currPath.def
+    const currIdx = currPath.idx
+    const currRuleStack = currPath.ruleStack
+    const currOccurrenceStack = currPath.occurrenceStack
 
     // For Example: an empty path could exist in a valid grammar in the case of an EMPTY_ALT
     if (isEmpty(currDef)) {
       continue
     }
 
-    let prod = currDef[0]
+    const prod = currDef[0]
     /* istanbul ignore else */
     if (prod === EXIT_NON_TERMINAL) {
-      let nextPath = {
+      const nextPath = {
         idx: currIdx,
         def: drop(currDef),
         ruleStack: dropRight(currRuleStack),
@@ -416,10 +418,10 @@ export function nextPossibleTokensAfter(
     } else if (prod instanceof Terminal) {
       /* istanbul ignore else */
       if (currIdx < tokenVectorLength - 1) {
-        let nextIdx = currIdx + 1
-        let actualToken = tokenVector[nextIdx]
+        const nextIdx = currIdx + 1
+        const actualToken = tokenVector[nextIdx]
         if (tokMatcher(actualToken, prod.terminalType)) {
-          let nextPath = {
+          const nextPath = {
             idx: nextIdx,
             def: drop(currDef),
             ruleStack: currRuleStack,
@@ -441,13 +443,13 @@ export function nextPossibleTokensAfter(
         throw Error("non exhaustive match")
       }
     } else if (prod instanceof NonTerminal) {
-      let newRuleStack = cloneArr(currRuleStack)
+      const newRuleStack = cloneArr(currRuleStack)
       newRuleStack.push(prod.nonTerminalName)
 
-      let newOccurrenceStack = cloneArr(currOccurrenceStack)
+      const newOccurrenceStack = cloneArr(currOccurrenceStack)
       newOccurrenceStack.push(prod.idx)
 
-      let nextPath = {
+      const nextPath = {
         idx: currIdx,
         def: prod.definition.concat(EXIT_NON_TERMINAL_ARR, drop(currDef)),
         ruleStack: newRuleStack,
@@ -456,7 +458,7 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(nextPath)
     } else if (prod instanceof Option) {
       // the order of alternatives is meaningful, FILO (Last path will be traversed first).
-      let nextPathWithout = {
+      const nextPathWithout = {
         idx: currIdx,
         def: drop(currDef),
         ruleStack: currRuleStack,
@@ -466,7 +468,7 @@ export function nextPossibleTokensAfter(
       // required marker to avoid backtracking paths whose higher priority alternatives already matched
       possiblePaths.push(EXIT_ALTERNATIVE)
 
-      let nextPathWith = {
+      const nextPathWith = {
         idx: currIdx,
         def: prod.definition.concat(drop(currDef)),
         ruleStack: currRuleStack,
@@ -475,12 +477,12 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(nextPathWith)
     } else if (prod instanceof RepetitionMandatory) {
       // TODO:(THE NEW operators here take a while...) (convert once?)
-      let secondIteration = new Repetition({
+      const secondIteration = new Repetition({
         definition: prod.definition,
         idx: prod.idx
       })
-      let nextDef = prod.definition.concat([secondIteration], drop(currDef))
-      let nextPath = {
+      const nextDef = prod.definition.concat([secondIteration], drop(currDef))
+      const nextPath = {
         idx: currIdx,
         def: nextDef,
         ruleStack: currRuleStack,
@@ -489,15 +491,15 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(nextPath)
     } else if (prod instanceof RepetitionMandatoryWithSeparator) {
       // TODO:(THE NEW operators here take a while...) (convert once?)
-      let separatorGast = new Terminal({
+      const separatorGast = new Terminal({
         terminalType: prod.separator
       })
-      let secondIteration = new Repetition({
+      const secondIteration = new Repetition({
         definition: [separatorGast].concat(prod.definition),
         idx: prod.idx
       })
-      let nextDef = prod.definition.concat([secondIteration], drop(currDef))
-      let nextPath = {
+      const nextDef = prod.definition.concat([secondIteration], drop(currDef))
+      const nextPath = {
         idx: currIdx,
         def: nextDef,
         ruleStack: currRuleStack,
@@ -506,7 +508,7 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(nextPath)
     } else if (prod instanceof RepetitionWithSeparator) {
       // the order of alternatives is meaningful, FILO (Last path will be traversed first).
-      let nextPathWithout = {
+      const nextPathWithout = {
         idx: currIdx,
         def: drop(currDef),
         ruleStack: currRuleStack,
@@ -516,15 +518,15 @@ export function nextPossibleTokensAfter(
       // required marker to avoid backtracking paths whose higher priority alternatives already matched
       possiblePaths.push(EXIT_ALTERNATIVE)
 
-      let separatorGast = new Terminal({
+      const separatorGast = new Terminal({
         terminalType: prod.separator
       })
-      let nthRepetition = new Repetition({
+      const nthRepetition = new Repetition({
         definition: [separatorGast].concat(prod.definition),
         idx: prod.idx
       })
-      let nextDef = prod.definition.concat([nthRepetition], drop(currDef))
-      let nextPathWith = {
+      const nextDef = prod.definition.concat([nthRepetition], drop(currDef))
+      const nextPathWith = {
         idx: currIdx,
         def: nextDef,
         ruleStack: currRuleStack,
@@ -533,7 +535,7 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(nextPathWith)
     } else if (prod instanceof Repetition) {
       // the order of alternatives is meaningful, FILO (Last path will be traversed first).
-      let nextPathWithout = {
+      const nextPathWithout = {
         idx: currIdx,
         def: drop(currDef),
         ruleStack: currRuleStack,
@@ -544,12 +546,12 @@ export function nextPossibleTokensAfter(
       possiblePaths.push(EXIT_ALTERNATIVE)
 
       // TODO: an empty repetition will cause infinite loops here, will the parser detect this in selfAnalysis?
-      let nthRepetition = new Repetition({
+      const nthRepetition = new Repetition({
         definition: prod.definition,
         idx: prod.idx
       })
-      let nextDef = prod.definition.concat([nthRepetition], drop(currDef))
-      let nextPathWith = {
+      const nextDef = prod.definition.concat([nthRepetition], drop(currDef))
+      const nextPathWith = {
         idx: currIdx,
         def: nextDef,
         ruleStack: currRuleStack,
@@ -559,8 +561,8 @@ export function nextPossibleTokensAfter(
     } else if (prod instanceof Alternation) {
       // the order of alternatives is meaningful, FILO (Last path will be traversed first).
       for (let i = prod.definition.length - 1; i >= 0; i--) {
-        let currAlt: any = prod.definition[i]
-        let currAltPath = {
+        const currAlt: any = prod.definition[i]
+        const currAltPath = {
           idx: currIdx,
           def: currAlt.definition.concat(drop(currDef)),
           ruleStack: currRuleStack,
@@ -594,10 +596,10 @@ function expandTopLevelRule(
   currRuleStack: string[],
   currOccurrenceStack: number[]
 ): IPathToExamine {
-  let newRuleStack = cloneArr(currRuleStack)
+  const newRuleStack = cloneArr(currRuleStack)
   newRuleStack.push(topRule.name)
 
-  let newCurrOccurrenceStack = cloneArr(currOccurrenceStack)
+  const newCurrOccurrenceStack = cloneArr(currOccurrenceStack)
   // top rule is always assumed to have been called with occurrence index 1
   newCurrOccurrenceStack.push(1)
 
diff --git a/packages/chevrotain/src/parse/grammar/lookahead.ts b/packages/chevrotain/src/parse/grammar/lookahead.ts
index f1fa42f07..8acec062b 100644
--- a/packages/chevrotain/src/parse/grammar/lookahead.ts
+++ b/packages/chevrotain/src/parse/grammar/lookahead.ts
@@ -69,7 +69,7 @@ export function buildLookaheadFuncForOr(
   dynamicTokensEnabled: boolean,
   laFuncBuilder: Function
 ): (orAlts?: IOrAlt<any>[]) => number {
-  let lookAheadPaths = getLookaheadPathsForOr(
+  const lookAheadPaths = getLookaheadPathsForOr(
     occurrence,
     ruleGrammar,
     maxLookahead
@@ -107,7 +107,7 @@ export function buildLookaheadFuncForOptionalProd(
   prodType: PROD_TYPE,
   lookaheadBuilder: (lookAheadSequence, TokenMatcher, boolean) => () => boolean
 ): () => boolean {
-  let lookAheadPaths = getLookaheadPathsForOptionalProd(
+  const lookAheadPaths = getLookaheadPathsForOptionalProd(
     occurrence,
     ruleGrammar,
     prodType,
@@ -129,8 +129,8 @@ export function buildAlternativesLookAheadFunc(
   tokenMatcher: TokenMatcher,
   dynamicTokensEnabled: boolean
 ): (orAlts?: IOrAlt<any>[]) => number {
-  let numOfAlts = alts.length
-  let areAllOneTokenLookahead = every(alts, (currAlt) => {
+  const numOfAlts = alts.length
+  const areAllOneTokenLookahead = every(alts, (currAlt) => {
     return every(currAlt, (currPath) => {
       return currPath.length === 1
     })
@@ -145,22 +145,22 @@ export function buildAlternativesLookAheadFunc(
     // unfortunately the predicates must be extracted every single time
     // as they cannot be cached due to references to parameters(vars) which are no longer valid.
// note that in the common case of no predicates, no cpu time will be wasted on this (see else block) - let predicates: Predicate[] = map(orAlts, (currAlt) => currAlt.GATE) + const predicates: Predicate[] = map(orAlts, (currAlt) => currAlt.GATE) for (let t = 0; t < numOfAlts; t++) { - let currAlt = alts[t] - let currNumOfPaths = currAlt.length + const currAlt = alts[t] + const currNumOfPaths = currAlt.length - let currPredicate = predicates[t] + const currPredicate = predicates[t] if (currPredicate !== undefined && currPredicate.call(this) === false) { // if the predicate does not match there is no point in checking the paths continue } nextPath: for (let j = 0; j < currNumOfPaths; j++) { - let currPath = currAlt[j] - let currPathLength = currPath.length + const currPath = currAlt[j] + const currPathLength = currPath.length for (let i = 0; i < currPathLength; i++) { - let nextToken = this.LA(i + 1) + const nextToken = this.LA(i + 1) if (tokenMatcher(nextToken, currPath[i]) === false) { // mismatch in current path // try the next pth @@ -180,11 +180,11 @@ export function buildAlternativesLookAheadFunc( } else if (areAllOneTokenLookahead && !dynamicTokensEnabled) { // optimized (common) case of all the lookaheads paths requiring only // a single token lookahead. These Optimizations cannot work if dynamically defined Tokens are used. - let singleTokenAlts = map(alts, (currAlt) => { + const singleTokenAlts = map(alts, (currAlt) => { return flatten(currAlt) }) - let choiceToAlt = reduce( + const choiceToAlt = reduce( singleTokenAlts, (result, currAlt, idx) => { forEach(currAlt, (currTokType) => { @@ -206,7 +206,7 @@ export function buildAlternativesLookAheadFunc( * @returns {number} - The chosen alternative index */ return function (): number { - let nextToken = this.LA(1) + const nextToken = this.LA(1) return choiceToAlt[nextToken.tokenTypeIdx] } } else { @@ -217,13 +217,13 @@ export function buildAlternativesLookAheadFunc( */ return function (): number { for (let t = 0; t < numOfAlts; t++) { - let currAlt = alts[t] - let currNumOfPaths = currAlt.length + const currAlt = alts[t] + const currNumOfPaths = currAlt.length nextPath: for (let j = 0; j < currNumOfPaths; j++) { - let currPath = currAlt[j] - let currPathLength = currPath.length + const currPath = currAlt[j] + const currPathLength = currPath.length for (let i = 0; i < currPathLength; i++) { - let nextToken = this.LA(i + 1) + const nextToken = this.LA(i + 1) if (tokenMatcher(nextToken, currPath[i]) === false) { // mismatch in current path // try the next pth @@ -248,29 +248,29 @@ export function buildSingleAlternativeLookaheadFunction( tokenMatcher: TokenMatcher, dynamicTokensEnabled: boolean ): () => boolean { - let areAllOneTokenLookahead = every(alt, (currPath) => { + const areAllOneTokenLookahead = every(alt, (currPath) => { return currPath.length === 1 }) - let numOfPaths = alt.length + const numOfPaths = alt.length // optimized (common) case of all the lookaheads paths requiring only // a single token lookahead. 
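The choiceToAlt table built above turns alternative selection into a single property lookup keyed by tokenTypeIdx whenever every lookahead path is exactly one token long. A hedged sketch of that dispatch, with made-up token indices:

    // Assumed values: 5 and 7 are hypothetical tokenTypeIdx numbers.
    const choiceToAlt: Record<number, number | undefined> = {
      5: 0, // e.g. Comma     -> alternative 0
      7: 1  // e.g. Semicolon -> alternative 1
    }

    function chooseAlt(nextTokenTypeIdx: number): number | undefined {
      // one lookup replaces iterating every path of every alternative
      return choiceToAlt[nextTokenTypeIdx]
    }

    console.log(chooseAlt(7)) // 1
    console.log(chooseAlt(9)) // undefined -> no viable alternative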
if (areAllOneTokenLookahead && !dynamicTokensEnabled) { - let singleTokensTypes = flatten(alt) + const singleTokensTypes = flatten(alt) if ( singleTokensTypes.length === 1 && isEmpty((singleTokensTypes[0]).categoryMatches) ) { - let expectedTokenType = singleTokensTypes[0] - let expectedTokenUniqueKey = (expectedTokenType).tokenTypeIdx + const expectedTokenType = singleTokensTypes[0] + const expectedTokenUniqueKey = (expectedTokenType).tokenTypeIdx return function (): boolean { return this.LA(1).tokenTypeIdx === expectedTokenUniqueKey } } else { - let choiceToAlt = reduce( + const choiceToAlt = reduce( singleTokensTypes, (result, currTokType, idx) => { result[currTokType.tokenTypeIdx] = true @@ -283,17 +283,17 @@ export function buildSingleAlternativeLookaheadFunction( ) return function (): boolean { - let nextToken = this.LA(1) + const nextToken = this.LA(1) return choiceToAlt[nextToken.tokenTypeIdx] === true } } } else { return function (): boolean { nextPath: for (let j = 0; j < numOfPaths; j++) { - let currPath = alt[j] - let currPathLength = currPath.length + const currPath = alt[j] + const currPathLength = currPath.length for (let i = 0; i < currPathLength; i++) { - let nextToken = this.LA(i + 1) + const nextToken = this.LA(i + 1) if (tokenMatcher(nextToken, currPath[i]) === false) { // mismatch in current path // try the next pth @@ -472,7 +472,7 @@ class InsideDefinitionFinderVisitor extends GAstVisitor { } function initializeArrayOfArrays(size): any[][] { - let result = new Array(size) + const result = new Array(size) for (let i = 0; i < size; i++) { result[i] = [] } @@ -488,7 +488,7 @@ function pathToHashKeys(path: TokenType[]): string[] { let keys = [""] for (let i = 0; i < path.length; i++) { const tokType = path[i] - let longerKeys = [] + const longerKeys = [] for (let j = 0; j < keys.length; j++) { const currShorterKey = keys[j] longerKeys.push(currShorterKey + "_" + tokType.tokenTypeIdx) @@ -535,8 +535,10 @@ export function lookAheadSequenceFromAlternatives( altsDefs: IProduction[], k: number ): lookAheadSequence[] { - let partialAlts = map(altsDefs, (currAlt) => possiblePathsFrom([currAlt], 1)) - let finalResult = initializeArrayOfArrays(partialAlts.length) + const partialAlts = map(altsDefs, (currAlt) => + possiblePathsFrom([currAlt], 1) + ) + const finalResult = initializeArrayOfArrays(partialAlts.length) const altsHashes = map(partialAlts, (currAltPaths) => { const dict = {} forEach(currAltPaths, (item) => { @@ -551,25 +553,25 @@ export function lookAheadSequenceFromAlternatives( // maxLookahead loop for (let pathLength = 1; pathLength <= k; pathLength++) { - let currDataset = newData + const currDataset = newData newData = initializeArrayOfArrays(currDataset.length) // alternatives loop for (let altIdx = 0; altIdx < currDataset.length; altIdx++) { - let currAltPathsAndSuffixes = currDataset[altIdx] + const currAltPathsAndSuffixes = currDataset[altIdx] // paths in current alternative loop for ( let currPathIdx = 0; currPathIdx < currAltPathsAndSuffixes.length; currPathIdx++ ) { - let currPathPrefix = currAltPathsAndSuffixes[currPathIdx].partialPath - let suffixDef = currAltPathsAndSuffixes[currPathIdx].suffixDef + const currPathPrefix = currAltPathsAndSuffixes[currPathIdx].partialPath + const suffixDef = currAltPathsAndSuffixes[currPathIdx].suffixDef const prefixKeys = pathToHashKeys(currPathPrefix) - let isUnique = isUniquePrefixHash(altsHashes, prefixKeys, altIdx) + const isUnique = isUniquePrefixHash(altsHashes, prefixKeys, altIdx) // End of the line for this path. 
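pathToHashKeys above encodes a token path as string keys so that isUniquePrefixHash can test, via plain dictionary lookups, whether a prefix still collides with another alternative. A simplified re-implementation of the idea, returning a single key per path (the real version also expands token categories, which is why it produces several keys; indices below are hypothetical):

    function pathToKey(path: { tokenTypeIdx: number }[]): string {
      return path.reduce((key, tok) => key + "_" + tok.tokenTypeIdx, "")
    }

    const altA = pathToKey([{ tokenTypeIdx: 3 }, { tokenTypeIdx: 9 }]) // "_3_9"
    const altB = pathToKey([{ tokenTypeIdx: 3 }, { tokenTypeIdx: 4 }]) // "_3_4"
    // The one-token prefixes ("_3") collide while the two-token keys do not,
    // so distinguishing these two alternatives needs k = 2 tokens of lookahead.
    console.log(altA !== altB) // true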
if (isUnique || isEmpty(suffixDef) || currPathPrefix.length === k) { - let currAltResult = finalResult[altIdx] + const currAltResult = finalResult[altIdx] // TODO: Can we implement a containsPath using Maps/Dictionaries? if (containsPath(currAltResult, currPathPrefix) === false) { currAltResult.push(currPathPrefix) @@ -582,7 +584,7 @@ export function lookAheadSequenceFromAlternatives( } // Expand longer paths else { - let newPartialPathsAndSuffixes = possiblePathsFrom( + const newPartialPathsAndSuffixes = possiblePathsFrom( suffixDef, pathLength + 1, currPathPrefix @@ -625,19 +627,22 @@ export function getLookaheadPathsForOptionalProd( prodType: PROD_TYPE, k: number ): lookAheadSequence[] { - let insideDefVisitor = new InsideDefinitionFinderVisitor(occurrence, prodType) + const insideDefVisitor = new InsideDefinitionFinderVisitor( + occurrence, + prodType + ) ruleGrammar.accept(insideDefVisitor) - let insideDef = insideDefVisitor.result + const insideDef = insideDefVisitor.result - let afterDefWalker = new RestDefinitionFinderWalker( + const afterDefWalker = new RestDefinitionFinderWalker( ruleGrammar, occurrence, prodType ) - let afterDef = afterDefWalker.startWalking() + const afterDef = afterDefWalker.startWalking() - let insideFlat = new AlternativeGAST({ definition: insideDef }) - let afterFlat = new AlternativeGAST({ definition: afterDef }) + const insideFlat = new AlternativeGAST({ definition: insideDef }) + const afterFlat = new AlternativeGAST({ definition: afterDef }) return lookAheadSequenceFromAlternatives([insideFlat, afterFlat], k) } diff --git a/packages/chevrotain/src/parse/grammar/resolver.ts b/packages/chevrotain/src/parse/grammar/resolver.ts index 314a161ee..978e663ca 100644 --- a/packages/chevrotain/src/parse/grammar/resolver.ts +++ b/packages/chevrotain/src/parse/grammar/resolver.ts @@ -14,7 +14,7 @@ export function resolveGrammar( topLevels: Record, errMsgProvider: IGrammarResolverErrorMessageProvider ): IParserDefinitionError[] { - let refResolver = new GastRefResolverVisitor(topLevels, errMsgProvider) + const refResolver = new GastRefResolverVisitor(topLevels, errMsgProvider) refResolver.resolveRefs() return refResolver.errors } @@ -38,10 +38,10 @@ export class GastRefResolverVisitor extends GAstVisitor { } public visitNonTerminal(node: NonTerminal): void { - let ref = this.nameToTopRule[node.nonTerminalName] + const ref = this.nameToTopRule[node.nonTerminalName] if (!ref) { - let msg = this.errMsgProvider.buildRuleNotFoundError( + const msg = this.errMsgProvider.buildRuleNotFoundError( this.currTopLevel, node ) diff --git a/packages/chevrotain/src/parse/grammar/rest.ts b/packages/chevrotain/src/parse/grammar/rest.ts index 3c81ab670..76f7019cd 100644 --- a/packages/chevrotain/src/parse/grammar/rest.ts +++ b/packages/chevrotain/src/parse/grammar/rest.ts @@ -19,7 +19,7 @@ import { IProduction } from "../../../api" export abstract class RestWalker { walk(prod: AbstractProduction, prevRest: any[] = []): void { forEach(prod.definition, (subProd: IProduction, index) => { - let currRest = drop(prod.definition, index + 1) + const currRest = drop(prod.definition, index + 1) /* istanbul ignore else */ if (subProd instanceof NonTerminal) { this.walkProdRef(subProd, currRest, prevRest) @@ -63,7 +63,7 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABCDEF => after the D the rest is EF - let fullOrRest = currRest.concat(prevRest) + const fullOrRest = currRest.concat(prevRest) this.walk(flatProd, fullOrRest) } @@ -73,7 +73,7 @@ export abstract class 
RestWalker { prevRest: IProduction[] ): void { // ABC(DE)?F => after the (DE)? the rest is F - let fullOrRest = currRest.concat(prevRest) + const fullOrRest = currRest.concat(prevRest) this.walk(optionProd, fullOrRest) } @@ -83,7 +83,7 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABC(DE)+F => after the (DE)+ the rest is (DE)?F - let fullAtLeastOneRest: IProduction[] = [ + const fullAtLeastOneRest: IProduction[] = [ new Option({ definition: atLeastOneProd.definition }) ].concat(currRest, prevRest) this.walk(atLeastOneProd, fullAtLeastOneRest) @@ -95,7 +95,7 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABC DE(,DE)* F => after the (,DE)+ the rest is (,DE)?F - let fullAtLeastOneSepRest = restForRepetitionWithSeparator( + const fullAtLeastOneSepRest = restForRepetitionWithSeparator( atLeastOneSepProd, currRest, prevRest @@ -109,7 +109,7 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABC(DE)*F => after the (DE)* the rest is (DE)?F - let fullManyRest: IProduction[] = [ + const fullManyRest: IProduction[] = [ new Option({ definition: manyProd.definition }) ].concat(currRest, prevRest) this.walk(manyProd, fullManyRest) @@ -121,7 +121,7 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABC (DE(,DE)*)? F => after the (,DE)* the rest is (,DE)?F - let fullManySepRest = restForRepetitionWithSeparator( + const fullManySepRest = restForRepetitionWithSeparator( manySepProd, currRest, prevRest @@ -135,27 +135,27 @@ export abstract class RestWalker { prevRest: IProduction[] ): void { // ABC(D|E|F)G => when finding the (D|E|F) the rest is G - let fullOrRest = currRest.concat(prevRest) + const fullOrRest = currRest.concat(prevRest) // walk all different alternatives forEach(orProd.definition, (alt) => { // wrapping each alternative in a single definition wrapper // to avoid errors in computing the rest of that alternative in the invocation to computeInProdFollows // (otherwise for OR([alt1,alt2]) alt2 will be considered in 'rest' of alt1 - let prodWrapper = new Alternative({ definition: [alt] }) + const prodWrapper = new Alternative({ definition: [alt] }) this.walk(prodWrapper, fullOrRest) }) } } function restForRepetitionWithSeparator(repSepProd, currRest, prevRest) { - let repSepRest = [ + const repSepRest = [ new Option({ definition: [new Terminal({ terminalType: repSepProd.separator })].concat( repSepProd.definition ) }) ] - let fullRepSepRest: IProduction[] = repSepRest.concat( + const fullRepSepRest: IProduction[] = repSepRest.concat( currRest, prevRest ) diff --git a/packages/chevrotain/src/parse/parser/parser.ts b/packages/chevrotain/src/parse/parser/parser.ts index f69ba9184..a718cb00c 100644 --- a/packages/chevrotain/src/parse/parser/parser.ts +++ b/packages/chevrotain/src/parse/parser/parser.ts @@ -152,7 +152,7 @@ export class Parser { let defErrorsMsgs this.selfAnalysisDone = true - let className = this.className + const className = this.className this.TRACE_INIT("toFastProps", () => { // Without this voodoo magic the parser would be x3-x4 slower @@ -187,25 +187,21 @@ export class Parser { resolverErrors = resolveGrammar({ rules: values(this.gastProductionsCache) }) - this.definitionErrors.push.apply(this.definitionErrors, resolverErrors) // mutability for the win? + this.definitionErrors = this.definitionErrors.concat(resolverErrors) }) this.TRACE_INIT("Grammar Validations", () => { // only perform additional grammar validations IFF no resolving errors have occurred. 
// as unresolved grammar may lead to unhandled runtime exceptions in the follow up validations. if (isEmpty(resolverErrors) && this.skipValidations === false) { - let validationErrors = validateGrammar({ + const validationErrors = validateGrammar({ rules: values(this.gastProductionsCache), maxLookahead: this.maxLookahead, tokenTypes: values(this.tokensMap), errMsgProvider: defaultGrammarValidatorErrorProvider, grammarName: className }) - - this.definitionErrors.push.apply( - this.definitionErrors, - validationErrors - ) // mutability for the win? + this.definitionErrors = this.definitionErrors.concat(validationErrors) } }) @@ -214,7 +210,7 @@ export class Parser { // The results of these computations are not needed unless error recovery is enabled. if (this.recoveryEnabled) { this.TRACE_INIT("computeAllProdsFollows", () => { - let allFollows = computeAllProdsFollows( + const allFollows = computeAllProdsFollows( values(this.gastProductionsCache) ) this.resyncFollows = allFollows diff --git a/packages/chevrotain/src/parse/parser/traits/context_assist.ts b/packages/chevrotain/src/parse/parser/traits/context_assist.ts index a33405c6d..2466eb6af 100644 --- a/packages/chevrotain/src/parse/parser/traits/context_assist.ts +++ b/packages/chevrotain/src/parse/parser/traits/context_assist.ts @@ -19,7 +19,7 @@ export class ContentAssist { startRuleName: string, precedingInput: IToken[] ): ISyntacticContentAssistPath[] { - let startRuleGast = this.gastProductionsCache[startRuleName] + const startRuleGast = this.gastProductionsCache[startRuleName] if (isUndefined(startRuleGast)) { throw Error(`Rule ->${startRuleName}<- does not exist in this grammar.`) @@ -39,10 +39,10 @@ export class ContentAssist { this: MixedInParser, grammarPath: ITokenGrammarPath ): TokenType[] { - let topRuleName = first(grammarPath.ruleStack) - let gastProductions = this.getGAstProductions() - let topProduction = gastProductions[topRuleName] - let nextPossibleTokenTypes = new NextAfterTokenWalker( + const topRuleName = first(grammarPath.ruleStack) + const gastProductions = this.getGAstProductions() + const topProduction = gastProductions[topRuleName] + const nextPossibleTokenTypes = new NextAfterTokenWalker( topProduction, grammarPath ).startWalking() diff --git a/packages/chevrotain/src/parse/parser/traits/error_handler.ts b/packages/chevrotain/src/parse/parser/traits/error_handler.ts index d2425a550..f16a51f9e 100644 --- a/packages/chevrotain/src/parse/parser/traits/error_handler.ts +++ b/packages/chevrotain/src/parse/parser/traits/error_handler.ts @@ -62,20 +62,20 @@ export class ErrorHandler { prodType: PROD_TYPE, userDefinedErrMsg: string ): void { - let ruleName = this.getCurrRuleFullName() - let ruleGrammar = this.getGAstProductions()[ruleName] - let lookAheadPathsPerAlternative = getLookaheadPathsForOptionalProd( + const ruleName = this.getCurrRuleFullName() + const ruleGrammar = this.getGAstProductions()[ruleName] + const lookAheadPathsPerAlternative = getLookaheadPathsForOptionalProd( occurrence, ruleGrammar, prodType, this.maxLookahead ) - let insideProdPaths = lookAheadPathsPerAlternative[0] - let actualTokens = [] + const insideProdPaths = lookAheadPathsPerAlternative[0] + const actualTokens = [] for (let i = 1; i <= this.maxLookahead; i++) { actualTokens.push(this.LA(i)) } - let msg = this.errorMessageProvider.buildEarlyExitMessage({ + const msg = this.errorMessageProvider.buildEarlyExitMessage({ expectedIterationPaths: insideProdPaths, actual: actualTokens, previous: this.LA(0), @@ -92,22 +92,22 @@ export class 
ErrorHandler { occurrence: number, errMsgTypes: string ): void { - let ruleName = this.getCurrRuleFullName() - let ruleGrammar = this.getGAstProductions()[ruleName] + const ruleName = this.getCurrRuleFullName() + const ruleGrammar = this.getGAstProductions()[ruleName] // TODO: getLookaheadPathsForOr can be slow for large enough maxLookahead and certain grammars, consider caching ? - let lookAheadPathsPerAlternative = getLookaheadPathsForOr( + const lookAheadPathsPerAlternative = getLookaheadPathsForOr( occurrence, ruleGrammar, this.maxLookahead ) - let actualTokens = [] + const actualTokens = [] for (let i = 1; i <= this.maxLookahead; i++) { actualTokens.push(this.LA(i)) } - let previousToken = this.LA(0) + const previousToken = this.LA(0) - let errMsg = this.errorMessageProvider.buildNoViableAltMessage({ + const errMsg = this.errorMessageProvider.buildNoViableAltMessage({ expectedPathsPerAlt: lookAheadPathsPerAlternative, actual: actualTokens, previous: previousToken, diff --git a/packages/chevrotain/src/parse/parser/traits/looksahead.ts b/packages/chevrotain/src/parse/parser/traits/looksahead.ts index 5cf2a83a4..8465827e3 100644 --- a/packages/chevrotain/src/parse/parser/traits/looksahead.ts +++ b/packages/chevrotain/src/parse/parser/traits/looksahead.ts @@ -212,7 +212,7 @@ export class LooksAhead { dslMethodIdx: number, occurrence: number ): number { - let currRuleShortName: any = this.getLastExplicitRuleShortName() + const currRuleShortName: any = this.getLastExplicitRuleShortName() return getKeyForAutomaticLookahead( currRuleShortName, dslMethodIdx, diff --git a/packages/chevrotain/src/parse/parser/traits/recognizer_api.ts b/packages/chevrotain/src/parse/parser/traits/recognizer_api.ts index 1d8bec95b..7ef9ef780 100644 --- a/packages/chevrotain/src/parse/parser/traits/recognizer_api.ts +++ b/packages/chevrotain/src/parse/parser/traits/recognizer_api.ts @@ -658,7 +658,7 @@ export class RecognizerApi { this.definedRulesNames.push(name) - let ruleImplementation = this.defineRule(name, implementation, config) + const ruleImplementation = this.defineRule(name, implementation, config) this[name] = ruleImplementation return ruleImplementation } @@ -673,9 +673,9 @@ export class RecognizerApi { ruleErrors = ruleErrors.concat( validateRuleIsOverridden(name, this.definedRulesNames, this.className) ) - this.definitionErrors.push.apply(this.definitionErrors, ruleErrors) // mutability for the win + this.definitionErrors = this.definitionErrors.concat(ruleErrors) - let ruleImplementation = this.defineRule(name, impl, config) + const ruleImplementation = this.defineRule(name, impl, config) this[name] = ruleImplementation return ruleImplementation } diff --git a/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts b/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts index a7a70d94f..4e6550760 100644 --- a/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts +++ b/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts @@ -141,8 +141,8 @@ export class RecognizerEngine { has(tokenVocabulary, "modes") && every(flatten(values((tokenVocabulary).modes)), isTokenType) ) { - let allTokenTypes = flatten(values((tokenVocabulary).modes)) - let uniqueTokens = uniq(allTokenTypes) + const allTokenTypes = flatten(values((tokenVocabulary).modes)) + const uniqueTokens = uniq(allTokenTypes) this.tokensMap = reduce( uniqueTokens, (acc, tokType: TokenType) => { @@ -193,17 +193,17 @@ export class RecognizerEngine { `Make sure that all grammar rule definitions are done before 
'performSelfAnalysis' is called.` ) } - let resyncEnabled = has(config, "resyncEnabled") + const resyncEnabled = has(config, "resyncEnabled") ? config.resyncEnabled : DEFAULT_RULE_CONFIG.resyncEnabled - let recoveryValueFunc = has(config, "recoveryValueFunc") + const recoveryValueFunc = has(config, "recoveryValueFunc") ? config.recoveryValueFunc : DEFAULT_RULE_CONFIG.recoveryValueFunc // performance optimization: Use small integers as keys for the longer human readable "full" rule names. // this greatly improves Map access time (as much as 8% for some performance benchmarks). /* tslint:disable */ - let shortName = + const shortName = this.ruleShortNameIdx << (BITS_FOR_METHOD_TYPE + BITS_FOR_OCCURRENCE_IDX) /* tslint:enable */ @@ -228,14 +228,15 @@ export class RecognizerEngine { } } - let wrappedGrammarRule - - wrappedGrammarRule = function (idxInCallingRule: number = 0, args: any[]) { + const wrappedGrammarRule = function ( + idxInCallingRule: number = 0, + args: any[] + ) { this.ruleInvocationStateUpdate(shortName, ruleName, idxInCallingRule) return invokeRuleWithTry.call(this, args) } - let ruleNamePropName = "ruleName" + const ruleNamePropName = "ruleName" wrappedGrammarRule[ruleNamePropName] = ruleName wrappedGrammarRule["originalGrammarAction"] = impl return wrappedGrammarRule @@ -247,22 +248,22 @@ export class RecognizerEngine { resyncEnabledConfig: boolean, recoveryValueFunc: Function ): void { - let isFirstInvokedRule = this.RULE_STACK.length === 1 + const isFirstInvokedRule = this.RULE_STACK.length === 1 // note the reSync is always enabled for the first rule invocation, because we must always be able to // reSync with EOF and just output some INVALID ParseTree // during backtracking reSync recovery is disabled, otherwise we can't be certain the backtracking // path is really the most valid one - let reSyncEnabled = + const reSyncEnabled = resyncEnabledConfig && !this.isBackTracking() && this.recoveryEnabled if (isRecognitionException(e)) { const recogError: any = e if (reSyncEnabled) { - let reSyncTokType = this.findReSyncTokenType() + const reSyncTokType = this.findReSyncTokenType() if (this.isInCurrentRuleReSyncSet(reSyncTokType)) { recogError.resyncedTokens = this.reSyncTo(reSyncTokType) if (this.outputCst) { - let partialCstResult: any = this.CST_STACK[ + const partialCstResult: any = this.CST_STACK[ this.CST_STACK.length - 1 ] partialCstResult.recoveredNode = true @@ -303,7 +304,7 @@ export class RecognizerEngine { actionORMethodDef: GrammarAction | DSLMethodOpts, occurrence: number ): OUT { - let key = this.getKeyForAutomaticLookahead(OPTION_IDX, occurrence) + const key = this.getKeyForAutomaticLookahead(OPTION_IDX, occurrence) return this.optionInternalLogic(actionORMethodDef, occurrence, key) } @@ -321,7 +322,7 @@ export class RecognizerEngine { predicate = (>actionORMethodDef).GATE // predicate present if (predicate !== undefined) { - let orgLookaheadFunction = lookAheadFunc + const orgLookaheadFunction = lookAheadFunc lookAheadFunc = () => { return predicate.call(this) && orgLookaheadFunction.call(this) } @@ -341,7 +342,7 @@ export class RecognizerEngine { prodOccurrence: number, actionORMethodDef: GrammarAction | DSLMethodOptsWithErr ): void { - let laKey = this.getKeyForAutomaticLookahead( + const laKey = this.getKeyForAutomaticLookahead( AT_LEAST_ONE_IDX, prodOccurrence ) @@ -367,7 +368,7 @@ export class RecognizerEngine { predicate = (>actionORMethodDef).GATE // predicate present if (predicate !== undefined) { - let orgLookaheadFunction = lookAheadFunc + const 
orgLookaheadFunction = lookAheadFunc lookAheadFunc = () => { return predicate.call(this) && orgLookaheadFunction.call(this) } @@ -412,7 +413,7 @@ export class RecognizerEngine { prodOccurrence: number, options: AtLeastOneSepMethodOpts ): void { - let laKey = this.getKeyForAutomaticLookahead( + const laKey = this.getKeyForAutomaticLookahead( AT_LEAST_ONE_SEP_IDX, prodOccurrence ) @@ -425,10 +426,10 @@ export class RecognizerEngine { options: AtLeastOneSepMethodOpts, key: number ): void { - let action = options.DEF - let separator = options.SEP + const action = options.DEF + const separator = options.SEP - let firstIterationLookaheadFunc = this.getLaFuncFromCache(key) + const firstIterationLookaheadFunc = this.getLaFuncFromCache(key) // 1st iteration if (firstIterationLookaheadFunc.call(this) === true) { @@ -436,7 +437,7 @@ export class RecognizerEngine { // TODO: Optimization can move this function construction into "attemptInRepetitionRecovery" // because it is only needed in error recovery scenarios. - let separatorLookAheadFunc = () => { + const separatorLookAheadFunc = () => { return this.tokenMatcher(this.LA(1), separator) } @@ -478,7 +479,7 @@ export class RecognizerEngine { prodOccurrence: number, actionORMethodDef: GrammarAction | DSLMethodOpts ): void { - let laKey = this.getKeyForAutomaticLookahead(MANY_IDX, prodOccurrence) + const laKey = this.getKeyForAutomaticLookahead(MANY_IDX, prodOccurrence) return this.manyInternalLogic(prodOccurrence, actionORMethodDef, laKey) } @@ -497,7 +498,7 @@ export class RecognizerEngine { predicate = (>actionORMethodDef).GATE // predicate present if (predicate !== undefined) { - let orgLookaheadFunction = lookaheadFunction + const orgLookaheadFunction = lookaheadFunction lookaheadFunction = () => { return predicate.call(this) && orgLookaheadFunction.call(this) } @@ -533,7 +534,7 @@ export class RecognizerEngine { prodOccurrence: number, options: ManySepMethodOpts ): void { - let laKey = this.getKeyForAutomaticLookahead(MANY_SEP_IDX, prodOccurrence) + const laKey = this.getKeyForAutomaticLookahead(MANY_SEP_IDX, prodOccurrence) this.manySepFirstInternalLogic(prodOccurrence, options, laKey) } @@ -543,15 +544,15 @@ export class RecognizerEngine { options: ManySepMethodOpts, key: number ): void { - let action = options.DEF - let separator = options.SEP - let firstIterationLaFunc = this.getLaFuncFromCache(key) + const action = options.DEF + const separator = options.SEP + const firstIterationLaFunc = this.getLaFuncFromCache(key) // 1st iteration if (firstIterationLaFunc.call(this) === true) { action.call(this) - let separatorLookAheadFunc = () => { + const separatorLookAheadFunc = () => { return this.tokenMatcher(this.LA(1), separator) } // 2nd..nth iterations @@ -633,15 +634,15 @@ export class RecognizerEngine { altsOrOpts: IOrAlt[] | OrMethodOpts, occurrence: number ): T { - let laKey = this.getKeyForAutomaticLookahead(OR_IDX, occurrence) - let alts = isArray(altsOrOpts) + const laKey = this.getKeyForAutomaticLookahead(OR_IDX, occurrence) + const alts = isArray(altsOrOpts) ? 
(altsOrOpts as IOrAlt[]) : (altsOrOpts as OrMethodOpts).DEF const laFunc = this.getLaFuncFromCache(laKey) - let altIdxToTake = laFunc.call(this, alts) + const altIdxToTake = laFunc.call(this, alts) if (altIdxToTake !== undefined) { - let chosenAlternative: any = alts[altIdxToTake] + const chosenAlternative: any = alts[altIdxToTake] return chosenAlternative.ALT.call(this) } this.raiseNoAltException( @@ -658,8 +659,8 @@ export class RecognizerEngine { this.cstFinallyStateUpdate() if (this.RULE_STACK.length === 0 && this.isAtEndOfInput() === false) { - let firstRedundantTok = this.LA(1) - let errMsg = this.errorMessageProvider.buildNotAllInputParsedMessage({ + const firstRedundantTok = this.LA(1) + const errMsg = this.errorMessageProvider.buildNotAllInputParsedMessage({ firstRedundant: firstRedundantTok, ruleName: this.getCurrRuleFullName() }) @@ -716,7 +717,7 @@ export class RecognizerEngine { ): IToken { let consumedToken try { - let nextToken = this.LA(1) + const nextToken = this.LA(1) if (this.tokenMatcher(nextToken, tokType) === true) { this.consumeToken() consumedToken = nextToken @@ -747,7 +748,7 @@ export class RecognizerEngine { options: ConsumeMethodOpts ): void { let msg - let previousToken = this.LA(0) + const previousToken = this.LA(0) if (options !== undefined && options.ERR_MSG) { msg = options.ERR_MSG } else { @@ -777,7 +778,7 @@ export class RecognizerEngine { eFromConsumption.name === "MismatchedTokenException" && !this.isBackTracking() ) { - let follows = this.getFollowsForInRuleRecovery(tokType, idx) + const follows = this.getFollowsForInRuleRecovery(tokType, idx) try { return this.tryInRuleRecovery(tokType, follows) } catch (eFromInRuleRecovery) { @@ -796,8 +797,8 @@ export class RecognizerEngine { saveRecogState(this: MixedInParser): IParserState { // errors is a getter which will clone the errors array - let savedErrors = this.errors - let savedRuleStack = cloneArr(this.RULE_STACK) + const savedErrors = this.errors + const savedRuleStack = cloneArr(this.RULE_STACK) return { errors: savedErrors, lexerState: this.exportLexerState(), @@ -829,7 +830,7 @@ export class RecognizerEngine { } getCurrRuleFullName(this: MixedInParser): string { - let shortName = this.getLastExplicitRuleShortName() + const shortName = this.getLastExplicitRuleShortName() return this.shortRuleNameToFull[shortName] } diff --git a/packages/chevrotain/src/parse/parser/traits/recoverable.ts b/packages/chevrotain/src/parse/parser/traits/recoverable.ts index 8e573cdbf..d27984c9b 100644 --- a/packages/chevrotain/src/parse/parser/traits/recoverable.ts +++ b/packages/chevrotain/src/parse/parser/traits/recoverable.ts @@ -66,7 +66,7 @@ export class Recoverable { } public getTokenToInsert(tokType: TokenType): IToken { - let tokToInsert = createTokenInstance( + const tokToInsert = createTokenInstance( tokType, "", NaN, @@ -92,25 +92,25 @@ export class Recoverable { expectedTokType: TokenType ): void { // TODO: can the resyncTokenType be cached? 
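getTokenToInsert above fabricates a token whose position fields are NaN, which is how downstream code can tell recovery-inserted tokens apart from tokens that were actually present in the input. A small sketch of that convention, with the token shape reduced to the relevant fields (the real IToken carries several more position properties):

    interface InsertedToken {
      image: string
      startOffset: number
      isInsertedInRecovery: boolean
    }

    function buildInsertedToken(): InsertedToken {
      // empty image + NaN offset mark the token as synthetic
      return { image: "", startOffset: NaN, isInsertedInRecovery: true }
    }

    const tok = buildInsertedToken()
    console.log(Number.isNaN(tok.startOffset)) // true -> came from recovery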
- let reSyncTokType = this.findReSyncTokenType() - let savedLexerState = this.exportLexerState() - let resyncedTokens = [] + const reSyncTokType = this.findReSyncTokenType() + const savedLexerState = this.exportLexerState() + const resyncedTokens = [] let passedResyncPoint = false - let nextTokenWithoutResync = this.LA(1) + const nextTokenWithoutResync = this.LA(1) let currToken = this.LA(1) - let generateErrorMessage = () => { - let previousToken = this.LA(0) + const generateErrorMessage = () => { + const previousToken = this.LA(0) // we are preemptively re-syncing before an error has been detected, therefor we must reproduce // the error that would have been thrown - let msg = this.errorMessageProvider.buildMismatchTokenMessage({ + const msg = this.errorMessageProvider.buildMismatchTokenMessage({ expected: expectedTokType, actual: nextTokenWithoutResync, previous: previousToken, ruleName: this.getCurrRuleFullName() }) - let error = new MismatchedTokenException( + const error = new MismatchedTokenException( msg, nextTokenWithoutResync, this.LA(0) @@ -194,8 +194,8 @@ export class Recoverable { tokType: TokenType, tokIdxInRule: number ): TokenType[] { - let grammarPath = this.getCurrentGrammarPath(tokType, tokIdxInRule) - let follows = this.getNextPossibleTokenTypes(grammarPath) + const grammarPath = this.getCurrentGrammarPath(tokType, tokIdxInRule) + const follows = this.getNextPossibleTokenTypes(grammarPath) return follows } @@ -205,12 +205,12 @@ export class Recoverable { follows: TokenType[] ): IToken { if (this.canRecoverWithSingleTokenInsertion(expectedTokType, follows)) { - let tokToInsert = this.getTokenToInsert(expectedTokType) + const tokToInsert = this.getTokenToInsert(expectedTokType) return tokToInsert } if (this.canRecoverWithSingleTokenDeletion(expectedTokType)) { - let nextTok = this.SKIP_TOKEN() + const nextTok = this.SKIP_TOKEN() this.consumeToken() return nextTok } @@ -243,8 +243,8 @@ export class Recoverable { return false } - let mismatchedTok = this.LA(1) - let isMisMatchedTokInFollows = + const mismatchedTok = this.LA(1) + const isMisMatchedTokInFollows = find(follows, (possibleFollowsTokType: TokenType) => { return this.tokenMatcher(mismatchedTok, possibleFollowsTokType) }) !== undefined @@ -256,7 +256,7 @@ export class Recoverable { this: MixedInParser, expectedTokType: TokenType ): boolean { - let isNextTokenWhatIsExpected = this.tokenMatcher( + const isNextTokenWhatIsExpected = this.tokenMatcher( this.LA(2), expectedTokType ) @@ -267,18 +267,18 @@ export class Recoverable { this: MixedInParser, tokenTypeIdx: TokenType ): boolean { - let followKey = this.getCurrFollowKey() - let currentRuleReSyncSet = this.getFollowSetFromFollowKey(followKey) + const followKey = this.getCurrFollowKey() + const currentRuleReSyncSet = this.getFollowSetFromFollowKey(followKey) return contains(currentRuleReSyncSet, tokenTypeIdx) } findReSyncTokenType(this: MixedInParser): TokenType { - let allPossibleReSyncTokTypes = this.flattenFollowSet() + const allPossibleReSyncTokTypes = this.flattenFollowSet() // this loop will always terminate as EOF is always in the follow stack and also always (virtually) in the input let nextToken = this.LA(1) let k = 2 while (true) { - let nextTokenType: any = nextToken.tokenType + const nextTokenType: any = nextToken.tokenType if (contains(allPossibleReSyncTokTypes, nextTokenType)) { return nextTokenType } @@ -292,9 +292,9 @@ export class Recoverable { if (this.RULE_STACK.length === 1) { return EOF_FOLLOW_KEY } - let currRuleShortName = 
this.getLastExplicitRuleShortName() - let currRuleIdx = this.getLastExplicitRuleOccurrenceIndex() - let prevRuleShortName = this.getPreviousExplicitRuleShortName() + const currRuleShortName = this.getLastExplicitRuleShortName() + const currRuleIdx = this.getLastExplicitRuleOccurrenceIndex() + const prevRuleShortName = this.getPreviousExplicitRuleShortName() return { ruleName: this.shortRuleNameToFullName(currRuleShortName), @@ -304,8 +304,8 @@ export class Recoverable { } buildFullFollowKeyStack(this: MixedInParser): IFollowKey[] { - let explicitRuleStack = this.RULE_STACK - let explicitOccurrenceStack = this.RULE_OCCURRENCE_STACK + const explicitRuleStack = this.RULE_STACK + const explicitOccurrenceStack = this.RULE_OCCURRENCE_STACK return map(explicitRuleStack, (ruleName, idx) => { if (idx === 0) { @@ -320,7 +320,7 @@ export class Recoverable { } flattenFollowSet(this: MixedInParser): TokenType[] { - let followStack = map(this.buildFullFollowKeyStack(), (currKey) => { + const followStack = map(this.buildFullFollowKeyStack(), (currKey) => { return this.getFollowSetFromFollowKey(currKey) }) return flatten(followStack) @@ -334,7 +334,7 @@ export class Recoverable { return [EOF] } - let followName = + const followName = followKey.ruleName + followKey.idxInCallingRule + IN + followKey.inRule return this.resyncFollows[followName] @@ -354,7 +354,7 @@ export class Recoverable { } reSyncTo(this: MixedInParser, tokType: TokenType): IToken[] { - let resyncedTokens = [] + const resyncedTokens = [] let nextTok = this.LA(1) while (this.tokenMatcher(nextTok, tokType) === false) { nextTok = this.SKIP_TOKEN() @@ -383,9 +383,9 @@ export class Recoverable { tokType: TokenType, tokIdxInRule: number ): ITokenGrammarPath { - let pathRuleStack: string[] = this.getHumanReadableRuleStack() - let pathOccurrenceStack: number[] = cloneArr(this.RULE_OCCURRENCE_STACK) - let grammarPath: any = { + const pathRuleStack: string[] = this.getHumanReadableRuleStack() + const pathOccurrenceStack: number[] = cloneArr(this.RULE_OCCURRENCE_STACK) + const grammarPath: any = { ruleStack: pathRuleStack, occurrenceStack: pathOccurrenceStack, lastTok: tokType, @@ -411,12 +411,12 @@ export function attemptInRepetitionRecovery( nextToksWalker: typeof AbstractNextTerminalAfterProductionWalker, notStuck?: boolean ) { - let key = this.getKeyForAutomaticLookahead(dslMethodIdx, prodOccurrence) + const key = this.getKeyForAutomaticLookahead(dslMethodIdx, prodOccurrence) let firstAfterRepInfo = this.firstAfterRepMap[key] if (firstAfterRepInfo === undefined) { - let currRuleName = this.getCurrRuleFullName() - let ruleGrammar = this.getGAstProductions()[currRuleName] - let walker: AbstractNextTerminalAfterProductionWalker = new nextToksWalker( + const currRuleName = this.getCurrRuleFullName() + const ruleGrammar = this.getGAstProductions()[currRuleName] + const walker: AbstractNextTerminalAfterProductionWalker = new nextToksWalker( ruleGrammar, prodOccurrence ) @@ -426,7 +426,7 @@ export function attemptInRepetitionRecovery( let expectTokAfterLastMatch = firstAfterRepInfo.token let nextTokIdx = firstAfterRepInfo.occurrence - let isEndOfRule = firstAfterRepInfo.isEndOfRule + const isEndOfRule = firstAfterRepInfo.isEndOfRule // special edge case of a TOP most repetition after which the input should END. // this will force an attempt for inRule recovery in that scenario. 
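The recovery walk in this file (findReSyncTokenType, reSyncTo) repeatedly skips tokens until one from the current follow set shows up, so parsing can resume at a known-good point while the dropped tokens are kept for error reporting. A minimal, self-contained sketch of that loop over a plain token array (the real code consults the computed follow sets and the lexer state instead):

    function reSyncTo<T>(
      input: T[],
      start: number,
      isResyncPoint: (tok: T) => boolean
    ): { resumeAt: number; resyncedTokens: T[] } {
      const resyncedTokens: T[] = []
      let i = start
      while (i < input.length && !isResyncPoint(input[i])) {
        resyncedTokens.push(input[i]) // dropped, but reported with the error
        i++
      }
      return { resumeAt: i, resyncedTokens }
    }

    const res = reSyncTo(["x", "y", ";", "z"], 0, (t) => t === ";")
    console.log(res.resumeAt, res.resyncedTokens) // 2 [ 'x', 'y' ]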
diff --git a/packages/chevrotain/src/parse/parser/traits/tree_builder.ts b/packages/chevrotain/src/parse/parser/traits/tree_builder.ts index 118166f39..a206825cb 100644 --- a/packages/chevrotain/src/parse/parser/traits/tree_builder.ts +++ b/packages/chevrotain/src/parse/parser/traits/tree_builder.ts @@ -259,17 +259,17 @@ export class TreeBuilder { } getLastExplicitRuleShortName(this: MixedInParser): string { - let ruleStack = this.RULE_STACK + const ruleStack = this.RULE_STACK return ruleStack[ruleStack.length - 1] } getPreviousExplicitRuleShortName(this: MixedInParser): string { - let ruleStack = this.RULE_STACK + const ruleStack = this.RULE_STACK return ruleStack[ruleStack.length - 2] } getLastExplicitRuleOccurrenceIndex(this: MixedInParser): number { - let occurrenceStack = this.RULE_OCCURRENCE_STACK + const occurrenceStack = this.RULE_OCCURRENCE_STACK return occurrenceStack[occurrenceStack.length - 1] } } diff --git a/packages/chevrotain/src/scan/lexer.ts b/packages/chevrotain/src/scan/lexer.ts index a9e001898..c8961e48a 100644 --- a/packages/chevrotain/src/scan/lexer.ts +++ b/packages/chevrotain/src/scan/lexer.ts @@ -114,11 +114,11 @@ export function analyzeTokenTypes( tracer("Transform Patterns", () => { hasCustom = false allTransformedPatterns = map(onlyRelevantTypes, (currType) => { - let currPattern = currType[PATTERN] + const currPattern = currType[PATTERN] /* istanbul ignore else */ if (isRegExp(currPattern)) { - let regExpSource = currPattern.source + const regExpSource = currPattern.source if ( regExpSource.length === 1 && // only these regExp meta characters which can appear in a length one regExp @@ -175,11 +175,11 @@ export function analyzeTokenTypes( if (currPattern.length === 1) { return currPattern } else { - let escapedRegExpString = currPattern.replace( + const escapedRegExpString = currPattern.replace( /[\\^$.*+?()[\]{}|]/g, "\\$&" ) - let wrappedRegExp = new RegExp(escapedRegExpString) + const wrappedRegExp = new RegExp(escapedRegExpString) return options.useSticky ? 
addStickyFlag(wrappedRegExp) : addStartOfInput(wrappedRegExp) @@ -202,7 +202,7 @@ export function analyzeTokenTypes( ) patternIdxToGroup = map(onlyRelevantTypes, (clazz: any) => { - let groupName = clazz.GROUP + const groupName = clazz.GROUP /* istanbul ignore next */ if (groupName === Lexer.SKIPPED) { return undefined @@ -216,10 +216,10 @@ export function analyzeTokenTypes( }) patternIdxToLongerAltIdx = map(onlyRelevantTypes, (clazz: any) => { - let longerAltType = clazz.LONGER_ALT + const longerAltType = clazz.LONGER_ALT if (longerAltType) { - let longerAltIdx = indexOf(onlyRelevantTypes, longerAltType) + const longerAltIdx = indexOf(onlyRelevantTypes, longerAltType) return longerAltIdx } }) @@ -266,7 +266,7 @@ export function analyzeTokenTypes( emptyGroups = reduce( onlyRelevantTypes, (acc, clazz: any) => { - let groupName = clazz.GROUP + const groupName = clazz.GROUP if (isString(groupName) && !(groupName === Lexer.SKIPPED)) { acc[groupName] = [] } @@ -337,7 +337,7 @@ export function analyzeTokenTypes( ) } } else { - let optimizedCodes = getOptimizedStartCodesIndices( + const optimizedCodes = getOptimizedStartCodesIndices( currTokType.PATTERN, options.ensureOptimizations ) @@ -391,11 +391,11 @@ export function validatePatterns( ): ILexerDefinitionError[] { let errors = [] - let missingResult = findMissingPatterns(tokenTypes) + const missingResult = findMissingPatterns(tokenTypes) errors = errors.concat(missingResult.errors) - let invalidResult = findInvalidPatterns(missingResult.valid) - let validTokenTypes = invalidResult.valid + const invalidResult = findInvalidPatterns(missingResult.valid) + const validTokenTypes = invalidResult.valid errors = errors.concat(invalidResult.errors) errors = errors.concat(validateRegExpPattern(validTokenTypes)) @@ -415,7 +415,7 @@ function validateRegExpPattern( tokenTypes: TokenType[] ): ILexerDefinitionError[] { let errors = [] - let withRegExpPatterns = filter(tokenTypes, (currTokType) => + const withRegExpPatterns = filter(tokenTypes, (currTokType) => isRegExp(currTokType[PATTERN]) ) @@ -440,11 +440,11 @@ export interface ILexerFilterResult { export function findMissingPatterns( tokenTypes: TokenType[] ): ILexerFilterResult { - let tokenTypesWithMissingPattern = filter(tokenTypes, (currType) => { + const tokenTypesWithMissingPattern = filter(tokenTypes, (currType) => { return !has(currType, PATTERN) }) - let errors = map(tokenTypesWithMissingPattern, (currType) => { + const errors = map(tokenTypesWithMissingPattern, (currType) => { return { message: "Token Type: ->" + @@ -455,15 +455,15 @@ export function findMissingPatterns( } }) - let valid = difference(tokenTypes, tokenTypesWithMissingPattern) + const valid = difference(tokenTypes, tokenTypesWithMissingPattern) return { errors, valid } } export function findInvalidPatterns( tokenTypes: TokenType[] ): ILexerFilterResult { - let tokenTypesWithInvalidPattern = filter(tokenTypes, (currType) => { - let pattern = currType[PATTERN] + const tokenTypesWithInvalidPattern = filter(tokenTypes, (currType) => { + const pattern = currType[PATTERN] return ( !isRegExp(pattern) && !isFunction(pattern) && @@ -472,7 +472,7 @@ export function findInvalidPatterns( ) }) - let errors = map(tokenTypesWithInvalidPattern, (currType) => { + const errors = map(tokenTypesWithInvalidPattern, (currType) => { return { message: "Token Type: ->" + @@ -484,7 +484,7 @@ export function findInvalidPatterns( } }) - let valid = difference(tokenTypes, tokenTypesWithInvalidPattern) + const valid = difference(tokenTypes, 
tokenTypesWithInvalidPattern) return { errors, valid } } @@ -501,7 +501,7 @@ export function findEndOfInputAnchor( } } - let invalidRegex = filter(tokenTypes, (currType) => { + const invalidRegex = filter(tokenTypes, (currType) => { const pattern = currType[PATTERN] try { @@ -517,7 +517,7 @@ export function findEndOfInputAnchor( } }) - let errors = map(invalidRegex, (currType) => { + const errors = map(invalidRegex, (currType) => { return { message: "Unexpected RegExp Anchor Error:\n" + @@ -537,12 +537,12 @@ export function findEndOfInputAnchor( export function findEmptyMatchRegExps( tokenTypes: TokenType[] ): ILexerDefinitionError[] { - let matchesEmptyString = filter(tokenTypes, (currType) => { - let pattern = currType[PATTERN] + const matchesEmptyString = filter(tokenTypes, (currType) => { + const pattern = currType[PATTERN] return pattern.test("") }) - let errors = map(matchesEmptyString, (currType) => { + const errors = map(matchesEmptyString, (currType) => { return { message: "Token Type: ->" + @@ -569,7 +569,7 @@ export function findStartOfInputAnchor( } } - let invalidRegex = filter(tokenTypes, (currType) => { + const invalidRegex = filter(tokenTypes, (currType) => { const pattern = currType[PATTERN] try { const regexpAst = getRegExpAst(pattern) @@ -584,7 +584,7 @@ export function findStartOfInputAnchor( } }) - let errors = map(invalidRegex, (currType) => { + const errors = map(invalidRegex, (currType) => { return { message: "Unexpected RegExp Anchor Error:\n" + @@ -604,12 +604,12 @@ export function findStartOfInputAnchor( export function findUnsupportedFlags( tokenTypes: TokenType[] ): ILexerDefinitionError[] { - let invalidFlags = filter(tokenTypes, (currType) => { - let pattern = currType[PATTERN] + const invalidFlags = filter(tokenTypes, (currType) => { + const pattern = currType[PATTERN] return pattern instanceof RegExp && (pattern.multiline || pattern.global) }) - let errors = map(invalidFlags, (currType) => { + const errors = map(invalidFlags, (currType) => { return { message: "Token Type: ->" + @@ -627,7 +627,7 @@ export function findUnsupportedFlags( export function findDuplicatePatterns( tokenTypes: TokenType[] ): ILexerDefinitionError[] { - let found = [] + const found = [] let identicalPatterns = map(tokenTypes, (outerType: any) => { return reduce( tokenTypes, @@ -651,16 +651,16 @@ export function findDuplicatePatterns( identicalPatterns = compact(identicalPatterns) - let duplicatePatterns = filter(identicalPatterns, (currIdenticalSet) => { + const duplicatePatterns = filter(identicalPatterns, (currIdenticalSet) => { return currIdenticalSet.length > 1 }) - let errors = map(duplicatePatterns, (setOfIdentical: any) => { - let tokenTypeNames = map(setOfIdentical, (currType: any) => { + const errors = map(duplicatePatterns, (setOfIdentical: any) => { + const tokenTypeNames = map(setOfIdentical, (currType: any) => { return currType.name }) - let dupPatternSrc = (first(setOfIdentical)).PATTERN + const dupPatternSrc = (first(setOfIdentical)).PATTERN return { message: `The same RegExp pattern ->${dupPatternSrc}<-` + @@ -678,16 +678,16 @@ export function findDuplicatePatterns( export function findInvalidGroupType( tokenTypes: TokenType[] ): ILexerDefinitionError[] { - let invalidTypes = filter(tokenTypes, (clazz: any) => { + const invalidTypes = filter(tokenTypes, (clazz: any) => { if (!has(clazz, "GROUP")) { return false } - let group = clazz.GROUP + const group = clazz.GROUP return group !== Lexer.SKIPPED && group !== Lexer.NA && !isString(group) }) - let errors = 
map(invalidTypes, (currType) => { + const errors = map(invalidTypes, (currType) => { return { message: "Token Type: ->" + @@ -705,14 +705,14 @@ export function findModesThatDoNotExist( tokenTypes: TokenType[], validModes: string[] ): ILexerDefinitionError[] { - let invalidModes = filter(tokenTypes, (clazz: any) => { + const invalidModes = filter(tokenTypes, (clazz: any) => { return ( clazz.PUSH_MODE !== undefined && !contains(validModes, clazz.PUSH_MODE) ) }) - let errors = map(invalidModes, (tokType) => { - let msg = + const errors = map(invalidModes, (tokType) => { + const msg = `Token Type: ->${tokType.name}<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->${tokType.PUSH_MODE}<-` + `which does not exist` return { @@ -754,7 +754,7 @@ export function findUnreachablePatterns( forEach(tokenTypes, (tokType, testIdx) => { forEach(canBeTested, ({ str, idx, tokenType }) => { if (testIdx < idx && testTokenType(str, tokType.PATTERN)) { - let msg = + const msg = `Token: ->${tokenType.name}<- can never be matched.\n` + `Because it appears AFTER the Token Type ->${tokType.name}<-` + `in the lexer's definition.\n` + @@ -812,14 +812,14 @@ function noMetaChar(regExp: RegExp): boolean { } export function addStartOfInput(pattern: RegExp): RegExp { - let flags = pattern.ignoreCase ? "i" : "" + const flags = pattern.ignoreCase ? "i" : "" // always wrapping in a none capturing group preceded by '^' to make sure matching can only work on start of input. // duplicate/redundant start of input markers have no meaning (/^^^^A/ === /^A/) return new RegExp(`^(?:${pattern.source})`, flags) } export function addStickyFlag(pattern: RegExp): RegExp { - let flags = pattern.ignoreCase ? "iy" : "y" + const flags = pattern.ignoreCase ? "iy" : "y" // always wrapping in a none capturing group preceded by '^' to make sure matching can only work on start of input. // duplicate/redundant start of input markers have no meaning (/^^^^A/ === /^A/) return new RegExp(`${pattern.source}`, flags) @@ -830,7 +830,7 @@ export function performRuntimeChecks( trackLines: boolean, lineTerminatorCharacters: (number | string)[] ): ILexerDefinitionError[] { - let errors = [] + const errors = [] // some run time checks to help the end users. 
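addStartOfInput and addStickyFlag above are the two strategies the lexer can use to force matching at the current position: wrap the pattern in ^(?:...) and chop the consumed input, or set the sticky "y" flag and advance lastIndex instead. The difference in plain JavaScript regex terms:

    const anchored = new RegExp("^(?:ab)") // needs the input chopped to offset
    const sticky = new RegExp("ab", "y")   // matches at an explicit lastIndex

    console.log(anchored.test("xxab")) // false: "ab" is not at the start
    sticky.lastIndex = 2
    console.log(sticky.test("xxab"))   // true: matches exactly at offset 2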
if (!has(lexerDefinition, DEFAULT_MODE)) { @@ -944,11 +944,11 @@ export function performWarningRuntimeChecks( export function cloneEmptyGroups(emptyGroups: { [groupName: string]: IToken }): { [groupName: string]: IToken } { - let clonedResult: any = {} - let groupKeys = keys(emptyGroups) + const clonedResult: any = {} + const groupKeys = keys(emptyGroups) forEach(groupKeys, (currKey) => { - let currGroupValue = emptyGroups[currKey] + const currGroupValue = emptyGroups[currKey] /* istanbul ignore else */ if (isArray(currGroupValue)) { @@ -963,7 +963,7 @@ export function cloneEmptyGroups(emptyGroups: { // TODO: refactor to avoid duplication export function isCustomPattern(tokenType: any): boolean { - let pattern = tokenType.PATTERN + const pattern = tokenType.PATTERN /* istanbul ignore else */ if (isRegExp(pattern)) { return false @@ -994,9 +994,9 @@ export function isShortPattern(pattern: any): number | boolean { export const LineTerminatorOptimizedTester: ILineTerminatorsTester = { // implements /\n|\r\n?/g.test test: function (text) { - let len = text.length + const len = text.length for (let i = this.lastIndex; i < len; i++) { - let c = text.charCodeAt(i) + const c = text.charCodeAt(i) if (c === 10) { this.lastIndex = i + 1 return true @@ -1120,6 +1120,7 @@ export const minOptimizationVal = 256 * note the hack for fast division integer part extraction * See: https://stackoverflow.com/a/4228528 */ +let charCodeToOptimizedIdxMap = [] export function charCodeToOptimizedIndex(charCode) { return charCode < minOptimizationVal ? charCode @@ -1134,7 +1135,6 @@ export function charCodeToOptimizedIndex(charCode) { * * TODO: Perhaps it should be lazy initialized only if a charCode > 255 is used. */ -let charCodeToOptimizedIdxMap = [] function initCharCodeToOptimizedIndexMap() { if (isEmpty(charCodeToOptimizedIdxMap)) { charCodeToOptimizedIdxMap = new Array(65536) diff --git a/packages/chevrotain/src/scan/lexer_public.ts b/packages/chevrotain/src/scan/lexer_public.ts index efada6d6f..84661e387 100644 --- a/packages/chevrotain/src/scan/lexer_public.ts +++ b/packages/chevrotain/src/scan/lexer_public.ts @@ -220,7 +220,7 @@ export class Lexer { ) }) - let allModeNames = keys(actualDefinition.modes) + const allModeNames = keys(actualDefinition.modes) forEach( actualDefinition.modes, @@ -280,10 +280,10 @@ export class Lexer { !isEmpty(this.lexerDefinitionErrors) && !this.config.deferDefinitionErrorsHandling ) { - let allErrMessages = map(this.lexerDefinitionErrors, (error) => { + const allErrMessages = map(this.lexerDefinitionErrors, (error) => { return error.message }) - let allErrMessagesString = allErrMessages.join( + const allErrMessagesString = allErrMessages.join( "-----------------------\n" ) throw new Error( @@ -379,10 +379,10 @@ export class Lexer { initialMode: string = this.defaultMode ): ILexingResult { if (!isEmpty(this.lexerDefinitionErrors)) { - let allErrMessages = map(this.lexerDefinitionErrors, (error) => { + const allErrMessages = map(this.lexerDefinitionErrors, (error) => { return error.message }) - let allErrMessagesString = allErrMessages.join( + const allErrMessagesString = allErrMessages.join( "-----------------------\n" ) throw new Error( @@ -391,7 +391,7 @@ export class Lexer { ) } - let lexResult = this.tokenizeInternal(text, initialMode) + const lexResult = this.tokenizeInternal(text, initialMode) return lexResult } @@ -414,30 +414,30 @@ export class Lexer { droppedChar, msg, match - let orgText = text - let orgLength = orgText.length + const orgText = text + const orgLength = 
orgText.length let offset = 0 let matchedTokensIndex = 0 // initializing the tokensArray to the "guessed" size. // guessing too little will still reduce the number of array re-sizes on pushes. // guessing too large (Tested by guessing x4 too large) may cost a bit more of memory // but would still have a faster runtime by avoiding (All but one) array resizing. - let guessedNumberOfTokens = this.hasCustom + const guessedNumberOfTokens = this.hasCustom ? 0 // will break custom token pattern APIs the matchedTokens array will contain undefined elements. : Math.floor(text.length / 10) - let matchedTokens = new Array(guessedNumberOfTokens) - let errors: ILexingError[] = [] + const matchedTokens = new Array(guessedNumberOfTokens) + const errors: ILexingError[] = [] let line = this.trackStartLines ? 1 : undefined let column = this.trackStartLines ? 1 : undefined - let groups: any = cloneEmptyGroups(this.emptyGroups) - let trackLines = this.trackStartLines + const groups: any = cloneEmptyGroups(this.emptyGroups) + const trackLines = this.trackStartLines const lineTerminatorPattern = this.config.lineTerminatorsPattern let currModePatternsLength = 0 let patternIdxToConfig = [] let currCharCodeToPatternIdxToConfig = [] - let modeStack = [] + const modeStack = [] const emptyArray = [] Object.freeze(emptyArray) @@ -458,7 +458,7 @@ export class Lexer { } } - let pop_mode = (popToken) => { + const pop_mode = (popToken) => { // TODO: perhaps avoid this error in the edge case there is no more input? if ( modeStack.length === 1 && @@ -468,7 +468,7 @@ export class Lexer { ) { // if we try to pop the last mode there lexer will no longer have ANY mode. // thus the pop is ignored, an error will be created and the lexer will continue parsing in the previous mode. - let msg = this.config.errorMessageProvider.buildUnableToPopLexerModeMessage( + const msg = this.config.errorMessageProvider.buildUnableToPopLexerModeMessage( popToken ) @@ -485,7 +485,7 @@ export class Lexer { }) } else { modeStack.pop() - let newMode = last(modeStack) + const newMode = last(modeStack) patternIdxToConfig = this.patternIdxToConfig[newMode] currCharCodeToPatternIdxToConfig = this.charCodeToPatternIdxToConfig[ newMode @@ -531,17 +531,17 @@ export class Lexer { while (offset < orgLength) { matchedImage = null - let nextCharCode = orgText.charCodeAt(offset) + const nextCharCode = orgText.charCodeAt(offset) const chosenPatternIdxToConfig = getPossiblePatterns(nextCharCode) - let chosenPatternsLength = chosenPatternIdxToConfig.length + const chosenPatternsLength = chosenPatternIdxToConfig.length for (i = 0; i < chosenPatternsLength; i++) { currConfig = chosenPatternIdxToConfig[i] - let currPattern = currConfig.pattern + const currPattern = currConfig.pattern payload = null // manually in-lined because > 600 chars won't be in-lined in V8 - let singleCharCode = currConfig.short + const singleCharCode = currConfig.short if (singleCharCode !== false) { if (nextCharCode === singleCharCode) { // single character string @@ -569,8 +569,8 @@ export class Lexer { if (longerAltIdx !== undefined) { // TODO: micro optimize, avoid extra prop access // by saving/linking longerAlt on the original config? - let longerAltConfig = patternIdxToConfig[longerAltIdx] - let longerAltPattern = longerAltConfig.pattern + const longerAltConfig = patternIdxToConfig[longerAltIdx] + const longerAltPattern = longerAltConfig.pattern altPayload = null // single Char can never be a longer alt so no need to test it. 
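The longerAltIdx handling in the hunk above implements the LONGER_ALT escape hatch: after a shorter pattern (say, a keyword) matches, the lexer also tries the configured longer alternative and keeps whichever match is longer, so "done" lexes as one Identifier rather than the keyword "do" plus "ne". A hedged illustration with hypothetical patterns:

    // Both patterns are anchored, mirroring the start-of-input wrapping above.
    const keyword = /^do/
    const identifier = /^[a-z]+/

    const input = "done"
    const kwMatch = keyword.exec(input)?.[0]     // "do"
    const altMatch = identifier.exec(input)?.[0] // "done"
    const winner =
      altMatch !== undefined && kwMatch !== undefined && altMatch.length > kwMatch.length
        ? altMatch
        : kwMatch
    console.log(winner) // "done" -> the longer alternative wins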
@@ -673,9 +673,9 @@ export class Lexer { this.handleModes(currConfig, pop_mode, push_mode, newToken) } else { // error recovery, drop characters until we identify a valid token's start point - let errorStartOffset = offset - let errorLine = line - let errorColumn = column + const errorStartOffset = offset + const errorLine = line + const errorColumn = column let foundResyncPoint = false while (!foundResyncPoint && offset < orgLength) { // drop chars until we succeed in matching something @@ -684,11 +684,11 @@ export class Lexer { text = this.chopInput(text, 1) offset++ for (j = 0; j < currModePatternsLength; j++) { - let currConfig = patternIdxToConfig[j] - let currPattern = currConfig.pattern + const currConfig = patternIdxToConfig[j] + const currPattern = currConfig.pattern // manually in-lined because > 600 chars won't be in-lined in V8 - let singleCharCode = currConfig.short + const singleCharCode = currConfig.short if (singleCharCode !== false) { if (orgText.charCodeAt(offset) === singleCharCode) { // single character string @@ -746,7 +746,7 @@ export class Lexer { if (config.pop === true) { // need to save the PUSH_MODE property as if the mode is popped // patternIdxToPopMode is updated to reflect the new mode after popping the stack - let pushMode = config.push + const pushMode = config.push pop_mode(newToken) if (pushMode !== undefined) { push_mode.call(this, pushMode) @@ -884,7 +884,7 @@ export class Lexer { } private matchWithTest(pattern: RegExp, text: string, offset: number): string { - let found = pattern.test(text) + const found = pattern.test(text) if (found === true) { return text.substring(offset, pattern.lastIndex) } @@ -892,7 +892,7 @@ export class Lexer { } private matchWithExec(pattern, text): string { - let regExpArray = pattern.exec(text) + const regExpArray = pattern.exec(text) return regExpArray !== null ? regExpArray[0] : regExpArray } diff --git a/packages/chevrotain/src/scan/tokens.ts b/packages/chevrotain/src/scan/tokens.ts index d9d9752e0..51e9411b4 100644 --- a/packages/chevrotain/src/scan/tokens.ts +++ b/packages/chevrotain/src/scan/tokens.ts @@ -35,7 +35,7 @@ export const tokenIdxToClass = {} export function augmentTokenTypes(tokenTypes: TokenType[]): void { // collect the parent Token Types as well. 
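expandCategories, invoked just below, walks the CATEGORIES links transitively so every ancestor token type is collected exactly once before defaults are assigned. A minimal sketch of that fixpoint, with a reduced token-type shape and hypothetical names:

    interface TokType { name: string; CATEGORIES?: TokType[] }

    function expandParents(types: TokType[]): TokType[] {
      const result = new Set(types)
      const queue = [...types]
      while (queue.length > 0) {
        const curr = queue.pop()!
        for (const parent of curr.CATEGORIES ?? []) {
          if (!result.has(parent)) {
            result.add(parent) // each ancestor is visited only once
            queue.push(parent)
          }
        }
      }
      return [...result]
    }

    const Literal: TokType = { name: "Literal" }
    const Num: TokType = { name: "Num", CATEGORIES: [Literal] }
    console.log(expandParents([Num]).map((t) => t.name)) // ["Num", "Literal"]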
- let tokenTypesAndParents = expandCategories(tokenTypes) + const tokenTypesAndParents = expandCategories(tokenTypes) // add required tokenType and categoryMatches properties assignTokenDefaultProps(tokenTypesAndParents) @@ -59,7 +59,7 @@ export function expandCategories(tokenTypes: TokenType[]): TokenType[] { flatten(map(categories, (currTokType) => currTokType.CATEGORIES)) ) - let newCategories = difference(categories, result) + const newCategories = difference(categories, result) result = result.concat(newCategories) diff --git a/packages/chevrotain/src/scan/tokens_public.ts b/packages/chevrotain/src/scan/tokens_public.ts index dd9921a03..8c9c311fc 100644 --- a/packages/chevrotain/src/scan/tokens_public.ts +++ b/packages/chevrotain/src/scan/tokens_public.ts @@ -34,9 +34,9 @@ export function createToken(config: ITokenConfig): TokenType { } function createTokenInternal(config: ITokenConfig): TokenType { - let pattern = config.pattern + const pattern = config.pattern - let tokenType: TokenType = {} + const tokenType: TokenType = {} tokenType.name = config.name if (!isUndefined(pattern)) { diff --git a/packages/chevrotain/src/utils/utils.ts b/packages/chevrotain/src/utils/utils.ts index 7a3611148..09d474a25 100644 --- a/packages/chevrotain/src/utils/utils.ts +++ b/packages/chevrotain/src/utils/utils.ts @@ -20,8 +20,8 @@ export function keys(obj: any): string[] { } export function values(obj: any): any[] { - let vals = [] - let keys = Object.keys(obj) + const vals = [] + const keys = Object.keys(obj) for (let i = 0; i < keys.length; i++) { vals.push(obj[keys[i]]) } @@ -32,17 +32,17 @@ export function mapValues<I, O>( obj: Object, callback: (value: I, key?: string) => O ): O[] { - let result: O[] = [] - let objKeys = keys(obj) + const result: O[] = [] + const objKeys = keys(obj) for (let idx = 0; idx < objKeys.length; idx++) { - let currKey = objKeys[idx] + const currKey = objKeys[idx] result.push(callback.call(null, obj[currKey], currKey)) } return result } export function map<I, O>(arr: I[], callback: (I, idx?: number) => O): O[] { - let result: O[] = [] + const result: O[] = [] for (let idx = 0; idx < arr.length; idx++) { result.push(callback.call(null, arr[idx], idx)) } @@ -53,7 +53,7 @@ export function flatten<T>(arr: any[]): T[] { let result = [] for (let idx = 0; idx < arr.length; idx++) { - let currItem = arr[idx] + const currItem = arr[idx] if (Array.isArray(currItem)) { result = result.concat(flatten(currItem)) } else { @@ -68,7 +68,7 @@ export function first<T>(arr: T[]): T { } export function last<T>(arr: T[]): T { - let len = arr && arr.length + const len = arr && arr.length return len ?
arr[len - 1] : undefined } @@ -79,10 +79,10 @@ export function forEach(collection: any, iteratorCallback: Function): void { iteratorCallback.call(null, collection[i], i) } } else if (isObject(collection)) { - let colKeys = keys(collection) + const colKeys = keys(collection) for (let i = 0; i < colKeys.length; i++) { - let key = colKeys[i] - let value = collection[key] + const key = colKeys[i] + const value = collection[key] iteratorCallback.call(null, value, key) } } else { @@ -111,10 +111,10 @@ export function dropRight<T>(arr: T[], howMuch: number = 1): T[] { } export function filter<T>(arr: T[], predicate: (T) => boolean): T[] { - let result = [] + const result = [] if (Array.isArray(arr)) { for (let i = 0; i < arr.length; i++) { - let item = arr[i] + const item = arr[i] if (predicate.call(null, item)) { result.push(item) } @@ -128,12 +128,12 @@ export function reject<T>(arr: T[], predicate: (T) => boolean): T[] { } export function pick(obj: Object, predicate: (item) => boolean) { - let keys = Object.keys(obj) - let result = {} + const keys = Object.keys(obj) + const result = {} for (let i = 0; i < keys.length; i++) { - let currKey = keys[i] - let currItem = obj[currKey] + const currKey = keys[i] + const currItem = obj[currKey] if (predicate(currItem)) { result[currKey] = currItem } @@ -157,7 +157,7 @@ export function contains<T>(arr: T[], item): boolean { * shallow clone */ export function cloneArr<T>(arr: T[]): T[] { - let newArr = [] + const newArr = [] for (let i = 0; i < arr.length; i++) { newArr.push(arr[i]) } @@ -168,8 +168,8 @@ * shallow clone */ export function cloneObj(obj: Object): any { - let clonedObj = {} - for (let key in obj) { + const clonedObj = {} + for (const key in obj) { /* istanbul ignore else */ if (Object.prototype.hasOwnProperty.call(obj, key)) { clonedObj[key] = obj[key] @@ -180,7 +180,7 @@ export function find<T>(arr: T[], predicate: (item: T) => boolean): T { for (let i = 0; i < arr.length; i++) { - let item = arr[i] + const item = arr[i] if (predicate.call(null, item)) { return item } @@ -189,9 +189,9 @@ export function findAll<T>(arr: T[], predicate: (item: T) => boolean): T[] { - let found = [] + const found = [] for (let i = 0; i < arr.length; i++) { - let item = arr[i] + const item = arr[i] if (predicate.call(null, item)) { found.push(item) } @@ -206,8 +206,8 @@ export function reduce<T, A>( ): A { const isArr = Array.isArray(arrOrObj) - let vals: T[] = isArr ? <any>arrOrObj : values(arrOrObj) - let objKeys = isArr ? [] : keys(arrOrObj) + const vals: T[] = isArr ? <any>arrOrObj : values(arrOrObj) + const objKeys = isArr ?
[] : keys(arrOrObj) let accumulator = initial for (let i = 0; i < vals.length; i++) { @@ -229,11 +229,11 @@ export function uniq<T>( arr: T[], identity: (item: T) => any = (item) => item ): T[] { - let identities = [] + const identities = [] return reduce( arr, (result, currItem) => { - let currIdentity = identity(currItem) + const currIdentity = identity(currItem) if (contains(identities, currIdentity)) { return result } else { @@ -246,8 +246,8 @@ } export function partial(func: Function, ...restArgs: any[]): Function { - let firstArg = [null] - let allArgs = firstArg.concat(restArgs) + const firstArg = [null] + const allArgs = firstArg.concat(restArgs) return Function.bind.apply(func, allArgs) } @@ -298,7 +298,7 @@ export function indexOf<T>(arr: T[], value: T): number { } export function sortBy<T>(arr: T[], orderFunc: (item: T) => number): T[] { - let result = cloneArr(arr) + const result = cloneArr(arr) result.sort((a, b) => orderFunc(a) - orderFunc(b)) return result } @@ -308,7 +308,7 @@ export function zipObject(keys: any[], values: any[]): Object { throw Error("can't zipObject with different number of keys and values!") } - let result = {} + const result = {} for (let i = 0; i < keys.length; i++) { result[keys[i]] = values[i] } @@ -320,10 +320,10 @@ */ export function assign(target: Object, ...sources: Object[]): Object { for (let i = 0; i < sources.length; i++) { - let curSource = sources[i] - let currSourceKeys = keys(curSource) + const curSource = sources[i] + const currSourceKeys = keys(curSource) for (let j = 0; j < currSourceKeys.length; j++) { - let currKey = currSourceKeys[j] + const currKey = currSourceKeys[j] target[currKey] = curSource[currKey] } } @@ -338,10 +338,10 @@ export function assignNoOverwrite( ...sources: Object[] ): Object { for (let i = 0; i < sources.length; i++) { - let curSource = sources[i] - let currSourceKeys = keys(curSource) + const curSource = sources[i] + const currSourceKeys = keys(curSource) for (let j = 0; j < currSourceKeys.length; j++) { - let currKey = currSourceKeys[j] + const currKey = currSourceKeys[j] if (!has(target, currKey)) { target[currKey] = curSource[currKey] } @@ -351,18 +351,18 @@ } export function defaults(...sources: any[]): any { - return assignNoOverwrite.apply(null, [{}].concat(sources)) + return assignNoOverwrite({}, ...sources) } export function groupBy<T>( arr: T[], groupKeyFunc: (item: T) => string ): { [groupKey: string]: T[] } { - let result: { [groupKey: string]: T[] } = {} + const result: { [groupKey: string]: T[] } = {} forEach(arr, (item) => { - let currGroupKey = groupKeyFunc(item) - let currGroupArr = result[currGroupKey] + const currGroupKey = groupKeyFunc(item) + const currGroupArr = result[currGroupKey] if (currGroupArr) { currGroupArr.push(item) @@ -379,11 +379,11 @@ * Will overwrite existing properties with the same name */ export function merge(obj1: Object, obj2: Object): any { - let result = cloneObj(obj1) - let keys2 = keys(obj2) + const result = cloneObj(obj1) + const keys2 = keys(obj2) for (let i = 0; i < keys2.length; i++) { - let key = keys2[i] - let value = obj2[key] + const key = keys2[i] + const value = obj2[key] result[key] = value } diff --git a/packages/chevrotain/test/all.ts b/packages/chevrotain/test/all.ts index 981d0382a..0144a35f8 100644 --- a/packages/chevrotain/test/all.ts +++ b/packages/chevrotain/test/all.ts @@ -1,2 +1,2 @@ -let req = (require as
any).context("./", true, /spec\.js$/) +const req = (require as any).context("./", true, /spec\.js$/) req.keys().forEach(req) diff --git a/packages/chevrotain/test/full_flow/backtracking/backtracking_parser_spec.ts b/packages/chevrotain/test/full_flow/backtracking/backtracking_parser_spec.ts index dc522cab8..b9c4d4fc0 100644 --- a/packages/chevrotain/test/full_flow/backtracking/backtracking_parser_spec.ts +++ b/packages/chevrotain/test/full_flow/backtracking/backtracking_parser_spec.ts @@ -18,7 +18,7 @@ describe("Simple backtracking example", () => { new BackTrackingParser() // TODO: modify example to use the Chevrotain Lexer to increase readability - let largeFqnTokenVector = [ + const largeFqnTokenVector = [ createRegularToken(IdentTok, "ns1"), createRegularToken(DotTok, "."), createRegularToken(IdentTok, "ns2"), @@ -48,7 +48,7 @@ describe("Simple backtracking example", () => { // largeFqnTokenVector,new DefaultTok(1,1), new NumberTok(1,1,"666"), createRegularToken(SemiColonTok, ";") it("can parse an element with Equals and a very long qualified name", () => { - let input: any = flatten([ + const input: any = flatten([ // element A:ns1.ns2.ns3.ns4.ns5.ns6.ns7.ns8.ns9.ns10.ns11.ns12 = 666; createRegularToken(ElementTok, "element"), createRegularToken(IdentTok, "A"), @@ -59,16 +59,16 @@ describe("Simple backtracking example", () => { createRegularToken(SemiColonTok, ";") ]) - let parser = new BackTrackingParser() + const parser = new BackTrackingParser() parser.input = input - let result = parser.statement() + const result = parser.statement() expect(parser.errors.length).to.equal(0) expect(result).to.equal(RET_TYPE.WITH_EQUALS) }) it("can parse an element with Default and a very long qualified name", () => { - let input: any = flatten([ + const input: any = flatten([ // element A:ns1.ns2.ns3.ns4.ns5.ns6.ns7.ns8.ns9.ns10.ns11.ns12 default 666; createRegularToken(ElementTok, "element"), createRegularToken(IdentTok, "A"), @@ -79,9 +79,9 @@ describe("Simple backtracking example", () => { createRegularToken(SemiColonTok, ";") ]) - let parser = new BackTrackingParser() + const parser = new BackTrackingParser() parser.input = input - let result = parser.statement() + const result = parser.statement() expect(parser.errors.length).to.equal(0) expect(result).to.equal(RET_TYPE.WITH_DEFAULT) diff --git a/packages/chevrotain/test/full_flow/ecma_quirks/ecma_quirks.ts b/packages/chevrotain/test/full_flow/ecma_quirks/ecma_quirks.ts index 1cace567e..571d10435 100644 --- a/packages/chevrotain/test/full_flow/ecma_quirks/ecma_quirks.ts +++ b/packages/chevrotain/test/full_flow/ecma_quirks/ecma_quirks.ts @@ -158,7 +158,7 @@ class EcmaScriptQuirksParser extends EmbeddedActionsParser { idx: number ): IToken { this.skipWhitespace() - let nextToken = this.consumeExpected(tokClass) + const nextToken = this.consumeExpected(tokClass) if (nextToken !== false) { return nextToken } else { @@ -169,7 +169,7 @@ class EcmaScriptQuirksParser extends EmbeddedActionsParser { startOffset: this.textIdx } const previousToken = this.LA(0) - let msg = this.errorMessageProvider.buildMismatchTokenMessage({ + const msg = this.errorMessageProvider.buildMismatchTokenMessage({ expected: tokClass, actual: errorToken, previous: previousToken, @@ -203,7 +203,7 @@ class EcmaScriptQuirksParser extends EmbeddedActionsParser { return function () { // save & restore lexer state as otherwise the text index will move ahead // and the parser will fail consuming the tokens we have looked ahead for. 
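// Context (the ecma_quirks hunk continues below): this example re-implements
// lookahead via speculative parsing with saved lexer state, the same idea as
// Chevrotain's public BACKTRACK API. A self-contained sketch of that API; the
// grammar and token names here are hypothetical:
import { createToken, EmbeddedActionsParser } from "chevrotain"

const A = createToken({ name: "A", pattern: /a/ })
const B = createToken({ name: "B", pattern: /b/ })

class BacktrackSketch extends EmbeddedActionsParser {
  constructor() {
    super([A, B])
    this.performSelfAnalysis()
  }

  public root = this.RULE("root", () => {
    this.OR([
      // BACKTRACK speculatively parses alt1 on the token stream and rewinds,
      // so the GATE admits this alternative only when alt1 can fully succeed:
      { GATE: this.BACKTRACK(this.alt1), ALT: () => this.SUBRULE(this.alt1) },
      { ALT: () => this.SUBRULE(this.alt2) }
    ])
  })

  private alt1 = this.RULE("alt1", () => {
    this.CONSUME(A)
    this.CONSUME(B)
  })

  private alt2 = this.RULE("alt2", () => {
    this.CONSUME(A)
    this.CONSUME2(A)
  })
}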
- let lexerState = this.exportLexerState() + const lexerState = this.exportLexerState() try { for (let i = 0; i < allTokenTypes.length; i++) { const nextToken = this.IS_NEXT_TOKEN(allTokenTypes[i]) @@ -240,7 +240,7 @@ class EcmaScriptQuirksParser extends EmbeddedActionsParser { return function () { // save & restore lexer state as otherwise the text index will move ahead // and the parser will fail consuming the tokens we have looked ahead for. - let lexerState = this.exportLexerState() + const lexerState = this.exportLexerState() try { for (let i = 0; i < allTokenTypesPerAlt.length; i++) { const currAltTypes = allTokenTypesPerAlt[i] @@ -268,7 +268,7 @@ const parser = new EcmaScriptQuirksParser() export function parse(text): any { parser.textInput = text - let value = parser.statement() + const value = parser.statement() return { value: value, diff --git a/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_parser.ts b/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_parser.ts index ae30db4e9..00633326d 100644 --- a/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_parser.ts +++ b/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_parser.ts @@ -91,7 +91,7 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { // DOCS: note how all the parsing rules in this example return a ParseTree, we require some output from the parser // to demonstrate the error recovery mechanisms. otherwise it is harder to prove we have indeed recovered. private parseDdl(): ParseTree { - let stmts = [] + const stmts = [] this.MANY(() => { this.OR([ @@ -117,12 +117,10 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { } private parseCreateStmt(): ParseTree { - let createKW, tableKW, qn, semiColon - - createKW = this.CONSUME1(CreateTok) - tableKW = this.CONSUME1(TableTok) - qn = this.SUBRULE(this.qualifiedName) - semiColon = this.CONSUME1(SemiColonTok) + const createKW = this.CONSUME1(CreateTok) + const tableKW = this.CONSUME1(TableTok) + const qn = this.SUBRULE(this.qualifiedName) + const semiColon = this.CONSUME1(SemiColonTok) return PT(createRegularToken(CREATE_STMT), [ PT(createKW), @@ -133,14 +131,12 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { } private parseInsertStmt(): ParseTree { - let insertKW, recordValue, intoKW, qn, semiColon - // parse - insertKW = this.CONSUME1(InsertTok) - recordValue = this.SUBRULE(this.recordValue) - intoKW = this.CONSUME1(IntoTok) - qn = this.SUBRULE(this.qualifiedName) - semiColon = this.CONSUME1(SemiColonTok) + const insertKW = this.CONSUME1(InsertTok) + const recordValue = this.SUBRULE(this.recordValue) + const intoKW = this.CONSUME1(IntoTok) + const qn = this.SUBRULE(this.qualifiedName) + const semiColon = this.CONSUME1(SemiColonTok) // tree rewrite return PT(createRegularToken(INSERT_STMT), [ @@ -153,14 +149,12 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { } private parseDeleteStmt(): ParseTree { - let deleteKW, recordValue, fromKW, qn, semiColon - // parse - deleteKW = this.CONSUME1(DeleteTok) - recordValue = this.SUBRULE(this.recordValue) - fromKW = this.CONSUME1(FromTok) - qn = this.SUBRULE(this.qualifiedName) - semiColon = this.CONSUME1(SemiColonTok) + const deleteKW = this.CONSUME1(DeleteTok) + const recordValue = this.SUBRULE(this.recordValue) + const fromKW = this.CONSUME1(FromTok) + const qn = this.SUBRULE(this.qualifiedName) + const semiColon = 
this.CONSUME1(SemiColonTok) // tree rewrite return PT(createRegularToken(DELETE_STMT), [ @@ -173,8 +167,8 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { } private parseQualifiedName(): ParseTree { - let dots = [] - let idents = [] + const dots = [] + const idents = [] // parse // DOCS: note how we use CONSUME1(IdentTok) here @@ -188,15 +182,15 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { }) // tree rewrite - let allIdentsPts = WRAP_IN_PT(idents) - let dotsPt = PT(createRegularToken(DOTS), WRAP_IN_PT(dots)) - let allPtChildren = allIdentsPts.concat([dotsPt]) + const allIdentsPts = WRAP_IN_PT(idents) + const dotsPt = PT(createRegularToken(DOTS), WRAP_IN_PT(dots)) + const allPtChildren = allIdentsPts.concat([dotsPt]) return PT(createRegularToken(QUALIFIED_NAME), allPtChildren) } private parseRecordValue(): ParseTree { - let values = [] - let commas = [] + const values = [] + const commas = [] // parse this.CONSUME1(LParenTok) @@ -207,8 +201,8 @@ export class DDLExampleRecoveryParser extends EmbeddedActionsParser { }) this.CONSUME1(RParenTok) // tree rewrite - let commasPt = PT(createRegularToken(COMMAS), WRAP_IN_PT(commas)) - let allPtChildren = values.concat([commasPt]) + const commasPt = PT(createRegularToken(COMMAS), WRAP_IN_PT(commas)) + const allPtChildren = values.concat([commasPt]) return PT(createRegularToken(QUALIFIED_NAME), allPtChildren) } @@ -237,7 +231,7 @@ function PT(token: IToken, children: ParseTree[] = []): ParseTree { } export function WRAP_IN_PT(toks: IToken[]): ParseTree[] { - let parseTrees = new Array(toks.length) + const parseTrees = new Array(toks.length) for (let i = 0; i < toks.length; i++) { parseTrees[i] = PT(toks[i]) } diff --git a/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_spec.ts b/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_spec.ts index 56895e657..cb53099e4 100644 --- a/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_spec.ts +++ b/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_spec.ts @@ -37,13 +37,13 @@ import { IToken } from "../../../../api" // for side effect if augmenting the Token classes. 
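// Context: the spec below first instantiates DDLExampleRecoveryParser purely
// for the side effect named in the comment above. The fault tolerance it then
// exercises hangs off a single parser config flag. A minimal self-contained
// sketch (the token and rule are illustrative, not taken from this patch):
import { createToken, CstParser } from "chevrotain"

const Semi = createToken({ name: "Semi", pattern: /;/ })

class RecoveringParser extends CstParser {
  constructor() {
    // recoveryEnabled switches on the mechanisms these specs test:
    // single token insertion/deletion and re-sync recovery.
    super([Semi], { recoveryEnabled: true })
    this.performSelfAnalysis()
  }

  public stmts = this.RULE("stmts", () => {
    this.MANY(() => this.CONSUME(Semi))
  })
}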
new DDLExampleRecoveryParser() describe("Error Recovery SQL DDL Example", () => { - let schemaFQN = [ + const schemaFQN = [ createRegularToken(IdentTok, "schema2"), createRegularToken(DotTok), createRegularToken(IdentTok, "Persons") ] /* tslint:disable:quotemark */ - let shahar32Record = [ + const shahar32Record = [ createRegularToken(LParenTok), createRegularToken(IntTok, "32"), createRegularToken(CommaTok), @@ -51,7 +51,7 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(RParenTok) ] - let shahar31Record = [ + const shahar31Record = [ createRegularToken(LParenTok), createRegularToken(IntTok, "31"), createRegularToken(CommaTok), @@ -61,7 +61,7 @@ describe("Error Recovery SQL DDL Example", () => { /* tslint:enable:quotemark */ it("can parse a series of three statements successfully", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -81,15 +81,15 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(SemiColonTok) ]) - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult = parser.ddl() + const ptResult = parser.ddl() expect(parser.errors.length).to.equal(0) assertAllThreeStatementsPresentAndValid(ptResult) }) describe("Single Token insertion recovery mechanism", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -109,24 +109,24 @@ describe("Error Recovery SQL DDL Example", () => { ]) it("can perform single token insertion for a missing semicolon", () => { - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() // one error encountered expect(parser.errors.length).to.equal(1) // yet the whole input has been parsed // and the output parseTree contains ALL three statements assertAllThreeStatementsPresentAndValid(ptResult) - let insertedSemiColon: IToken = ptResult.children[1].children[4].payload + const insertedSemiColon: IToken = ptResult.children[1].children[4].payload // the semicolon is present even though it did not exist in the input, magic! 
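// (the assertions on that inserted token continue right below)
//
// Context: such "magic" tokens come from single token insertion. A parser can
// veto insertion per token type by overriding canTokenTypeBeInsertedInRecovery,
// as the switch-case example later in this patch does for string literals.
// A self-contained sketch under that assumption:
import { createToken, CstParser, TokenType } from "chevrotain"

const StringTok = createToken({ name: "StringTok", pattern: /"[^"]*"/ })

class NoStringInsertionParser extends CstParser {
  constructor() {
    super([StringTok], { recoveryEnabled: true })
    this.performSelfAnalysis()
  }

  public value = this.RULE("value", () => {
    this.CONSUME(StringTok)
  })

  // never fabricate user data such as string literals during recovery;
  // token deletion and re-sync recovery remain available:
  public canTokenTypeBeInsertedInRecovery(tokType: TokenType): boolean {
    return tokType !== StringTok
  }
}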
expect(tokenMatcher(insertedSemiColon, SemiColonTok)).to.be.true expect(insertedSemiColon.isInsertedInRecovery).to.equal(true) }) it("can disable single token insertion for a missing semicolon", () => { - let parser = new DDLExampleRecoveryParser(false) + const parser = new DDLExampleRecoveryParser(false) parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() expect(parser.errors.length).to.equal(1) expect(ptResult.payload.tokenType).to.equal(INVALID_DDL) expect(ptResult.children).to.have.length(0) @@ -134,7 +134,7 @@ describe("Error Recovery SQL DDL Example", () => { }) describe("Single Token deletion recovery mechanism", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -156,9 +156,9 @@ describe("Error Recovery SQL DDL Example", () => { ]) it("can perform single token deletion for a redundant keyword", () => { - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult = parser.ddl() + const ptResult = parser.ddl() // one error encountered expect(parser.errors.length).to.equal(1) // yet the whole input has been parsed @@ -167,9 +167,9 @@ describe("Error Recovery SQL DDL Example", () => { }) it("can disable single token deletion for a redundant keyword", () => { - let parser = new DDLExampleRecoveryParser(false) + const parser = new DDLExampleRecoveryParser(false) parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() expect(parser.errors.length).to.equal(1) expect(ptResult.payload.tokenType).to.equal(INVALID_DDL) expect(ptResult.children).to.have.length(0) @@ -178,7 +178,7 @@ describe("Error Recovery SQL DDL Example", () => { describe("resync recovery mechanism", () => { it("can perform re-sync recovery and only 'lose' part of the input", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -200,10 +200,10 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(SemiColonTok) ]) - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() // one error encountered expect(parser.errors.length).to.equal(1) // yet the whole input has been parsed @@ -225,7 +225,7 @@ describe("Error Recovery SQL DDL Example", () => { ) }) // (32, "SHAHAR" ( <-- wrong parenthesis - let badShahar32Record = [ + const badShahar32Record = [ createRegularToken(LParenTok), createRegularToken(IntTok, "32"), createRegularToken(CommaTok), @@ -233,7 +233,7 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(LParenTok) ] - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -256,9 +256,9 @@ describe("Error Recovery SQL DDL Example", () => { ]) it("can perform re-sync recovery and only 'lose' part of the input even when re-syncing to two rules 'above'", () => { - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() // one error encountered expect(parser.errors.length).to.equal(1) // yet the whole input has been parsed @@ -281,9 +281,9 @@ describe("Error Recovery SQL DDL 
Example", () => { }) it("can disable re-sync recovery and only 'lose' part of the input even when re-syncing to two rules 'above'", () => { - let parser = new DDLExampleRecoveryParser(false) + const parser = new DDLExampleRecoveryParser(false) parser.input = input - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() // one error encountered expect(parser.errors.length).to.equal(1) // yet the whole input has been parsed @@ -311,7 +311,7 @@ describe("Error Recovery SQL DDL Example", () => { } it("will encounter an NotAllInputParsedException when some of the input vector has not been parsed", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons; TABLE <-- redundant "TABLE" token createRegularToken(CreateTok), createRegularToken(TableTok), @@ -319,7 +319,7 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(SemiColonTok), createRegularToken(TableTok) ]) - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input parser.ddl() @@ -328,18 +328,18 @@ describe("Error Recovery SQL DDL Example", () => { }) it("can use the same parser instance to parse multiple inputs", () => { - let input1: any = flatten([ + const input1: any = flatten([ // CREATE TABLE schema2.Persons; createRegularToken(CreateTok), createRegularToken(TableTok), schemaFQN, createRegularToken(SemiColonTok) ]) - let parser = new DDLExampleRecoveryParser(input1) + const parser = new DDLExampleRecoveryParser(input1) parser.ddl() expect(parser.errors.length).to.equal(0) - let input2: any = flatten([ + const input2: any = flatten([ // DELETE (31, "SHAHAR") FROM schema2.Persons createRegularToken(DeleteTok), shahar31Record, @@ -350,7 +350,7 @@ describe("Error Recovery SQL DDL Example", () => { // the parser is being reset instead of creating a new instance for each new input parser.reset() parser.input = input2 - let ptResult: any = parser.ddl() + const ptResult: any = parser.ddl() expect(parser.errors.length).to.equal(0) // verify returned ParseTree expect(ptResult.payload.tokenType).to.equal(STATEMENTS) @@ -362,7 +362,7 @@ describe("Error Recovery SQL DDL Example", () => { }) it("can re-sync to the next iteration in a MANY rule", () => { - let input: any = flatten([ + const input: any = flatten([ // CREATE TABLE schema2.Persons createRegularToken(CreateTok), createRegularToken(TableTok), @@ -384,9 +384,9 @@ describe("Error Recovery SQL DDL Example", () => { createRegularToken(SemiColonTok) ]) - let parser = new DDLExampleRecoveryParser() + const parser = new DDLExampleRecoveryParser() parser.input = input - let ptResult = parser.ddl() + const ptResult = parser.ddl() expect(parser.errors.length).to.equal(1) assertAllThreeStatementsPresentAndValid(ptResult) }) diff --git a/packages/chevrotain/test/full_flow/error_recovery/switch_case/switchcase_recovery_parser.ts b/packages/chevrotain/test/full_flow/error_recovery/switch_case/switchcase_recovery_parser.ts index b4b24de32..e46ff05c5 100644 --- a/packages/chevrotain/test/full_flow/error_recovery/switch_case/switchcase_recovery_parser.ts +++ b/packages/chevrotain/test/full_flow/error_recovery/switch_case/switchcase_recovery_parser.ts @@ -97,7 +97,7 @@ export class SwitchCaseRecoveryParser extends EmbeddedActionsParser { // previous grammar rule invocations. 
this.invalidIdx = 1 - let retObj: RetType = {} + const retObj: RetType = {} this.CONSUME(SwitchTok) this.CONSUME(LParenTok) @@ -115,20 +115,18 @@ export class SwitchCaseRecoveryParser extends EmbeddedActionsParser { } private parseCaseStmt(): RetType { - let keyTok, valueTok, key, value - this.CONSUME(CaseTok) - keyTok = this.CONSUME(StringTok) + const keyTok = this.CONSUME(StringTok) this.CONSUME(ColonTok) this.CONSUME(ReturnTok) - valueTok = this.CONSUME(IntTok) + const valueTok = this.CONSUME(IntTok) this.OPTION6(() => { this.CONSUME(SemiColonTok) }) - key = keyTok.image - value = parseInt(valueTok.image, 10) - let caseKeyValue: RetType = {} + const key = keyTok.image + const value = parseInt(valueTok.image, 10) + const caseKeyValue: RetType = {} caseKeyValue[key] = value return caseKeyValue } @@ -139,7 +137,7 @@ export class SwitchCaseRecoveryParser extends EmbeddedActionsParser { private INVALID(): () => RetType { return () => { - let retObj: RetType = {} + const retObj: RetType = {} retObj["invalid" + this.invalidIdx++] = undefined return retObj } diff --git a/packages/chevrotain/test/full_flow/error_recovery/switch_case/swithcase_recovery_spec.ts b/packages/chevrotain/test/full_flow/error_recovery/switch_case/swithcase_recovery_spec.ts index 0c77a728a..862c915d4 100644 --- a/packages/chevrotain/test/full_flow/error_recovery/switch_case/swithcase_recovery_spec.ts +++ b/packages/chevrotain/test/full_flow/error_recovery/switch_case/swithcase_recovery_spec.ts @@ -27,7 +27,7 @@ describe("Error Recovery switch-case Example", () => { new SwitchCaseRecoveryParser([]) it("can parse a valid text successfully", () => { - let input = [ + const input = [ // switch (name) { createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -58,10 +58,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parser.errors.length).to.equal(0) expect(parseResult).to.deep.equal({ @@ -72,7 +72,7 @@ describe("Error Recovery switch-case Example", () => { }) it("can perform re-sync recovery to the next case stmt", () => { - let input = [ + const input = [ // switch (name) { createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -105,10 +105,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parseResult).to.deep.equal({ Terry: 2, @@ -125,7 +125,7 @@ describe("Error Recovery switch-case Example", () => { }) it("will detect an error if missing AT_LEAST_ONCE occurrence", () => { - let input = [ + const input = [ // switch (name) { } createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -135,17 +135,17 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parser.errors.length).to.equal(1) expect(parser.errors[0]).to.be.an.instanceof(EarlyExitException) expect(parseResult).to.deep.equal({}) }) it("can perform re-sync recovery to the next case stmt even if the unexpected tokens are 
between valid case stmts", () => { - let input = [ + const input = [ // switch (name) { createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -182,10 +182,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parser.errors.length).to.equal(1) expect(parseResult).to.deep.equal({ @@ -196,7 +196,7 @@ describe("Error Recovery switch-case Example", () => { }) it("can perform re-sync recovery to the right curly after the case statements repetition", () => { - let input = [ + const input = [ // switch (name) { createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -230,10 +230,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parser.errors.length).to.equal(1) expect(parseResult).to.deep.equal({ Terry: 2, @@ -249,7 +249,7 @@ describe("Error Recovery switch-case Example", () => { }) it("can perform single token deletion recovery", () => { - let input = [ + const input = [ // switch (name) { createRegularToken(SwitchTok), createRegularToken(LParenTok), @@ -281,10 +281,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(RCurlyTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.switchStmt() + const parseResult = parser.switchStmt() expect(parser.errors.length).to.equal(1) expect(parseResult).to.deep.equal({ Terry: 2, @@ -294,7 +294,7 @@ describe("Error Recovery switch-case Example", () => { }) it("will perform single token insertion for a missing colon", () => { - let input = [ + const input = [ // case "Terry" return 2 <-- missing the colon between "Terry" and return createRegularToken(CaseTok), createRegularToken(StringTok, "Terry"), @@ -303,17 +303,17 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(SemiColonTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.caseStmt() + const parseResult = parser.caseStmt() expect(parser.errors.length).to.equal(1) expect(parser.errors[0]).to.be.an.instanceof(MismatchedTokenException) expect(parseResult).to.deep.equal({ Terry: 2 }) }) it("will NOT perform single token insertion for a missing string", () => { - let input = [ + const input = [ // case : return 2 <-- missing the string for the case's value createRegularToken(CaseTok), /* new StringTok("Terry" , 0, 1, 1),*/ createRegularToken(ColonTok), @@ -322,10 +322,10 @@ describe("Error Recovery switch-case Example", () => { createRegularToken(SemiColonTok) ] - let parser = new SwitchCaseRecoveryParser() + const parser = new SwitchCaseRecoveryParser() parser.input = input - let parseResult = parser.caseStmt() + const parseResult = parser.caseStmt() expect(parser.errors.length).to.equal(1) expect(parser.errors[0]).to.be.an.instanceof(MismatchedTokenException) expect(parseResult).to.deep.equal({ invalid1: undefined }) diff --git a/packages/chevrotain/test/full_flow/parse_tree.ts b/packages/chevrotain/test/full_flow/parse_tree.ts index b31469215..24894c4da 100644 --- 
a/packages/chevrotain/test/full_flow/parse_tree.ts +++ b/packages/chevrotain/test/full_flow/parse_tree.ts @@ -29,7 +29,7 @@ export function PT( tokenOrTokenClass: TokenType | IToken, children: ParseTree[] = [] ): ParseTree { - let childrenCompact = compact(children) + const childrenCompact = compact(children) if ((<IToken>tokenOrTokenClass).image !== undefined) { return new ParseTree(<IToken>tokenOrTokenClass, childrenCompact) diff --git a/packages/chevrotain/test/parse/cst_spec.ts b/packages/chevrotain/test/parse/cst_spec.ts index df29e9d08..68817dee0 100644 --- a/packages/chevrotain/test/parse/cst_spec.ts +++ b/packages/chevrotain/test/parse/cst_spec.ts @@ -13,11 +13,11 @@ function createTokenVector(tokTypes: TokenType[]): any[] { function defineTestSuit(recoveryMode) { context(`CST Recovery: ${recoveryMode}`, () => { - let A = createToken({ name: "A" }) - let B = createToken({ name: "B" }) - let C = createToken({ name: "C" }) - let D = createToken({ name: "D" }) - let E = createToken({ name: "E" }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B" }) + const C = createToken({ name: "C" }) + const D = createToken({ name: "D" }) + const E = createToken({ name: "E" }) const ALL_TOKENS = [A, B, C, D, E] @@ -40,13 +40,13 @@ }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] - let parser = new CstTerminalParser(input) - let cst: any = parser.testRule() + const parser = new CstTerminalParser(input) + const cst: any = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B", "bamba") expect(tokenStructuredMatcher(cst.children.A[0], A)).to.be.true @@ -75,13 +75,13 @@ }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] - let parser = new CstTerminalParser2(input) - let cst: any = parser.testRule() + const parser = new CstTerminalParser2(input) + const cst: any = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("myLabel", "B", "myOtherLabel") expect(tokenStructuredMatcher(cst.children.myLabel[0], A)).to.be.true @@ -113,9 +113,9 @@ }) } - let input = [createRegularToken(A), createRegularToken(B)] - let parser = new CstTerminalParserWithLabels(input) - let cst = parser.testRule() + const input = [createRegularToken(A), createRegularToken(B)] + const parser = new CstTerminalParserWithLabels(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("myLabel", "B", "myOtherLabel") @@ -158,9 +158,9 @@ }) } - let input = [createRegularToken(A)] - let parser = new CstTerminalAlternationParser(input) - let cst = parser.testRule() + const input = [createRegularToken(A)] + const parser = new CstTerminalAlternationParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A") expect(tokenStructuredMatcher(cst.children.A[0], A)).to.be.true @@ -189,9 +189,9 @@ }) } - let input = [createRegularToken(A), createRegularToken(B)] - let parser = new CstTerminalAlternationSingleAltParser(input) - let cst = parser.testRule() + const input = [createRegularToken(A), createRegularToken(B)] + const parser = new CstTerminalAlternationSingleAltParser(input) + const cst = parser.testRule()
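// (the test's assertions continue right below)
//
// Context: the myLabel/myOtherLabel keys asserted in these tests come from the
// LABEL option of CONSUME/SUBRULE. A self-contained sketch with illustrative
// names:
import { createToken, CstParser } from "chevrotain"

const A = createToken({ name: "A", pattern: /a/ })
const B = createToken({ name: "B", pattern: /b/ })

class LabeledCstParser extends CstParser {
  constructor() {
    super([A, B])
    this.performSelfAnalysis()
  }

  public testRule = this.RULE("testRule", () => {
    this.CONSUME(A, { LABEL: "myLabel" }) // appears as cst.children.myLabel
    this.CONSUME(B) // default key is the token type name: cst.children.B
  })
}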
expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B") expect(tokenStructuredMatcher(cst.children.A[0], A)).to.be.true @@ -215,13 +215,13 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(A) ] - let parser = new CstMultiTerminalParser(input) - let cst = parser.testRule() + const parser = new CstMultiTerminalParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B") expect(cst.children.A).to.have.length(2) @@ -253,7 +253,7 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(C), createRegularToken(A), @@ -262,8 +262,8 @@ function defineTestSuit(recoveryMode) { createRegularToken(C), createRegularToken(B) ] - let parser = new CstMultiTerminalWithManyParser(input) - let cst = parser.testRule() + const parser = new CstMultiTerminalWithManyParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B", "bamba") expect(cst.children.A).to.have.length(3) @@ -303,13 +303,13 @@ function defineTestSuit(recoveryMode) { } it("path taken", () => { - let input = [ + const input = [ createRegularToken(A), createRegularToken(C), createRegularToken(B) ] - let parser = new CstOptionalTerminalParser(input) - let cst = parser.ruleWithOptional() + const parser = new CstOptionalTerminalParser(input) + const cst = parser.ruleWithOptional() expect(cst.name).to.equal("ruleWithOptional") expect(cst.children).to.have.keys("A", "B", "bamba") expect(tokenStructuredMatcher(cst.children.A[0], A)).to.be.true @@ -321,9 +321,9 @@ function defineTestSuit(recoveryMode) { }) it("path NOT taken", () => { - let input = [createRegularToken(B)] - let parser = new CstOptionalTerminalParser(input) - let cst = parser.ruleWithOptional() + const input = [createRegularToken(B)] + const parser = new CstOptionalTerminalParser(input) + const cst = parser.ruleWithOptional() expect(cst.name).to.equal("ruleWithOptional") expect(cst.children).to.have.keys("B") expect(cst.children.A).to.be.undefined @@ -350,14 +350,14 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(A), createRegularToken(A), createRegularToken(B) ] - let parser = new CstMultiTerminalWithAtLeastOneParser(input) - let cst = parser.testRule() + const parser = new CstMultiTerminalWithAtLeastOneParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B") expect(cst.children.A).to.have.length(3) @@ -388,14 +388,14 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(C), createRegularToken(A), createRegularToken(B) ] - let parser = new CstMultiTerminalWithManySepParser(input) - let cst = parser.testRule() + const parser = new CstMultiTerminalWithManySepParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B", "C") expect(cst.children.A).to.have.length(2) @@ -428,14 +428,14 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A), createRegularToken(C), createRegularToken(A), createRegularToken(B) ] - let parser = new CstMultiTerminalWithAtLeastOneSepParser(input) - let cst = parser.testRule() + const parser = new 
CstMultiTerminalWithAtLeastOneSepParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("A", "B", "C") expect(cst.children.A).to.have.length(2) @@ -562,14 +562,14 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A, "1", 1, NaN, NaN, 2), createRegularToken(B, "2", 12, NaN, NaN, 13), createRegularToken(C, "3", 15, NaN, NaN, 16), createRegularToken(D, "4", 17, NaN, NaN, 18) ] - let parser = new CstTerminalParser(input) - let cst = parser.testRule() + const parser = new CstTerminalParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("B", "C", "first", "empty", "second") expect(tokenStructuredMatcher(cst.children.B[0], B)).to.be.true @@ -628,14 +628,14 @@ function defineTestSuit(recoveryMode) { }) } - let input = [ + const input = [ createRegularToken(A, "", 1, 1, 1, 2, 1, 2), createRegularToken(B, "", 12, 1, 3, 13, 1, 4), createRegularToken(C, "", 15, 2, 10, 16, 3, 15), createRegularToken(D, "", 17, 5, 2, 18, 5, 4) ] - let parser = new CstTerminalParser(input) - let cst = parser.testRule() + const parser = new CstTerminalParser(input) + const cst = parser.testRule() expect(cst.name).to.equal("testRule") expect(cst.children).to.have.keys("B", "C", "first", "second") expect(tokenStructuredMatcher(cst.children.B[0], B)).to.be.true @@ -698,9 +698,9 @@ function defineTestSuit(recoveryMode) { } } - let input = createTokenVector([A, E, E, C, D]) - let parser = new CstRecoveryParserReSync(input) - let cst = parser.root() + const input = createTokenVector([A, E, E, C, D]) + const parser = new CstRecoveryParserReSync(input) + const cst = parser.root() expect(parser.errors).to.have.lengthOf(1) expect(parser.errors[0].message).to.include( "Expecting token of type --> B <--" @@ -713,17 +713,17 @@ function defineTestSuit(recoveryMode) { expect(cst.name).to.equal("root") expect(cst.children).to.have.keys("first", "second") - let firstCollection = cst.children.first + const firstCollection = cst.children.first expect(firstCollection).to.have.lengthOf(1) - let first = firstCollection[0] as CstNode + const first = firstCollection[0] as CstNode expect(first.recoveredNode).to.be.true expect(first.children).to.have.keys("A") expect(tokenStructuredMatcher(first.children.A[0], A)).to.be.true expect(first.children.B).to.be.undefined - let secondCollection = cst.children.second + const secondCollection = cst.children.second expect(secondCollection).to.have.lengthOf(1) - let second = secondCollection[0] as CstNode + const second = secondCollection[0] as CstNode expect(second.recoveredNode).to.be.undefined expect(second.children).to.have.keys("C", "D") expect(tokenStructuredMatcher(second.children.C[0], C)).to.be.true @@ -777,9 +777,9 @@ function defineTestSuit(recoveryMode) { } } - let input = createTokenVector([A, E, E, C, D]) - let parser = new CstRecoveryParserReSyncNested(input) - let cst = parser.root() + const input = createTokenVector([A, E, E, C, D]) + const parser = new CstRecoveryParserReSyncNested(input) + const cst = parser.root() expect(parser.errors).to.have.lengthOf(1) expect(parser.errors[0].message).to.include( "Expecting token of type --> B <--" @@ -791,20 +791,20 @@ function defineTestSuit(recoveryMode) { expect(cst.name).to.equal("root") expect(cst.children).to.have.keys("first_root", "second") - let firstRootCollection = cst.children.first_root + const firstRootCollection = cst.children.first_root 
expect(firstRootCollection).to.have.lengthOf(1) - let firstRoot = firstRootCollection[0] as CstNode + const firstRoot = firstRootCollection[0] as CstNode expect(firstRoot.children).to.have.keys("first") - let first = firstRoot.children.first[0] as CstNode + const first = firstRoot.children.first[0] as CstNode expect(first.recoveredNode).to.be.true expect(first.children).to.have.keys("A") expect(tokenStructuredMatcher(first.children.A[0], A)).to.be.true expect(first.children.B).to.be.undefined - let secondCollection = cst.children.second + const secondCollection = cst.children.second expect(secondCollection).to.have.lengthOf(1) - let second = secondCollection[0] as CstNode + const second = secondCollection[0] as CstNode expect(second.recoveredNode).to.be.undefined expect(second.children).to.have.keys("C", "D") expect(tokenStructuredMatcher(second.children.C[0], C)).to.be.true diff --git a/packages/chevrotain/test/parse/cst_visitor_spec.ts b/packages/chevrotain/test/parse/cst_visitor_spec.ts index 8b311da45..f07a73f56 100644 --- a/packages/chevrotain/test/parse/cst_visitor_spec.ts +++ b/packages/chevrotain/test/parse/cst_visitor_spec.ts @@ -5,9 +5,9 @@ import { keys } from "../../src/utils/utils" import { IToken } from "../../api" describe("The CSTVisitor", () => { - let A = createToken({ name: "A" }) - let B = createToken({ name: "B" }) - let C = createToken({ name: "C" }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B" }) + const C = createToken({ name: "C" }) const ALL_TOKENS = [A, B, C] @@ -63,15 +63,15 @@ describe("The CSTVisitor", () => { } } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] parserInstance.input = input - let cst = parserInstance.testRule() + const cst = parserInstance.testRule() - let visitor = new CstVisitorValidator() + const visitor = new CstVisitorValidator() expect(visitor.visit(cst)).to.equal(666) }) @@ -95,15 +95,15 @@ describe("The CSTVisitor", () => { } } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] parserInstance.input = input - let cst = parserInstance.testRule() + const cst = parserInstance.testRule() - let visitor = new CstVisitorValidator() + const visitor = new CstVisitorValidator() expect(visitor.visit(cst, 1)).to.equal(667) }) @@ -121,15 +121,15 @@ describe("The CSTVisitor", () => { } } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] parserInstance.input = input - let cst = parserInstance.testRule() + const cst = parserInstance.testRule() - let visitor = new CstVisitorValidator() + const visitor = new CstVisitorValidator() visitor.visit(cst) expect(visited).to.be.true }) @@ -152,15 +152,15 @@ describe("The CSTVisitor", () => { } } - let input = [ + const input = [ createRegularToken(A), createRegularToken(B), createRegularToken(C) ] parserInstance.input = input - let cst = parserInstance.testRule() + const cst = parserInstance.testRule() - let visitor = new CstVisitorValidator() + const visitor = new CstVisitorValidator() expect(visitor.visit([cst], 1)).to.equal(667) expect(visitor.visit([], 1)).to.be.undefined }) diff --git a/packages/chevrotain/test/parse/exceptions_spec.ts b/packages/chevrotain/test/parse/exceptions_spec.ts index 26f05e1bb..ea93e70a8 100644 --- a/packages/chevrotain/test/parse/exceptions_spec.ts +++ b/packages/chevrotain/test/parse/exceptions_spec.ts @@ -9,11 +9,20 @@ import { describe("Chevrotain's Parsing Exceptions", () => { 
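// (the suite's nested describes follow below)
//
// Context: these specs pin down the shape of Chevrotain's recognition
// exceptions. With the default config a parser does not throw them at callers;
// they accumulate on parser.errors. A fragment sketch of typical consumer
// handling (the parser instance and token vector are assumed to exist):
//
//   import { MismatchedTokenException } from "chevrotain"
//
//   parser.input = tokens
//   parser.topRule()
//   for (const err of parser.errors) {
//     if (err instanceof MismatchedTokenException) {
//       console.warn(`${err.message} at offset ${err.token.startOffset}`)
//     }
//   }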
describe("the exception instance subclasses Error with the right properties for: ", () => { - let currentToken = createTokenInstance(EOF, "cur", -1, -1, -1, -1, -1, -1) - let previousToken = createTokenInstance(EOF, "prv", -1, -1, -1, -1, -1, -1) + const currentToken = createTokenInstance(EOF, "cur", -1, -1, -1, -1, -1, -1) + const previousToken = createTokenInstance( + EOF, + "prv", + -1, + -1, + -1, + -1, + -1, + -1 + ) it("EarlyExitException", () => { - let exceptionInstance = new EarlyExitException( + const exceptionInstance = new EarlyExitException( "error message", currentToken, previousToken @@ -28,7 +37,7 @@ describe("Chevrotain's Parsing Exceptions", () => { }) it("NoViableAltException", () => { - let exceptionInstance = new NoViableAltException( + const exceptionInstance = new NoViableAltException( "error message", currentToken, previousToken @@ -43,7 +52,7 @@ describe("Chevrotain's Parsing Exceptions", () => { }) it("NotAllInputParsedException", () => { - let exceptionInstance = new NotAllInputParsedException( + const exceptionInstance = new NotAllInputParsedException( "error message", currentToken ) @@ -56,7 +65,7 @@ describe("Chevrotain's Parsing Exceptions", () => { }) it("MismatchedTokenException", () => { - let exceptionInstance = new MismatchedTokenException( + const exceptionInstance = new MismatchedTokenException( "error message", currentToken, previousToken @@ -71,7 +80,7 @@ describe("Chevrotain's Parsing Exceptions", () => { }) describe("the exception instance stacktrace is valid for: ", () => { - let dummyToken = createTokenInstance(EOF, "cur", -1, -1, -1, -1, -1, -1) + const dummyToken = createTokenInstance(EOF, "cur", -1, -1, -1, -1, -1, -1) function throwAndCatchException(errorFactory: () => Error) { try { @@ -82,37 +91,37 @@ describe("Chevrotain's Parsing Exceptions", () => { } it("EarlyExitException", () => { - let exceptionInstance = throwAndCatchException( + const exceptionInstance = throwAndCatchException( () => new EarlyExitException("", dummyToken, dummyToken) ) - let stacktrace = ErrorStackParser.parse(exceptionInstance) + const stacktrace = ErrorStackParser.parse(exceptionInstance) expect(stacktrace[0].functionName).to.be.undefined // lambda function expect(stacktrace[1].functionName).to.equal("throwAndCatchException") }) it("NoViableAltException", () => { - let exceptionInstance = throwAndCatchException( + const exceptionInstance = throwAndCatchException( () => new NoViableAltException("", dummyToken, dummyToken) ) - let stacktrace = ErrorStackParser.parse(exceptionInstance) + const stacktrace = ErrorStackParser.parse(exceptionInstance) expect(stacktrace[0].functionName).to.be.undefined // lambda function expect(stacktrace[1].functionName).to.equal("throwAndCatchException") }) it("NotAllInputParsedException", () => { - let exceptionInstance = throwAndCatchException( + const exceptionInstance = throwAndCatchException( () => new NotAllInputParsedException("", dummyToken) ) - let stacktrace = ErrorStackParser.parse(exceptionInstance) + const stacktrace = ErrorStackParser.parse(exceptionInstance) expect(stacktrace[0].functionName).to.be.undefined // lambda function expect(stacktrace[1].functionName).to.equal("throwAndCatchException") }) it("MismatchedTokenException", () => { - let exceptionInstance = throwAndCatchException( + const exceptionInstance = throwAndCatchException( () => new MismatchedTokenException("", dummyToken, dummyToken) ) - let stacktrace = ErrorStackParser.parse(exceptionInstance) + const stacktrace = 
ErrorStackParser.parse(exceptionInstance) expect(stacktrace[0].functionName).to.be.undefined // lambda function expect(stacktrace[1].functionName).to.equal("throwAndCatchException") }) diff --git a/packages/chevrotain/test/parse/grammar/checks_spec.ts b/packages/chevrotain/test/parse/grammar/checks_spec.ts index 2b99ae257..00010882e 100644 --- a/packages/chevrotain/test/parse/grammar/checks_spec.ts +++ b/packages/chevrotain/test/parse/grammar/checks_spec.ts @@ -34,7 +34,7 @@ import { IToken } from "../../../api" describe("the grammar validations", () => { it("validates every one of the TOP_RULEs in the input", () => { - let expectedErrorsNoMsg = [ + const expectedErrorsNoMsg = [ { type: ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS, ruleName: "qualifiedNameErr1", @@ -64,7 +64,7 @@ describe("the grammar validations", () => { } ] - let qualifiedNameErr1 = new Rule({ + const qualifiedNameErr1 = new Rule({ name: "qualifiedNameErr1", definition: [ new Terminal({ terminalType: IdentTok, idx: 1 }), @@ -80,7 +80,7 @@ describe("the grammar validations", () => { ] }) - let qualifiedNameErr2 = new Rule({ + const qualifiedNameErr2 = new Rule({ name: "qualifiedNameErr2", definition: [ new Terminal({ terminalType: IdentTok, idx: 1 }), @@ -104,7 +104,7 @@ describe("the grammar validations", () => { }) ] }) - let actualErrors = validateGrammar( + const actualErrors = validateGrammar( [qualifiedNameErr1, qualifiedNameErr2], 5, [], @@ -119,7 +119,7 @@ describe("the grammar validations", () => { }) it("does not allow duplicate grammar rule names", () => { - let noErrors = validateRuleDoesNotAlreadyExist( + const noErrors = validateRuleDoesNotAlreadyExist( new Rule({ name: "A", definition: [] }), [ new Rule({ name: "B", definition: [] }), @@ -131,7 +131,7 @@ describe("the grammar validations", () => { //noinspection BadExpressionStatementJS expect(noErrors).to.be.empty - let duplicateErr = validateRuleDoesNotAlreadyExist( + const duplicateErr = validateRuleDoesNotAlreadyExist( new Rule({ name: "A", definition: [] }), [ new Rule({ name: "A", definition: [] }), @@ -153,7 +153,11 @@ describe("the grammar validations", () => { }) it("does not allow overriding a rule which does not already exist", () => { - let positive = validateRuleIsOverridden("AAA", ["BBB", "CCC"], "className") + const positive = validateRuleIsOverridden( + "AAA", + ["BBB", "CCC"], + "className" + ) expect(positive).to.have.lengthOf(1) expect(positive[0].message).to.contain("Invalid rule override") expect(positive[0].type).to.equal( @@ -161,7 +165,7 @@ describe("the grammar validations", () => { ) expect(positive[0].ruleName).to.equal("AAA") - let negative = validateRuleIsOverridden( + const negative = validateRuleIsOverridden( "AAA", ["BBB", "CCC", "AAA"], "className" @@ -172,42 +176,42 @@ describe("the grammar validations", () => { describe("identifyProductionForDuplicates function", () => { it("generates DSL code for a ProdRef", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new NonTerminal({ nonTerminalName: "ActionDeclaration" }) ) expect(dslCode).to.equal("SUBRULE_#_1_#_ActionDeclaration") }) it("generates DSL code for a OPTION", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new Option({ definition: [], idx: 3 }) ) expect(dslCode).to.equal("OPTION_#_3_#_") }) it("generates DSL code for a AT_LEAST_ONE", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new 
RepetitionMandatory({ definition: [] }) ) expect(dslCode).to.equal("AT_LEAST_ONE_#_1_#_") }) it("generates DSL code for a MANY", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new Repetition({ definition: [], idx: 5 }) ) expect(dslCode).to.equal("MANY_#_5_#_") }) it("generates DSL code for a OR", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new Alternation({ definition: [], idx: 1 }) ) expect(dslCode).to.equal("OR_#_1_#_") }) it("generates DSL code for a Terminal", () => { - let dslCode = identifyProductionForDuplicates( + const dslCode = identifyProductionForDuplicates( new Terminal({ terminalType: IdentTok, idx: 4 }) ) expect(dslCode).to.equal("CONSUME_#_4_#_IdentTok") @@ -216,13 +220,13 @@ describe("identifyProductionForDuplicates function", () => { describe("OccurrenceValidationCollector GASTVisitor class", () => { it("collects all the productions relevant to occurrence validation", () => { - let qualifiedNameVisitor = new OccurrenceValidationCollector() + const qualifiedNameVisitor = new OccurrenceValidationCollector() qualifiedName.accept(qualifiedNameVisitor) expect(qualifiedNameVisitor.allProductions.length).to.equal(4) // TODO: check set equality - let actionDecVisitor = new OccurrenceValidationCollector() + const actionDecVisitor = new OccurrenceValidationCollector() actionDec.accept(actionDecVisitor) expect(actionDecVisitor.allProductions.length).to.equal(13) @@ -233,15 +237,15 @@ describe("OccurrenceValidationCollector GASTVisitor class", () => { class DummyToken { static PATTERN = /NA/ } -let dummyRule = new Rule({ +const dummyRule = new Rule({ name: "dummyRule", definition: [new Terminal({ terminalType: DummyToken })] }) -let dummyRule2 = new Rule({ +const dummyRule2 = new Rule({ name: "dummyRule2", definition: [new Terminal({ terminalType: DummyToken })] }) -let dummyRule3 = new Rule({ +const dummyRule3 = new Rule({ name: "dummyRule3", definition: [new Terminal({ terminalType: DummyToken })] }) @@ -252,7 +256,7 @@ describe("the getFirstNoneTerminal function", () => { }) it("can find the firstNoneTerminal of a sequence with only one item", () => { - let result = getFirstNoneTerminal([ + const result = getFirstNoneTerminal([ new NonTerminal({ nonTerminalName: "dummyRule", referencedRule: dummyRule @@ -263,7 +267,7 @@ describe("the getFirstNoneTerminal function", () => { }) it("can find the firstNoneTerminal of a sequence with two items", () => { - let sqeuence = [ + const sqeuence = [ new NonTerminal({ nonTerminalName: "dummyRule", referencedRule: dummyRule @@ -273,13 +277,13 @@ describe("the getFirstNoneTerminal function", () => { referencedRule: dummyRule2 }) ] - let result = getFirstNoneTerminal(sqeuence) + const result = getFirstNoneTerminal(sqeuence) expect(result).to.have.length(1) expect(first(result).name).to.equal("dummyRule") }) it("can find the firstNoneTerminal of a sequence with two items where the first is optional", () => { - let sqeuence = [ + const sqeuence = [ new Option({ definition: [ new NonTerminal({ @@ -293,14 +297,14 @@ describe("the getFirstNoneTerminal function", () => { referencedRule: dummyRule2 }) ] - let result = getFirstNoneTerminal(sqeuence) + const result = getFirstNoneTerminal(sqeuence) expect(result).to.have.length(2) - let resultRuleNames = map(result, (currItem) => currItem.name) + const resultRuleNames = map(result, (currItem) => currItem.name) expect(resultRuleNames).to.include.members(["dummyRule", "dummyRule2"]) }) 
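// Context: getFirstNoneTerminal answers "which rules can be reached first from
// this definition?", which is what the left-recursion validation in checks.ts
// consumes. A fragment sketch of the kind of grammar that validation rejects
// (hypothetical rule and token, not from these tests):
//
//   $.RULE("expr", () => {
//     $.SUBRULE($.expr) // expr is its own first nonterminal: left recursion,
//     $.CONSUME(PlusTok) // no token is ever consumed before recursing
//   })
//
// Optional prefixes (OPTION/MANY) can be skipped entirely, which is why the
// assertions above expect the rules following them to appear in the result too.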
it("can find the firstNoneTerminal of an alternation", () => { - let alternation = [ + const alternation = [ new Alternation({ definition: [ new Alternative({ @@ -330,9 +334,9 @@ describe("the getFirstNoneTerminal function", () => { ] }) ] - let result = getFirstNoneTerminal(alternation) + const result = getFirstNoneTerminal(alternation) expect(result).to.have.length(3) - let resultRuleNames = map(result, (currItem) => currItem.name) + const resultRuleNames = map(result, (currItem) => currItem.name) expect(resultRuleNames).to.include.members([ "dummyRule", "dummyRule2", @@ -341,7 +345,7 @@ describe("the getFirstNoneTerminal function", () => { }) it("can find the firstNoneTerminal of an optional repetition", () => { - let alternation = [ + const alternation = [ new Repetition({ definition: [ new Alternative({ @@ -367,14 +371,14 @@ describe("the getFirstNoneTerminal function", () => { referencedRule: dummyRule3 }) ] - let result = getFirstNoneTerminal(alternation) + const result = getFirstNoneTerminal(alternation) expect(result).to.have.length(2) - let resultRuleNames = map(result, (currItem) => currItem.name) + const resultRuleNames = map(result, (currItem) => currItem.name) expect(resultRuleNames).to.include.members(["dummyRule", "dummyRule3"]) }) it("can find the firstNoneTerminal of a mandatory repetition", () => { - let alternation = [ + const alternation = [ new RepetitionMandatory({ definition: [ new Alternative({ @@ -400,9 +404,9 @@ describe("the getFirstNoneTerminal function", () => { referencedRule: dummyRule3 }) ] - let result = getFirstNoneTerminal(alternation) + const result = getFirstNoneTerminal(alternation) expect(result).to.have.length(1) - let resultRuleNames = map(result, (currItem) => currItem.name) + const resultRuleNames = map(result, (currItem) => currItem.name) expect(resultRuleNames).to.include.members(["dummyRule"]) }) }) @@ -432,8 +436,8 @@ class ErroneousOccurrenceNumUsageParser2 extends EmbeddedActionsParser { }) } -let myToken = createToken({ name: "myToken" }) -let myOtherToken = createToken({ name: "myOtherToken" }) +const myToken = createToken({ name: "myToken" }) +const myOtherToken = createToken({ name: "myOtherToken" }) class ValidOccurrenceNumUsageParser extends EmbeddedActionsParser { constructor(input: IToken[] = []) { @@ -513,7 +517,7 @@ describe("The duplicate occurrence validations full flow", () => { it("won't detect issues in a Parser using Tokens created by extendToken(...) 
utility (anonymous)", () => { //noinspection JSUnusedLocalSymbols - let parser = new ValidOccurrenceNumUsageParser() + const parser = new ValidOccurrenceNumUsageParser() }) }) @@ -1228,9 +1232,9 @@ describe("The empty alternative detection full flow", () => { describe("The prefix ambiguity detection full flow", () => { it("will throw an error when an a common prefix ambiguity is detected - categories", () => { - let A = createToken({ name: "A" }) - let B = createToken({ name: "B", categories: A }) - let C = createToken({ name: "C" }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B", categories: A }) + const C = createToken({ name: "C" }) class PrefixAltAmbiguity extends EmbeddedActionsParser { constructor(input: IToken[] = []) { diff --git a/packages/chevrotain/test/parse/grammar/first_spec.ts b/packages/chevrotain/test/parse/grammar/first_spec.ts index d8f4192b0..de1ea341d 100644 --- a/packages/chevrotain/test/parse/grammar/first_spec.ts +++ b/packages/chevrotain/test/parse/grammar/first_spec.ts @@ -17,26 +17,26 @@ import { describe("The Grammar Ast first model", () => { it("can compute the first for a terminal", () => { - let terminal = new Terminal({ terminalType: EntityTok }) - let actual = first(terminal) + const terminal = new Terminal({ terminalType: EntityTok }) + const actual = first(terminal) expect(actual.length).to.equal(1) expect(actual[0]).to.equal(EntityTok) - let terminal2 = new Terminal({ terminalType: CommaTok }) - let actual2 = first(terminal2) + const terminal2 = new Terminal({ terminalType: CommaTok }) + const actual2 = first(terminal2) expect(actual2.length).to.equal(1) expect(actual2[0]).to.equal(CommaTok) }) it("can compute the first for a Sequence production ", () => { - let seqProduction = new Alternative({ + const seqProduction = new Alternative({ definition: [new Terminal({ terminalType: EntityTok })] }) - let actual = first(seqProduction) + const actual = first(seqProduction) expect(actual.length).to.equal(1) expect(actual[0]).to.equal(EntityTok) - let seqProduction2 = new Alternative({ + const seqProduction2 = new Alternative({ definition: [ new Terminal({ terminalType: EntityTok }), new Option({ @@ -44,13 +44,13 @@ describe("The Grammar Ast first model", () => { }) ] }) - let actual2 = first(seqProduction2) + const actual2 = first(seqProduction2) expect(actual2.length).to.equal(1) expect(actual2[0]).to.equal(EntityTok) }) it("can compute the first for an alternatives production ", () => { - let altProduction = new Alternation({ + const altProduction = new Alternation({ definition: [ new Alternative({ definition: [new Terminal({ terminalType: EntityTok })] @@ -63,7 +63,7 @@ describe("The Grammar Ast first model", () => { }) ] }) - let actual = first(altProduction) + const actual = first(altProduction) expect(actual.length).to.equal(3) expect(actual[0]).to.equal(EntityTok) expect(actual[1]).to.equal(NamespaceTok) @@ -71,7 +71,7 @@ describe("The Grammar Ast first model", () => { }) it("can compute the first for an production with optional prefix", () => { - let withOptionalPrefix = new Alternative({ + const withOptionalPrefix = new Alternative({ definition: [ new Option({ definition: [new Terminal({ terminalType: NamespaceTok })] @@ -79,10 +79,10 @@ describe("The Grammar Ast first model", () => { new Terminal({ terminalType: EntityTok }) ] }) - let actual = first(withOptionalPrefix) + const actual = first(withOptionalPrefix) setEquality(actual, [NamespaceTok, EntityTok]) - let withTwoOptPrefix = new Alternative({ + const 
withTwoOptPrefix = new Alternative({ definition: [ new Option({ definition: [new Terminal({ terminalType: NamespaceTok })] @@ -96,7 +96,7 @@ describe("The Grammar Ast first model", () => { }) ] }) - let actual2 = first(withTwoOptPrefix) + const actual2 = first(withTwoOptPrefix) setEquality(actual2, [NamespaceTok, ColonTok, EntityTok]) }) }) diff --git a/packages/chevrotain/test/parse/grammar/follow_spec.ts b/packages/chevrotain/test/parse/grammar/follow_spec.ts index 808374692..0a390e2ae 100644 --- a/packages/chevrotain/test/parse/grammar/follow_spec.ts +++ b/packages/chevrotain/test/parse/grammar/follow_spec.ts @@ -18,27 +18,27 @@ import { keys } from "../../../src/utils/utils" describe("The Grammar Ast Follows model", () => { it("can build a followNamePrefix from a Terminal", () => { - let terminal = new Terminal({ terminalType: IdentTok }) - let actual = buildInProdFollowPrefix(terminal) + const terminal = new Terminal({ terminalType: IdentTok }) + const actual = buildInProdFollowPrefix(terminal) expect(actual).to.equal("IdentTok1_~IN~_") - let terminal2 = new Terminal({ terminalType: EntityTok }) + const terminal2 = new Terminal({ terminalType: EntityTok }) terminal2.idx = 3 - let actual2 = buildInProdFollowPrefix(terminal2) + const actual2 = buildInProdFollowPrefix(terminal2) expect(actual2).to.equal("EntityTok3_~IN~_") }) it("can build a followName prefix from a TopLevel Production and index", () => { - let prod = new Rule({ name: "bamba", definition: [] }) - let index = 5 + const prod = new Rule({ name: "bamba", definition: [] }) + const index = 5 - let actual = buildBetweenProdsFollowPrefix(prod, index) + const actual = buildBetweenProdsFollowPrefix(prod, index) expect(actual).to.equal("bamba5_~IN~_") }) it("can compute the follows for Top level production ref in ActionDec", () => { - let actual = new ResyncFollowsWalker(actionDec).startWalking() - let actualFollowNames = keys(actual) + const actual = new ResyncFollowsWalker(actionDec).startWalking() + const actualFollowNames = keys(actual) expect(actualFollowNames.length).to.equal(3) expect(actual["paramSpec1_~IN~_actionDec"].length).to.equal(2) setEquality(actual["paramSpec1_~IN~_actionDec"], [CommaTok, RParenTok]) @@ -49,7 +49,7 @@ describe("The Grammar Ast Follows model", () => { }) it("can compute all follows for a set of top level productions", () => { - let actual = computeAllProdsFollows([actionDec]) + const actual = computeAllProdsFollows([actionDec]) expect(keys(actual).length).to.equal(3) }) }) diff --git a/packages/chevrotain/test/parse/grammar/gast_spec.ts b/packages/chevrotain/test/parse/grammar/gast_spec.ts index 24b2cbd75..ffd35a5ef 100644 --- a/packages/chevrotain/test/parse/grammar/gast_spec.ts +++ b/packages/chevrotain/test/parse/grammar/gast_spec.ts @@ -18,7 +18,7 @@ import { describe("GAst namespace", () => { describe("the ProdRef class", () => { it("will always return a valid empty definition, even if it's ref is unresolved", () => { - let prodRef = new NonTerminal({ + const prodRef = new NonTerminal({ nonTerminalName: "SomeGrammarRuleName" }) expect(prodRef.definition).to.be.an.instanceof(Array) @@ -42,34 +42,34 @@ describe("GAst namespace", () => { } it("Terminal", () => { - let gastInstance = new Terminal({ terminalType: Comma }) + const gastInstance = new Terminal({ terminalType: Comma }) expect(getProductionDslName(gastInstance)).to.equal("CONSUME") }) it("NonTerminal", () => { - let gastInstance = new NonTerminal({ + const gastInstance = new NonTerminal({ nonTerminalName: "bamba" }) 
expect(getProductionDslName(gastInstance)).to.equal("SUBRULE") }) it("Option", () => { - let gastInstance = new Option({ definition: [] }) + const gastInstance = new Option({ definition: [] }) expect(getProductionDslName(gastInstance)).to.equal("OPTION") }) it("Alternation", () => { - let gastInstance = new Alternation({ definition: [] }) + const gastInstance = new Alternation({ definition: [] }) expect(getProductionDslName(gastInstance)).to.equal("OR") }) it("RepetitionMandatory", () => { - let gastInstance = new RepetitionMandatory({ definition: [] }) + const gastInstance = new RepetitionMandatory({ definition: [] }) expect(getProductionDslName(gastInstance)).to.equal("AT_LEAST_ONE") }) it("RepetitionMandatoryWithSeparator", () => { - let gastInstance = new RepetitionMandatoryWithSeparator({ + const gastInstance = new RepetitionMandatoryWithSeparator({ definition: [], separator: Comma }) @@ -77,7 +77,7 @@ describe("GAst namespace", () => { }) it("RepetitionWithSeparator", () => { - let gastInstance = new RepetitionWithSeparator({ + const gastInstance = new RepetitionWithSeparator({ definition: [], separator: Comma }) @@ -85,28 +85,28 @@ describe("GAst namespace", () => { }) it("Repetition", () => { - let gastInstance = new Repetition({ definition: [] }) + const gastInstance = new Repetition({ definition: [] }) expect(getProductionDslName(gastInstance)).to.equal("MANY") }) }) describe("the GAst serialization capabilities", () => { - let A = createToken({ name: "A" }) + const A = createToken({ name: "A" }) A.LABEL = "bamba" - let B = createToken({ name: "B", pattern: /[a-zA-Z]\w*/ }) - let C = createToken({ name: "C" }) - let D = createToken({ name: "D" }) - let Comma = createToken({ name: "Comma" }) - let WithLiteral = createToken({ + const B = createToken({ name: "B", pattern: /[a-zA-Z]\w*/ }) + const C = createToken({ name: "C" }) + const D = createToken({ name: "D" }) + const Comma = createToken({ name: "Comma" }) + const WithLiteral = createToken({ name: "WithLiteral", pattern: "bamba" }) it("can serialize a NonTerminal", () => { - let input = new NonTerminal({ + const input = new NonTerminal({ nonTerminalName: "qualifiedName" }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "NonTerminal", name: "qualifiedName", @@ -115,13 +115,13 @@ describe("GAst namespace", () => { }) it("can serialize a Alternative", () => { - let input = new Alternative({ + const input = new Alternative({ definition: [ new Terminal({ terminalType: WithLiteral }), new NonTerminal({ nonTerminalName: "bamba" }) ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Alternative", definition: [ @@ -142,13 +142,13 @@ describe("GAst namespace", () => { }) it("can serialize a Option", () => { - let input = new Option({ + const input = new Option({ definition: [ new Terminal({ terminalType: C }), new NonTerminal({ nonTerminalName: "bamba" }) ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Option", idx: 1, @@ -169,13 +169,13 @@ describe("GAst namespace", () => { }) it("can serialize a RepetitionMandatory", () => { - let input = new RepetitionMandatory({ + const input = new RepetitionMandatory({ definition: [ new Terminal({ terminalType: C }), new NonTerminal({ nonTerminalName: "bamba" }) ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) 
expect(actual).to.deep.equal({ type: "RepetitionMandatory", idx: 1, @@ -196,14 +196,14 @@ describe("GAst namespace", () => { }) it("can serialize a RepetitionMandatoryWithSeparator", () => { - let input = new RepetitionMandatoryWithSeparator({ + const input = new RepetitionMandatoryWithSeparator({ definition: [ new Terminal({ terminalType: C }), new NonTerminal({ nonTerminalName: "bamba" }) ], separator: Comma }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "RepetitionMandatoryWithSeparator", idx: 1, @@ -230,13 +230,13 @@ describe("GAst namespace", () => { }) it("can serialize a Repetition", () => { - let input = new Repetition({ + const input = new Repetition({ definition: [ new Terminal({ terminalType: C }), new NonTerminal({ nonTerminalName: "bamba" }) ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Repetition", idx: 1, @@ -257,14 +257,14 @@ describe("GAst namespace", () => { }) it("can serialize a RepetitionWithSeparator", () => { - let input = new RepetitionWithSeparator({ + const input = new RepetitionWithSeparator({ definition: [ new Terminal({ terminalType: C }), new NonTerminal({ nonTerminalName: "bamba" }) ], separator: Comma }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "RepetitionWithSeparator", idx: 1, @@ -291,7 +291,7 @@ describe("GAst namespace", () => { }) it("can serialize a Alternation", () => { - let input = new Alternation({ + const input = new Alternation({ definition: [ new Alternative({ definition: [new Terminal({ terminalType: A })] @@ -305,7 +305,7 @@ describe("GAst namespace", () => { ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Alternation", idx: 1, @@ -349,8 +349,8 @@ describe("GAst namespace", () => { }) it("can serialize a Terminal with a custom label", () => { - let input = new Terminal({ terminalType: A }) - let actual = serializeProduction(input) + const input = new Terminal({ terminalType: A }) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Terminal", name: "A", @@ -360,8 +360,8 @@ describe("GAst namespace", () => { }) it("can serialize a Terminal with a pattern", () => { - let input = new Terminal({ terminalType: B }) - let actual = serializeProduction(input) + const input = new Terminal({ terminalType: B }) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Terminal", name: "B", @@ -372,7 +372,7 @@ describe("GAst namespace", () => { }) it("can serialize a Rule", () => { - let input = new Rule({ + const input = new Rule({ name: "myRule", orgText: "", definition: [ @@ -380,7 +380,7 @@ describe("GAst namespace", () => { new NonTerminal({ nonTerminalName: "bamba" }) ] }) - let actual = serializeProduction(input) + const actual = serializeProduction(input) expect(actual).to.deep.equal({ type: "Rule", name: "myRule", @@ -402,7 +402,7 @@ describe("GAst namespace", () => { }) it("can serialize an array of Rules", () => { - let input = [ + const input = [ new Rule({ name: "myRule", orgText: "", @@ -420,7 +420,7 @@ describe("GAst namespace", () => { ] }) ] - let actual = serializeGrammar(input) + const actual = serializeGrammar(input) expect(actual).to.deep.equal([ { type: "Rule", diff --git a/packages/chevrotain/test/parse/grammar/interperter_spec.ts 
b/packages/chevrotain/test/parse/grammar/interperter_spec.ts index d145a089e..b074609c1 100644 --- a/packages/chevrotain/test/parse/grammar/interperter_spec.ts +++ b/packages/chevrotain/test/parse/grammar/interperter_spec.ts @@ -52,14 +52,14 @@ import { IToken, ITokenGrammarPath, TokenType } from "../../../api" describe("The Grammar Interpeter namespace", () => { describe("The NextAfterTokenWalker", () => { it("can compute the next possible token types From ActionDec in scope of ActionDec #1", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: ActionTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -68,14 +68,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #2", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: IdentTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -84,14 +84,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: LParenTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -100,14 +100,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #4", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: CommaTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -116,14 +116,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #5", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: RParenTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -132,14 +132,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #6", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: ColonTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -148,14 +148,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From ActionDec in scope of ActionDec #7", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec"], occurrenceStack: [1], lastTok: 
SemicolonTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -163,14 +163,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the first paramSpec INSIDE ActionDec #1", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 1], lastTok: IdentTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -179,14 +179,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the first paramSpec INSIDE ActionDec #2", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 1], lastTok: ColonTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -195,14 +195,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the first paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 1], lastTok: LSquareTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -211,14 +211,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the first paramSpec INSIDE ActionDec #4", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 1], lastTok: RSquareTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -227,14 +227,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the second paramSpec INSIDE ActionDec #1", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 2], lastTok: IdentTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -243,14 +243,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the second paramSpec INSIDE ActionDec #2", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 2], lastTok: ColonTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -259,14 +259,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the second paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 2], lastTok: 
LSquareTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -275,14 +275,14 @@ describe("The Grammar Interpeter namespace", () => { }) it("can compute the next possible token types From the second paramSpec INSIDE ActionDec #4", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec"], occurrenceStack: [1, 2], lastTok: RSquareTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -294,14 +294,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #1", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec", "qualifiedName"], occurrenceStack: [1, 1, 1], lastTok: IdentTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -319,14 +319,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #2", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec", "qualifiedName"], occurrenceStack: [1, 1, 1], lastTok: DotTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -339,14 +339,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["actionDec", "paramSpec", "qualifiedName"], occurrenceStack: [1, 1, 1], lastTok: IdentTok, lastTokOccurrence: 2 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( actionDec, caPath ).startWalking() @@ -364,14 +364,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["paramSpec", "qualifiedName"], occurrenceStack: [1, 1], lastTok: IdentTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( paramSpec, caPath ).startWalking() @@ -384,14 +384,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["paramSpec", "qualifiedName"], occurrenceStack: [1, 1], lastTok: DotTok, lastTokOccurrence: 1 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( paramSpec, caPath ).startWalking() @@ -404,14 +404,14 @@ describe("The Grammar Interpeter namespace", () => { "can compute the next possible token 
types From a fqn inside an actionParamSpec" + " inside an paramSpec INSIDE ActionDec #3", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["paramSpec", "qualifiedName"], occurrenceStack: [1, 1], lastTok: IdentTok, lastTokOccurrence: 2 } - let possibleNextTokTypes = new NextAfterTokenWalker( + const possibleNextTokTypes = new NextAfterTokenWalker( paramSpec, caPath ).startWalking() @@ -421,14 +421,14 @@ describe("The Grammar Interpeter namespace", () => { ) it("will fail if we try to compute the next token starting from a rule that does not match the path", () => { - let caPath: ITokenGrammarPath = { + const caPath: ITokenGrammarPath = { ruleStack: ["I_WILL_FAIL_THE_WALKER", "qualifiedName"], occurrenceStack: [1, 1], lastTok: IdentTok, lastTokOccurrence: 2 } - let walker = new NextAfterTokenWalker(paramSpec, caPath) + const walker = new NextAfterTokenWalker(paramSpec, caPath) expect(() => walker.startWalking()).to.throw( "The path does not start with the walker's top Rule!" ) @@ -438,7 +438,7 @@ describe("The Grammar Interpeter namespace", () => { describe("The NextTerminalAfterManyWalker", () => { it("can compute the next possible token types after the MANY in QualifiedName", () => { - let rule = new Rule({ + const rule = new Rule({ name: "TwoRepetitionRule", definition: [ new Repetition({ @@ -466,7 +466,7 @@ describe("The NextTerminalAfterManyWalker", () => { ] }) - let result = new NextTerminalAfterManyWalker(rule, 1).startWalking() + const result = new NextTerminalAfterManyWalker(rule, 1).startWalking() //noinspection BadExpressionStatementJS expect(result.occurrence).to.be.undefined //noinspection BadExpressionStatementJS @@ -474,7 +474,7 @@ describe("The NextTerminalAfterManyWalker", () => { }) it("can compute the next possible token types after the MANY in paramSpec inside ActionDec", () => { - let result = new NextTerminalAfterManyWalker(actionDec, 1).startWalking() + const result = new NextTerminalAfterManyWalker(actionDec, 1).startWalking() expect(result.occurrence).to.equal(1) expect(result.token).to.equal(RParenTok) }) @@ -482,7 +482,7 @@ describe("The NextTerminalAfterManyWalker", () => { describe("The NextTerminalAfterManySepWalker", () => { it("can compute the next possible token types after the MANY_SEP in QualifiedName", () => { - let result = new NextTerminalAfterManySepWalker( + const result = new NextTerminalAfterManySepWalker( callArguments, 1 ).startWalking() @@ -493,7 +493,7 @@ describe("The NextTerminalAfterManySepWalker", () => { }) it("can compute the next possible token types after the MANY in paramSpec inside ActionDec", () => { - let result = new NextTerminalAfterManySepWalker( + const result = new NextTerminalAfterManySepWalker( actionDecSep, 1 ).startWalking() @@ -504,21 +504,21 @@ describe("The NextTerminalAfterManySepWalker", () => { describe("The NextTerminalAfterAtLeastOneWalker", () => { it("can compute the next possible token types after an AT_LEAST_ONE production", () => { - let result = new NextTerminalAfterAtLeastOneWalker( + const result = new NextTerminalAfterAtLeastOneWalker( atLeastOneRule, 1 ).startWalking() expect(result.occurrence).to.equal(2) expect(result.token).to.equal(DotTok) - let result2 = new NextTerminalAfterAtLeastOneWalker( + const result2 = new NextTerminalAfterAtLeastOneWalker( atLeastOneRule, 2 ).startWalking() expect(result2.occurrence).to.equal(1) expect(result2.token).to.equal(DotTok) - let result3 = new NextTerminalAfterAtLeastOneWalker( + const result3 = new 
NextTerminalAfterAtLeastOneWalker( atLeastOneRule, 3 ).startWalking() @@ -527,7 +527,7 @@ describe("The NextTerminalAfterAtLeastOneWalker", () => { }) it("can compute the next possible token types after an AT_LEAST_ONE production - EMPTY", () => { - let atLeastOneRule = new Rule({ + const atLeastOneRule = new Rule({ name: "atLeastOneRule", definition: [ new RepetitionMandatory({ @@ -541,7 +541,7 @@ describe("The NextTerminalAfterAtLeastOneWalker", () => { ] }) - let result = new NextTerminalAfterAtLeastOneWalker( + const result = new NextTerminalAfterAtLeastOneWalker( atLeastOneRule, 1 ).startWalking() @@ -552,21 +552,21 @@ describe("The NextTerminalAfterAtLeastOneWalker", () => { describe("The NextTerminalAfterAtLeastOneSepWalker", () => { it("can compute the next possible token types after an AT_LEAST_ONE_SEP production", () => { - let result = new NextTerminalAfterAtLeastOneSepWalker( + const result = new NextTerminalAfterAtLeastOneSepWalker( atLeastOneSepRule, 1 ).startWalking() expect(result.occurrence).to.equal(2) expect(result.token).to.equal(DotTok) - let result2 = new NextTerminalAfterAtLeastOneSepWalker( + const result2 = new NextTerminalAfterAtLeastOneSepWalker( atLeastOneSepRule, 2 ).startWalking() expect(result2.occurrence).to.equal(1) expect(result2.token).to.equal(DotTok) - let result3 = new NextTerminalAfterAtLeastOneSepWalker( + const result3 = new NextTerminalAfterAtLeastOneSepWalker( atLeastOneSepRule, 3 ).startWalking() @@ -575,7 +575,7 @@ describe("The NextTerminalAfterAtLeastOneSepWalker", () => { }) it("can compute the next possible token types after an AT_LEAST_ONE_SEP production EMPTY", () => { - let result = new NextTerminalAfterAtLeastOneSepWalker( + const result = new NextTerminalAfterAtLeastOneSepWalker( qualifiedNameSep, 1 ).startWalking() @@ -611,7 +611,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { context("can calculate the next possible paths in a", () => { it("Sequence", () => { - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new Terminal({ terminalType: Beta }), new Terminal({ terminalType: Gamma }) @@ -632,7 +632,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Optional", () => { - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new Option({ definition: [new Terminal({ terminalType: Beta })] @@ -658,7 +658,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Alternation", () => { - let alts = [ + const alts = [ new Alternation({ definition: [ new Alternative({ @@ -704,7 +704,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition", () => { - let rep = [ + const rep = [ new Repetition({ definition: [ new Terminal({ terminalType: Alpha }), @@ -735,7 +735,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Mandatory Repetition", () => { - let repMand = [ + const repMand = [ new RepetitionMandatory({ definition: [ new Terminal({ terminalType: Alpha }), @@ -764,7 +764,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { it("Repetition with Separator", () => { // same as Mandatory Repetition because currently possiblePaths only cares about // the first repetition. 
- let rep = [ + const rep = [ new RepetitionWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -797,7 +797,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { it("Mandatory Repetition with Separator", () => { // same as Mandatory Repetition because currently possiblePaths only cares about // the first repetition. - let repMandSep = [ + const repMandSep = [ new RepetitionMandatoryWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -829,12 +829,12 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("NonTerminal", () => { - let someSubRule = new Rule({ + const someSubRule = new Rule({ name: "blah", definition: [new Terminal({ terminalType: Beta })] }) - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new NonTerminal({ nonTerminalName: "blah", @@ -868,7 +868,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { } it("Sequence positive", () => { - let seq = [ + const seq = [ new Alternative({ definition: [ new Terminal({ terminalType: Alpha }), @@ -909,7 +909,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Sequence negative", () => { - let seq = [ + const seq = [ new Alternative({ definition: [ new Terminal({ terminalType: Alpha }), @@ -942,7 +942,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Optional positive", () => { - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new Option({ definition: [new Terminal({ terminalType: Beta })] @@ -966,7 +966,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Optional Negative", () => { - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new Option({ definition: [new Terminal({ terminalType: Beta })] @@ -996,7 +996,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Alternation positive", () => { - let alts = [ + const alts = [ new Alternation({ definition: [ new Alternative({ @@ -1053,7 +1053,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Alternation Negative", () => { - let alts = [ + const alts = [ new Alternation({ definition: [ new Alternative({ @@ -1101,7 +1101,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition - positive", () => { - let rep = [ + const rep = [ new Repetition({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1164,7 +1164,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition - negative", () => { - let rep = [ + const rep = [ new Repetition({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1204,7 +1204,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Mandatory Repetition - positive", () => { - let repMand = [ + const repMand = [ new RepetitionMandatory({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1267,7 +1267,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Mandatory Repetition - negative", () => { - let repMand = [ + const repMand = [ new RepetitionMandatory({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1312,7 +1312,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition with Separator - positive", () => { - let repSep = [ + const repSep = [ new RepetitionWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1376,7 +1376,7 @@ describe("The chevrotain grammar interpreter capabilities", () => 
{ }) it("Repetition with Separator - negative", () => { - let repMand = [ + const repMand = [ new RepetitionWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1422,7 +1422,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition with Separator Mandatory - positive", () => { - let repSep = [ + const repSep = [ new RepetitionMandatoryWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1486,7 +1486,7 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("Repetition with Separator Mandatory - negative", () => { - let repMand = [ + const repMand = [ new RepetitionMandatoryWithSeparator({ definition: [ new Terminal({ terminalType: Alpha }), @@ -1540,12 +1540,12 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("NonTerminal - positive", () => { - let someSubRule = new Rule({ + const someSubRule = new Rule({ name: "blah", definition: [new Terminal({ terminalType: Beta })] }) - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new NonTerminal({ nonTerminalName: "blah", @@ -1585,12 +1585,12 @@ describe("The chevrotain grammar interpreter capabilities", () => { }) it("NonTerminal - negative", () => { - let someSubRule = new Rule({ + const someSubRule = new Rule({ name: "blah", definition: [new Terminal({ terminalType: Beta })] }) - let seq = [ + const seq = [ new Terminal({ terminalType: Alpha }), new NonTerminal({ nonTerminalName: "blah", diff --git a/packages/chevrotain/test/parse/grammar/lookahead_spec.ts b/packages/chevrotain/test/parse/grammar/lookahead_spec.ts index 67514df81..27a7d7e10 100644 --- a/packages/chevrotain/test/parse/grammar/lookahead_spec.ts +++ b/packages/chevrotain/test/parse/grammar/lookahead_spec.ts @@ -49,7 +49,7 @@ const AsteriskTok = createToken({ name: "AsteriskTok" }) const EntityTok = createToken({ name: "EntityTok" }) const KeyTok = createToken({ name: "KeyTok" }) -let atLeastOneRule = new Rule({ +const atLeastOneRule = new Rule({ name: "atLeastOneRule", definition: [ new RepetitionMandatory({ @@ -71,7 +71,7 @@ let atLeastOneRule = new Rule({ ] }) -let atLeastOneSepRule = new Rule({ +const atLeastOneSepRule = new Rule({ name: "atLeastOneSepRule", definition: [ new RepetitionMandatoryWithSeparator({ @@ -96,7 +96,7 @@ let atLeastOneSepRule = new Rule({ ] }) -let qualifiedName = new Rule({ +const qualifiedName = new Rule({ name: "qualifiedName", definition: [ new Terminal({ terminalType: IdentTok }), @@ -109,7 +109,7 @@ let qualifiedName = new Rule({ ] }) -let qualifiedNameSep = new Rule({ +const qualifiedNameSep = new Rule({ name: "qualifiedNameSep", definition: [ new RepetitionMandatoryWithSeparator({ @@ -119,7 +119,7 @@ let qualifiedNameSep = new Rule({ ] }) -let paramSpec = new Rule({ +const paramSpec = new Rule({ name: "paramSpec", definition: [ new Terminal({ terminalType: IdentTok }), @@ -137,7 +137,7 @@ let paramSpec = new Rule({ ] }) -let actionDec = new Rule({ +const actionDec = new Rule({ name: "actionDec", definition: [ new Terminal({ terminalType: ActionTok }), @@ -176,7 +176,7 @@ let actionDec = new Rule({ ] }) -let actionDecSep = new Rule({ +const actionDecSep = new Rule({ name: "actionDecSep", definition: [ new Terminal({ terminalType: ActionTok }), @@ -209,7 +209,7 @@ let actionDecSep = new Rule({ ] }) -let manyActions = new Rule({ +const manyActions = new Rule({ name: "manyActions", definition: [ new Repetition({ @@ -224,7 +224,7 @@ let manyActions = new Rule({ ] }) -let cardinality = new Rule({ +const cardinality 
= new Rule({ name: "cardinality", definition: [ new Terminal({ terminalType: LSquareTok }), @@ -249,7 +249,7 @@ let cardinality = new Rule({ ] }) -let assignedTypeSpec = new Rule({ +const assignedTypeSpec = new Rule({ name: "assignedTypeSpec", definition: [ new Terminal({ terminalType: ColonTok }), @@ -269,7 +269,7 @@ let assignedTypeSpec = new Rule({ ] }) -let lotsOfOrs = new Rule({ +const lotsOfOrs = new Rule({ name: "lotsOfOrs", definition: [ new Alternation({ @@ -325,7 +325,7 @@ let lotsOfOrs = new Rule({ ] }) -let emptyAltOr = new Rule({ +const emptyAltOr = new Rule({ name: "emptyAltOr", definition: [ new Alternation({ @@ -352,7 +352,7 @@ let emptyAltOr = new Rule({ ] }) -let callArguments = new Rule({ +const callArguments = new Rule({ name: "callArguments", definition: [ new RepetitionWithSeparator({ @@ -460,10 +460,10 @@ context("lookahead specs", () => { describe("The Grammar Lookahead namespace", () => { it("can compute the lookahead function for the first OPTION in ActionDec", () => { - let colonMock = new ColonParserMock() - let indentMock = new IdentParserMock() + const colonMock = new ColonParserMock() + const indentMock = new IdentParserMock() - let laFunc = buildLookaheadFuncForOptionalProd( + const laFunc = buildLookaheadFuncForOptionalProd( 1, actionDec, 1, @@ -477,10 +477,10 @@ context("lookahead specs", () => { }) it("can compute the lookahead function for the second OPTION in ActionDec", () => { - let colonParserMock = new ColonParserMock() - let identParserMock = new IdentParserMock() + const colonParserMock = new ColonParserMock() + const identParserMock = new IdentParserMock() - let laFunc = buildLookaheadFuncForOptionalProd( + const laFunc = buildLookaheadFuncForOptionalProd( 2, actionDec, 1, @@ -497,7 +497,7 @@ context("lookahead specs", () => { const B = createToken({ name: "B" }) const C = createToken({ name: "C", categories: [B] }) - let optionRule = new Rule({ + const optionRule = new Rule({ name: "optionRule", definition: [ new Option({ @@ -511,7 +511,7 @@ context("lookahead specs", () => { ] }) - let laFunc = buildLookaheadFuncForOptionalProd( + const laFunc = buildLookaheadFuncForOptionalProd( 1, optionRule, 1, @@ -531,10 +531,10 @@ context("lookahead specs", () => { }) it("can compute the lookahead function for the first MANY in ActionDec", () => { - let identParserMock = new IdentParserMock() - let commaParserMock = new CommaParserMock() + const identParserMock = new IdentParserMock() + const commaParserMock = new CommaParserMock() - let laFunc = buildLookaheadFuncForOptionalProd( + const laFunc = buildLookaheadFuncForOptionalProd( 1, actionDec, 1, @@ -548,12 +548,12 @@ context("lookahead specs", () => { }) it("can compute the lookahead function for lots of ORs sample", () => { - let keyParserMock = new KeyParserMock() - let entityParserMock = new EntityParserMock() - let colonParserMock = new ColonParserMock() - let commaParserMock = new CommaParserMock() + const keyParserMock = new KeyParserMock() + const entityParserMock = new EntityParserMock() + const colonParserMock = new ColonParserMock() + const commaParserMock = new CommaParserMock() - let laFunc = buildLookaheadFuncForOr( + const laFunc = buildLookaheadFuncForOr( 1, lotsOfOrs, 1, @@ -573,7 +573,7 @@ context("lookahead specs", () => { const B = createToken({ name: "B" }) const C = createToken({ name: "C", categories: [B] }) - let orRule = new Rule({ + const orRule = new Rule({ name: "orRule", definition: [ new Alternation({ @@ -619,11 +619,11 @@ context("lookahead specs", () => { }) it("can 
compute the lookahead function for EMPTY OR sample", () => { - let commaParserMock = new CommaParserMock() - let keyParserMock = new KeyParserMock() - let entityParserMock = new EntityParserMock() + const commaParserMock = new CommaParserMock() + const keyParserMock = new KeyParserMock() + const entityParserMock = new EntityParserMock() - let laFunc = buildLookaheadFuncForOr( + const laFunc = buildLookaheadFuncForOr( 1, emptyAltOr, 1, @@ -658,7 +658,7 @@ context("lookahead specs", () => { context("computing lookahead sequences for", () => { it("two simple one token alternatives", () => { - let alt1 = new Alternation({ + const alt1 = new Alternation({ definition: [ new Alternative({ definition: [new Terminal({ terminalType: Alpha })] @@ -671,14 +671,14 @@ context("lookahead specs", () => { }) ] }) - let alt2 = new Terminal({ terminalType: Gamma }) + const alt2 = new Terminal({ terminalType: Gamma }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) expect(actual).to.deep.equal([[[Alpha], [Beta]], [[Gamma]]]) }) it("three simple one token alternatives", () => { - let alt1 = new Alternation({ + const alt1 = new Alternation({ definition: [ new Alternative({ definition: [new Terminal({ terminalType: Alpha })] @@ -691,20 +691,20 @@ context("lookahead specs", () => { }) ] }) - let alt2 = new Terminal({ terminalType: Gamma }) - let alt3 = new Alternative({ + const alt2 = new Terminal({ terminalType: Gamma }) + const alt3 = new Alternative({ definition: [ new Terminal({ terminalType: Delta }), new Terminal({ terminalType: Charlie }) ] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2, alt3], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2, alt3], 5) expect(actual).to.deep.equal([[[Alpha], [Beta]], [[Gamma]], [[Delta]]]) }) it("two complex multi token alternatives", () => { - let alt1 = new Alternation({ + const alt1 = new Alternation({ definition: [ new Alternative({ definition: [ @@ -724,7 +724,7 @@ context("lookahead specs", () => { }) ] }) - let alt2 = new Alternation({ + const alt2 = new Alternation({ definition: [ new Alternative({ definition: [ @@ -738,7 +738,7 @@ context("lookahead specs", () => { ] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) expect(actual).to.deep.equal([ [[Beta], [Alpha, Beta], [Alpha, Gamma]], [[Charlie], [Alpha, Delta]] @@ -746,7 +746,7 @@ context("lookahead specs", () => { }) it("three complex multi token alternatives", () => { - let alt1 = new Alternation({ + const alt1 = new Alternation({ definition: [ new Alternative({ definition: [ @@ -760,7 +760,7 @@ context("lookahead specs", () => { }) ] }) - let alt2 = new Alternation({ + const alt2 = new Alternation({ definition: [ new Alternative({ definition: [ @@ -779,7 +779,7 @@ context("lookahead specs", () => { }) ] }) - let alt3 = new Alternation({ + const alt3 = new Alternation({ definition: [ new Alternative({ definition: [ @@ -797,7 +797,7 @@ context("lookahead specs", () => { ] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2, alt3], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2, alt3], 5) expect(actual).to.deep.equal([ [[Beta], [Alpha, Beta, Gamma]], [[Charlie], [Gamma], [Alpha, Delta]], @@ -809,7 +809,7 @@ context("lookahead specs", () => { }) it("two complex multi token alternatives with shared prefix", () => { - let alt1 = new Alternative({ + const alt1 = new Alternative({ 
definition: [ new Terminal({ terminalType: Alpha }), new Terminal({ terminalType: Beta }), @@ -818,7 +818,7 @@ context("lookahead specs", () => { ] }) - let alt2 = new Alternative({ + const alt2 = new Alternative({ definition: [ new Terminal({ terminalType: Alpha }), new Terminal({ terminalType: Beta }), @@ -829,7 +829,7 @@ context("lookahead specs", () => { ] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) expect(actual).to.deep.equal([ [[Alpha, Beta, Charlie, Delta]], [[Alpha, Beta, Charlie, Delta, Gamma]] @@ -837,19 +837,19 @@ context("lookahead specs", () => { }) it("simple ambiguous alternatives", () => { - let alt1 = new Alternative({ + const alt1 = new Alternative({ definition: [new Terminal({ terminalType: Alpha })] }) - let alt2 = new Alternative({ + const alt2 = new Alternative({ definition: [new Terminal({ terminalType: Alpha })] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) expect(actual).to.deep.equal([[[Alpha]], [[Alpha]]]) }) it("complex(multi-token) ambiguous alternatives", () => { - let alt1 = new Alternative({ + const alt1 = new Alternative({ definition: [ new Terminal({ terminalType: Alpha }), new Terminal({ terminalType: Beta }), @@ -857,7 +857,7 @@ context("lookahead specs", () => { ] }) - let alt2 = new Alternative({ + const alt2 = new Alternative({ definition: [ new Terminal({ terminalType: Alpha }), new Terminal({ terminalType: Beta }), @@ -865,7 +865,7 @@ context("lookahead specs", () => { ] }) - let actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) + const actual = lookAheadSequenceFromAlternatives([alt1, alt2], 5) expect(actual).to.deep.equal([ [[Alpha, Beta, Charlie]], [[Alpha, Beta, Charlie]] @@ -893,12 +893,12 @@ context("lookahead specs", () => { } it("inheritance Alternative alternatives - positive", () => { - let alternatives = [ + const alternatives = [ [[ExtendsAlphaAlpha]], // 0 [[ExtendsAlpha]], // 1 [[Alpha]] // 2 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -911,12 +911,12 @@ context("lookahead specs", () => { }) it("simple alternatives - positive", () => { - let alternatives = [ + const alternatives = [ [[Alpha], [Beta]], // 0 [[Delta], [Gamma]], // 1 [[Charlie]] // 2 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -931,11 +931,11 @@ context("lookahead specs", () => { }) it("simple alternatives - negative", () => { - let alternatives = [ + const alternatives = [ [[Alpha], [Beta]], // 0 [[Delta], [Gamma]] // 1 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -947,7 +947,7 @@ context("lookahead specs", () => { }) it("complex alternatives - positive", () => { - let alternatives = [ + const alternatives = [ [ [Alpha, Beta, Gamma], [Alpha, Beta, Delta] @@ -955,7 +955,7 @@ context("lookahead specs", () => { [[Alpha, Beta, Beta]], // 1 [[Alpha, Beta]] // 2 - Prefix of '1' alternative ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -972,7 +972,7 @@ context("lookahead specs", () => { }) it("complex alternatives - negative", () => { - let alternatives = [ + const alternatives = [ [ 
[Alpha, Beta, Gamma], [Alpha, Beta, Delta] @@ -980,7 +980,7 @@ context("lookahead specs", () => { [[Alpha, Beta, Beta]], // 1 [[Alpha, Beta], [Gamma]] // 2 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -996,12 +996,12 @@ context("lookahead specs", () => { }) it("complex alternatives with inheritance - positive", () => { - let alternatives = [ + const alternatives = [ [[ExtendsAlpha, Beta]], // 0 [[Alpha, Beta]] // 1 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -1018,12 +1018,12 @@ context("lookahead specs", () => { }) it("complex alternatives with inheritance - negative", () => { - let alternatives = [ + const alternatives = [ [[ExtendsAlpha, Beta]], // 0 [[Alpha, Gamma]] // 1 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -1036,11 +1036,11 @@ context("lookahead specs", () => { }) it("Empty alternatives", () => { - let alternatives = [ + const alternatives = [ [[Alpha]], // 0 [[]] // 1 ] - let laFunc = buildAlternativesLookAheadFunc( + const laFunc = buildAlternativesLookAheadFunc( alternatives, false, tokenStructuredMatcher, @@ -1053,8 +1053,8 @@ context("lookahead specs", () => { }) it("simple optional - positive", () => { - let alternative = [[Alpha], [Beta], [Charlie]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha], [Beta], [Charlie]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false @@ -1066,8 +1066,8 @@ context("lookahead specs", () => { }) it("simple optional - negative", () => { - let alternative = [[Alpha], [Beta], [Charlie]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha], [Beta], [Charlie]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false @@ -1078,8 +1078,8 @@ context("lookahead specs", () => { }) it("complex optional - positive", () => { - let alternative = [[Alpha, Beta, Gamma], [Beta], [Charlie, Delta]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha, Beta, Gamma], [Beta], [Charlie, Delta]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false @@ -1091,8 +1091,8 @@ context("lookahead specs", () => { }) it("complex optional - Negative", () => { - let alternative = [[Alpha, Beta, Gamma], [Beta], [Charlie, Delta]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha, Beta, Gamma], [Beta], [Charlie, Delta]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false @@ -1104,8 +1104,8 @@ context("lookahead specs", () => { }) it("complex optional with inheritance - positive", () => { - let alternative = [[Alpha, ExtendsAlpha, ExtendsAlphaAlpha]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha, ExtendsAlpha, ExtendsAlphaAlpha]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false @@ -1136,8 +1136,8 @@ context("lookahead specs", () => { }) it("complex optional with inheritance - negative", () => { - let alternative = [[Alpha, ExtendsAlpha, ExtendsAlphaAlpha]] - let laFunc = buildSingleAlternativeLookaheadFunction( + const alternative = [[Alpha, ExtendsAlpha, 
ExtendsAlphaAlpha]] + const laFunc = buildSingleAlternativeLookaheadFunction( alternative, tokenStructuredMatcher, false diff --git a/packages/chevrotain/test/parse/grammar/resolver_spec.ts b/packages/chevrotain/test/parse/grammar/resolver_spec.ts index 5df38e3c4..7e27822cb 100644 --- a/packages/chevrotain/test/parse/grammar/resolver_spec.ts +++ b/packages/chevrotain/test/parse/grammar/resolver_spec.ts @@ -20,11 +20,11 @@ import { forEach, map, uniq } from "../../../src/utils/utils" describe("The RefResolverVisitor", () => { it("will fail when trying to resolve a ref to a grammar rule that does not exist", () => { - let ref = new NonTerminal({ nonTerminalName: "missingRule" }) - let topLevel = new Rule({ name: "TOP", definition: [ref] }) - let topLevelRules = {} + const ref = new NonTerminal({ nonTerminalName: "missingRule" }) + const topLevel = new Rule({ name: "TOP", definition: [ref] }) + const topLevelRules = {} topLevelRules["TOP"] = topLevel - let resolver = new GastRefResolverVisitor( + const resolver = new GastRefResolverVisitor( topLevelRules, defaultGrammarResolverErrorProvider ) diff --git a/packages/chevrotain/test/parse/grammar/samples.ts b/packages/chevrotain/test/parse/grammar/samples.ts index 73904643d..ea000d0de 100644 --- a/packages/chevrotain/test/parse/grammar/samples.ts +++ b/packages/chevrotain/test/parse/grammar/samples.ts @@ -39,7 +39,7 @@ export const RequiredTok = createToken({ name: "RequiredTok", pattern: /NA/ }) export const KeyTok = createToken({ name: "KeyTok", pattern: /NA/ }) export const ElementTok = createToken({ name: "ElementTok", pattern: /NA/ }) -export let atLeastOneRule = new Rule({ +export const atLeastOneRule = new Rule({ name: "atLeastOneRule", definition: [ new RepetitionMandatory({ @@ -61,7 +61,7 @@ export let atLeastOneRule = new Rule({ ] }) -export let atLeastOneSepRule = new Rule({ +export const atLeastOneSepRule = new Rule({ name: "atLeastOneSepRule", definition: [ new RepetitionMandatoryWithSeparator({ @@ -86,7 +86,7 @@ export let atLeastOneSepRule = new Rule({ ] }) -export let qualifiedName = new Rule({ +export const qualifiedName = new Rule({ name: "qualifiedName", definition: [ new Terminal({ terminalType: IdentTok }), @@ -99,7 +99,7 @@ export let qualifiedName = new Rule({ ] }) -export let qualifiedNameSep = new Rule({ +export const qualifiedNameSep = new Rule({ name: "qualifiedNameSep", definition: [ new RepetitionMandatoryWithSeparator({ @@ -109,7 +109,7 @@ export let qualifiedNameSep = new Rule({ ] }) -export let paramSpec = new Rule({ +export const paramSpec = new Rule({ name: "paramSpec", definition: [ new Terminal({ terminalType: IdentTok }), @@ -127,7 +127,7 @@ export let paramSpec = new Rule({ ] }) -export let actionDec = new Rule({ +export const actionDec = new Rule({ name: "actionDec", definition: [ new Terminal({ terminalType: ActionTok }), @@ -166,7 +166,7 @@ export let actionDec = new Rule({ ] }) -export let actionDecSep = new Rule({ +export const actionDecSep = new Rule({ name: "actionDecSep", definition: [ new Terminal({ terminalType: ActionTok }), @@ -199,7 +199,7 @@ export let actionDecSep = new Rule({ ] }) -export let manyActions = new Rule({ +export const manyActions = new Rule({ name: "manyActions", definition: [ new Repetition({ @@ -214,7 +214,7 @@ export let manyActions = new Rule({ ] }) -export let cardinality = new Rule({ +export const cardinality = new Rule({ name: "cardinality", definition: [ new Terminal({ terminalType: LSquareTok }), @@ -239,7 +239,7 @@ export let cardinality = new Rule({ ] }) -export let 
assignedTypeSpec = new Rule({ +export const assignedTypeSpec = new Rule({ name: "assignedTypeSpec", definition: [ new Terminal({ terminalType: ColonTok }), @@ -259,7 +259,7 @@ export let assignedTypeSpec = new Rule({ ] }) -export let lotsOfOrs = new Rule({ +export const lotsOfOrs = new Rule({ name: "lotsOfOrs", definition: [ new Alternation({ @@ -315,7 +315,7 @@ export let lotsOfOrs = new Rule({ ] }) -export let emptyAltOr = new Rule({ +export const emptyAltOr = new Rule({ name: "emptyAltOr", definition: [ new Alternation({ @@ -342,7 +342,7 @@ export let emptyAltOr = new Rule({ ] }) -export let callArguments = new Rule({ +export const callArguments = new Rule({ name: "callArguments", definition: [ new RepetitionWithSeparator({ diff --git a/packages/chevrotain/test/parse/predicate_spec.ts b/packages/chevrotain/test/parse/predicate_spec.ts index 1a0a73f52..19426e0f4 100644 --- a/packages/chevrotain/test/parse/predicate_spec.ts +++ b/packages/chevrotain/test/parse/predicate_spec.ts @@ -20,7 +20,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" static PATTERN = /a/ } - let ALL_TOKENS = [A, B, C] + const ALL_TOKENS = [A, B, C] augmentTokenTypes(ALL_TOKENS) it("OPTION", () => { @@ -48,25 +48,25 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputGood = new PredicateOptionParser( + const gateOpenInputGood = new PredicateOptionParser( [createRegularToken(A)], true ).optionRule() expect(gateOpenInputGood).to.equal("entered!") - let gateOpenInputBad = new PredicateOptionParser( + const gateOpenInputBad = new PredicateOptionParser( [createRegularToken(B)], true ).optionRule() expect(gateOpenInputBad).to.equal("not entered!") - let gateClosedInputGood = new PredicateOptionParser( + const gateClosedInputGood = new PredicateOptionParser( [createRegularToken(A)], false ).optionRule() expect(gateClosedInputGood).to.equal("not entered!") - let gateClosedInputBad = new PredicateOptionParser( + const gateClosedInputBad = new PredicateOptionParser( [createRegularToken(B)], false ).optionRule() @@ -99,25 +99,25 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputGood = new PredicateManyParser( + const gateOpenInputGood = new PredicateManyParser( [createRegularToken(A), createRegularToken(A)], true ).manyRule() expect(gateOpenInputGood).to.equal("entered!") - let gateOpenInputBad = new PredicateManyParser( + const gateOpenInputBad = new PredicateManyParser( [createRegularToken(B)], true ).manyRule() expect(gateOpenInputBad).to.equal("not entered!") - let gateClosedInputGood = new PredicateManyParser( + const gateClosedInputGood = new PredicateManyParser( [createRegularToken(A), createRegularToken(A)], false ).manyRule() expect(gateClosedInputGood).to.equal("not entered!") - let gateClosedInputBad = new PredicateManyParser( + const gateClosedInputBad = new PredicateManyParser( [createRegularToken(B)], false ).manyRule() @@ -150,13 +150,13 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputGood = new PredicateAtLeastOneParser( + const gateOpenInputGood = new PredicateAtLeastOneParser( [createRegularToken(A), createRegularToken(A)], true ).atLeastOneRule() expect(gateOpenInputGood).to.equal("entered!") - let gateOpenInputBadParser = new PredicateAtLeastOneParser( + const gateOpenInputBadParser = new PredicateAtLeastOneParser( [createRegularToken(B)], true ) @@ -166,7 +166,7 @@ describe("The chevrotain support for 
custom gates/predicates on DSL production:" EarlyExitException ) - let gateClosedInputGood = new PredicateAtLeastOneParser( + const gateClosedInputGood = new PredicateAtLeastOneParser( [createRegularToken(A), createRegularToken(A)], false ) @@ -176,7 +176,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" EarlyExitException ) - let gateClosedInputBad = new PredicateAtLeastOneParser( + const gateClosedInputBad = new PredicateAtLeastOneParser( [createRegularToken(B)], false ) @@ -224,31 +224,31 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputA = new PredicateOrParser( + const gateOpenInputA = new PredicateOrParser( [createRegularToken(A)], true ).orRule() expect(gateOpenInputA).to.equal("A") - let gateOpenInputB = new PredicateOrParser( + const gateOpenInputB = new PredicateOrParser( [createRegularToken(B)], true ).orRule() expect(gateOpenInputB).to.equal("B") - let gateOpenInputC = new PredicateOrParser( + const gateOpenInputC = new PredicateOrParser( [createRegularToken(C)], true ).orRule() expect(gateOpenInputC).to.equal("C") - let gateClosedInputA = new PredicateOrParser( + const gateClosedInputA = new PredicateOrParser( [createRegularToken(A)], false ).orRule() expect(gateClosedInputA).to.equal("A") - let gateClosedInputBad = new PredicateOrParser( + const gateClosedInputBad = new PredicateOrParser( [createRegularToken(B)], false ) @@ -258,7 +258,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" NoViableAltException ) - let gateClosedInputC = new PredicateOrParser( + const gateClosedInputC = new PredicateOrParser( [createRegularToken(C)], false ).orRule() @@ -288,13 +288,13 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputA = new PredicateWithRuleOrParser([ + const gateOpenInputA = new PredicateWithRuleOrParser([ createRegularToken(A, "a") ]).topRule(1, [true]) expect(gateOpenInputA).to.equal("a") // if the predicate function still kept a reference via a closure to the original param this will not work. - let gateOpenInputB = new PredicateWithRuleOrParser([ + const gateOpenInputB = new PredicateWithRuleOrParser([ createRegularToken(B, "b") ]).topRule(1, [false]) expect(gateOpenInputB).to.equal("b") @@ -319,7 +319,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" const parser = new PredicateWithRuleOptionParser([ createRegularToken(B, "b") ]) - let gateOpenInputB = parser.topRule(1, [false]) + const gateOpenInputB = parser.topRule(1, [false]) expect(gateOpenInputB).to.equal("b") // // if the predicate function still kept a reference via a closure to the original param this will not work. @@ -352,14 +352,14 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputB = new PredicateWithRuleManyParser([ + const gateOpenInputB = new PredicateWithRuleManyParser([ createRegularToken(B, "b") ]).topRule(1, [false]) expect(gateOpenInputB).to.equal("b") // if the predicate function still kept a reference via a closure to the original param this will not work. 
// because the <() => param> in the MANY will ALWAYS return false (the original param) - let gateOpenInputA = new PredicateWithRuleManyParser([ + const gateOpenInputA = new PredicateWithRuleManyParser([ createRegularToken(A, "a"), createRegularToken(A, "a"), createRegularToken(A, "a"), @@ -401,7 +401,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" }) } - let gateOpenInputB = new PredicateWithRuleAtLeastOneParser([ + const gateOpenInputB = new PredicateWithRuleAtLeastOneParser([ createRegularToken(A, "a"), createRegularToken(B, "b") ]).topRule(1, [false]) @@ -409,7 +409,7 @@ describe("The chevrotain support for custom gates/predicates on DSL production:" // if the predicate function still kept a reference via a closure to the original param this will not work. // because the <() => param> in the AT_LEAST_ONE will ALWAYS return false (the original param) - let gateOpenInputA = new PredicateWithRuleAtLeastOneParser([ + const gateOpenInputA = new PredicateWithRuleAtLeastOneParser([ createRegularToken(A, "a"), createRegularToken(A, "a"), createRegularToken(A, "a"), diff --git a/packages/chevrotain/test/parse/recognizer/recognizer_config_spec.ts b/packages/chevrotain/test/parse/recognizer/recognizer_config_spec.ts index 58c7df37c..fdeb947db 100644 --- a/packages/chevrotain/test/parse/recognizer/recognizer_config_spec.ts +++ b/packages/chevrotain/test/parse/recognizer/recognizer_config_spec.ts @@ -28,7 +28,7 @@ describe("The Recognizer's Configuration", () => { } } - let parser = new EmptyConfigParser() + const parser = new EmptyConfigParser() expect((parser).recoveryEnabled).to.be.false expect((parser).maxLookahead).to.equal(3) expect((parser).nodeLocationTracking).to.be.equal("none") @@ -43,7 +43,7 @@ describe("The Recognizer's Configuration", () => { } } - let parser = new NoConfigParser() + const parser = new NoConfigParser() expect((parser).recoveryEnabled).to.be.false expect((parser).maxLookahead).to.equal(3) expect((parser).nodeLocationTracking).to.be.equal("none") diff --git a/packages/chevrotain/test/parse/recognizer/rules_override_spec.ts b/packages/chevrotain/test/parse/recognizer/rules_override_spec.ts index 394ab0496..1e78cb113 100644 --- a/packages/chevrotain/test/parse/recognizer/rules_override_spec.ts +++ b/packages/chevrotain/test/parse/recognizer/rules_override_spec.ts @@ -54,19 +54,19 @@ describe("The Recognizer's capabilities for overriding grammar productions", () }) } - let superParser = new SuperOverrideParser() + const superParser = new SuperOverrideParser() superParser.input = [createRegularToken(PlusTok)] - let superResult = superParser.topRule() + const superResult = superParser.topRule() expect(superResult).to.equal("yey") expect(superParser.errors).to.be.empty - let childParser = new ChildOverrideParser() + const childParser = new ChildOverrideParser() childParser.input = [ createRegularToken(MinusTok), createRegularToken(MinusTok), createRegularToken(MinusTok) ] - let childResult = childParser.topRule() + const childResult = childParser.topRule() expect(childResult).to.equal("ney") expect(superParser.errors).to.be.empty }) diff --git a/packages/chevrotain/test/parse/recognizer_lookahead_spec.ts b/packages/chevrotain/test/parse/recognizer_lookahead_spec.ts index 0852f18fa..a2297817f 100644 --- a/packages/chevrotain/test/parse/recognizer_lookahead_spec.ts +++ b/packages/chevrotain/test/parse/recognizer_lookahead_spec.ts @@ -5,17 +5,17 @@ import { IToken } from "../../api" import { isES2015MapSupported } from "../../src/utils/utils" 
describe("lookahead Regular Tokens Mode", () => { - let OneTok = createToken({ name: "OneTok" }) - let TwoTok = createToken({ name: "TwoTok" }) - let ThreeTok = createToken({ name: "ThreeTok" }) - let FourTok = createToken({ name: "FourTok" }) - let FiveTok = createToken({ name: "FiveTok" }) - let SixTok = createToken({ name: "SixTok" }) - let SevenTok = createToken({ name: "SevenTok" }) - let EightTok = createToken({ name: "EightTok" }) - let NineTok = createToken({ name: "NineTok" }) - let TenTok = createToken({ name: "TenTok" }) - let Comma = createToken({ name: "Comma" }) + const OneTok = createToken({ name: "OneTok" }) + const TwoTok = createToken({ name: "TwoTok" }) + const ThreeTok = createToken({ name: "ThreeTok" }) + const FourTok = createToken({ name: "FourTok" }) + const FiveTok = createToken({ name: "FiveTok" }) + const SixTok = createToken({ name: "SixTok" }) + const SevenTok = createToken({ name: "SevenTok" }) + const EightTok = createToken({ name: "EightTok" }) + const NineTok = createToken({ name: "NineTok" }) + const TenTok = createToken({ name: "TenTok" }) + const Comma = createToken({ name: "Comma" }) const ALL_TOKENS = [ OneTok, @@ -92,38 +92,38 @@ describe("lookahead Regular Tokens Mode", () => { } it("can automatically compute lookahead for OPTION1", () => { - let input = [createRegularToken(OneTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(OneTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("1") }) it("can automatically compute lookahead for OPTION2", () => { - let input = [createRegularToken(TwoTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(TwoTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("2") }) it("can automatically compute lookahead for OPTION3", () => { - let input = [createRegularToken(ThreeTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(ThreeTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("3") }) it("can automatically compute lookahead for OPTION4", () => { - let input = [createRegularToken(FourTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(FourTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("4") }) it("can automatically compute lookahead for OPTION5", () => { - let input = [createRegularToken(FiveTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(FiveTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("5") }) it("can automatically compute lookahead for option(idx, ...)", () => { - let input = [createRegularToken(SixTok)] - let parser = new OptionsImplicitLookAheadParser(input) + const input = [createRegularToken(SixTok)] + const parser = new OptionsImplicitLookAheadParser(input) expect(parser.manyOptionsRule()).to.equal("6") }) }) @@ -207,67 +207,67 @@ describe("lookahead Regular Tokens Mode", () => { } it("can automatically compute lookahead for MANY1", () => { - let input = [createRegularToken(OneTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(OneTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("1") }) it("can automatically 
compute lookahead for MANY2", () => { - let input = [createRegularToken(TwoTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(TwoTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("2") }) it("can automatically compute lookahead for MANY3", () => { - let input = [createRegularToken(ThreeTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(ThreeTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("3") }) it("can automatically compute lookahead for MANY4", () => { - let input = [createRegularToken(FourTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(FourTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("4") }) it("can automatically compute lookahead for MANY5", () => { - let input = [createRegularToken(FiveTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(FiveTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("5") }) it("can automatically compute lookahead for MANY6", () => { - let input = [createRegularToken(SixTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(SixTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("6") }) it("can automatically compute lookahead for MANY7", () => { - let input = [createRegularToken(SevenTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(SevenTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("7") }) it("can automatically compute lookahead for MANY8", () => { - let input = [createRegularToken(EightTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(EightTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("8") }) it("can automatically compute lookahead for MANY9", () => { - let input = [createRegularToken(NineTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(NineTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("9") }) it("can automatically compute lookahead for many(idx, ...)", () => { - let input = [createRegularToken(TenTok)] - let parser = new ManyImplicitLookAheadParser(input) + const input = [createRegularToken(TenTok)] + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("10") }) it("can accept lookahead function param for flow mixing several MANYs", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(OneTok), createRegularToken(ThreeTok), @@ -275,7 +275,7 @@ describe("lookahead Regular Tokens Mode", () => { createRegularToken(ThreeTok), createRegularToken(FiveTok) ] - let parser = new ManyImplicitLookAheadParser(input) + const parser = new ManyImplicitLookAheadParser(input) expect(parser.manyRule()).to.equal("113335") }) }) @@ -303,7 +303,7 @@ describe("lookahead Regular Tokens Mode", () => { private parseManyRule(): any { let total = "" - let separators = [] + const separators = [] this.MANY_SEP1({ SEP: Comma, @@ -385,61 +385,61 @@ describe("lookahead Regular Tokens Mode", () => { } it("can automatically compute lookahead for MANY_SEP1", () => { - let input = 
[createRegularToken(OneTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(OneTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("1") }) it("can automatically compute lookahead for MANY_SEP2", () => { - let input = [createRegularToken(TwoTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(TwoTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("2") }) it("can automatically compute lookahead for MANY_SEP3", () => { - let input = [createRegularToken(ThreeTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(ThreeTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("3") }) it("can automatically compute lookahead for MANY_SEP4", () => { - let input = [createRegularToken(FourTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(FourTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("4") }) it("can automatically compute lookahead for MANY_SEP5", () => { - let input = [createRegularToken(FiveTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(FiveTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("5") }) it("can automatically compute lookahead for MANY_SEP6", () => { - let input = [createRegularToken(SixTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(SixTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("6") }) it("can automatically compute lookahead for MANY_SEP7", () => { - let input = [createRegularToken(SevenTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(SevenTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("7") }) it("can automatically compute lookahead for MANY_SEP8", () => { - let input = [createRegularToken(EightTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(EightTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("8") }) it("can automatically compute lookahead for MANY_SEP9", () => { - let input = [createRegularToken(NineTok)] - let parser = new ManySepImplicitLookAheadParser(input) + const input = [createRegularToken(NineTok)] + const parser = new ManySepImplicitLookAheadParser(input) expect(parser.manySepRule().total).to.equal("9") }) it("can accept lookahead function param for flow mixing several MANY_SEPs", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(Comma), createRegularToken(OneTok), @@ -450,8 +450,8 @@ describe("lookahead Regular Tokens Mode", () => { createRegularToken(ThreeTok), createRegularToken(FiveTok) ] - let parser = new ManySepImplicitLookAheadParser(input) - let result = parser.manySepRule() + const parser = new ManySepImplicitLookAheadParser(input) + const result = parser.manySepRule() expect(result.total).to.equal("113335") }) }) @@ -543,7 +543,7 @@ describe("lookahead Regular Tokens Mode", () => { } it("can accept lookahead function param for AT_LEAST_ONE", () => { - let input = [ + const 
input = [ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(TwoTok), @@ -559,18 +559,18 @@ describe("lookahead Regular Tokens Mode", () => { createRegularToken(NineTok), createRegularToken(TenTok) ] - let parser = new AtLeastOneImplicitLookAheadParser(input) + const parser = new AtLeastOneImplicitLookAheadParser(input) expect(parser.atLeastOneRule()).to.equal("122344567888910") }) it("will fail when zero occurrences of AT_LEAST_ONE in input", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(TwoTok) /*createToken(ThreeTok),*/, createRegularToken(FourTok), createRegularToken(FiveTok) ] - let parser = new AtLeastOneImplicitLookAheadParser(input) + const parser = new AtLeastOneImplicitLookAheadParser(input) expect(parser.atLeastOneRule()).to.equal("-666") }) }) @@ -609,7 +609,7 @@ describe("lookahead Regular Tokens Mode", () => { private parseAtLeastOneRule(): any { let total = "" - let separators = [] + const separators = [] this.AT_LEAST_ONE_SEP1({ SEP: Comma, @@ -691,7 +691,7 @@ describe("lookahead Regular Tokens Mode", () => { } it("can accept lookahead function param for AT_LEAST_ONE_SEP", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(Comma), @@ -710,19 +710,19 @@ describe("lookahead Regular Tokens Mode", () => { createRegularToken(EightTok), createRegularToken(NineTok) ] - let parser = new AtLeastOneSepImplicitLookAheadParser(input) - let parseResult = parser.atLeastOneSepRule() + const parser = new AtLeastOneSepImplicitLookAheadParser(input) + const parseResult = parser.atLeastOneSepRule() expect(parseResult.total).to.equal("1223445677789") }) it("will fail when zero occurrences of AT_LEAST_ONE_SEP in input", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(TwoTok), /*createToken(ThreeTok),*/ createRegularToken(FourTok), createRegularToken(FiveTok) ] - let parser = new AtLeastOneSepImplicitLookAheadParser(input) + const parser = new AtLeastOneSepImplicitLookAheadParser(input) expect(parser.atLeastOneSepRule().total).to.equal("-666") }) }) @@ -956,7 +956,7 @@ describe("lookahead Regular Tokens Mode", () => { } it("can compute the lookahead automatically for OR", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -964,13 +964,13 @@ describe("lookahead Regular Tokens Mode", () => { createRegularToken(FiveTok), createRegularToken(ThreeTok) ] - let parser = new OrImplicitLookAheadParser(input) + const parser = new OrImplicitLookAheadParser(input) expect(parser.orRule()).to.equal("A1B2C3D4E5F3") }) it("will fail when none of the alternatives match", () => { - let input = [createRegularToken(SixTok)] - let parser = new OrImplicitLookAheadParser(input) + const input = [createRegularToken(SixTok)] + const parser = new OrImplicitLookAheadParser(input) expect(parser.orRule()).to.equal("-666") }) }) @@ -1275,20 +1275,20 @@ describe("lookahead Regular Tokens Mode", () => { } it("can compute the lookahead automatically for OR", () => { - let input = [ + const input = [ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), createRegularToken(FourTok), createRegularToken(FiveTok) ] - let parser = new OrImplicitLookAheadParserIgnoreAmbiguities(input) + const parser = new OrImplicitLookAheadParserIgnoreAmbiguities(input) expect(parser.orRule()).to.equal("A1B2C3D4E5") }) it("will fail when none of the alternatives match", 
() => { - let input = [createRegularToken(SixTok)] - let parser = new OrImplicitLookAheadParserIgnoreAmbiguities(input) + const input = [createRegularToken(SixTok)] + const parser = new OrImplicitLookAheadParserIgnoreAmbiguities(input) expect(parser.orRule()).to.equal("-666") }) }) @@ -1317,13 +1317,13 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new MultiTokenLookAheadForOptionParser([ + const parser = new MultiTokenLookAheadForOptionParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(parser.rule()).to.equal("OPTION Not Taken") - let parser2 = new MultiTokenLookAheadForOptionParser([ + const parser2 = new MultiTokenLookAheadForOptionParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1356,13 +1356,13 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new MultiTokenLookAheadForManyParser([ + const parser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(parser.rule()).to.equal(0) - let oneIterationParser = new MultiTokenLookAheadForManyParser([ + const oneIterationParser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1371,7 +1371,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForManyParser([ + const twoIterationsParser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1411,13 +1411,13 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new MultiTokenLookAheadForManySepParser([ + const parser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(parser.rule()).to.equal(0) - let oneIterationParser = new MultiTokenLookAheadForManySepParser([ + const oneIterationParser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1426,7 +1426,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForManySepParser([ + const twoIterationsParser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1483,26 +1483,26 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let alt1Parser = new MultiTokenLookAheadForOrParser([ + const alt1Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(OneTok), createRegularToken(OneTok) ]) expect(alt1Parser.orRule()).to.equal("alt1 Taken") - let alt2Parser = new MultiTokenLookAheadForOrParser([ + const alt2Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok) ]) expect(alt2Parser.orRule()).to.equal("alt2 Taken") - let alt3Parser = new MultiTokenLookAheadForOrParser([ + const alt3Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(alt3Parser.orRule()).to.equal("alt3 Taken") - let alt4Parser = new MultiTokenLookAheadForOrParser([ + const alt4Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(FourTok) ]) expect(alt4Parser.orRule()).to.equal("alt4 Taken") @@ -1531,7 +1531,7 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let 
oneIterationParser = new MultiTokenLookAheadForAtLeastOneParser([ + const oneIterationParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1540,7 +1540,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ + const twoIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1553,7 +1553,7 @@ describe("lookahead Regular Tokens Mode", () => { expect(twoIterationsParser.rule()).to.equal(2) - let threeIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ + const threeIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1596,7 +1596,7 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let oneIterationParser = new MultiTokenLookAheadForAtLeastOneSepParser([ + const oneIterationParser = new MultiTokenLookAheadForAtLeastOneSepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1605,20 +1605,22 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser([ - createRegularToken(OneTok), - createRegularToken(TwoTok), - createRegularToken(ThreeTok), - createRegularToken(Comma), - createRegularToken(OneTok), - createRegularToken(TwoTok), - createRegularToken(ThreeTok), - createRegularToken(OneTok), - createRegularToken(TwoTok) - ]) + const twoIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( + [ + createRegularToken(OneTok), + createRegularToken(TwoTok), + createRegularToken(ThreeTok), + createRegularToken(Comma), + createRegularToken(OneTok), + createRegularToken(TwoTok), + createRegularToken(ThreeTok), + createRegularToken(OneTok), + createRegularToken(TwoTok) + ] + ) expect(twoIterationsParser.rule()).to.equal(2) - let threeIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( + const threeIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( [ createRegularToken(OneTok), createRegularToken(TwoTok), @@ -1666,7 +1668,7 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new MultiTokenLookAheadForOptionParser([ + const parser = new MultiTokenLookAheadForOptionParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) @@ -1709,13 +1711,13 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new MultiTokenLookAheadForManyParser([ + const parser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(parser.rule()).to.equal(0) - let oneIterationParser = new MultiTokenLookAheadForManyParser([ + const oneIterationParser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1724,7 +1726,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForManyParser([ + const twoIterationsParser = new MultiTokenLookAheadForManyParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1765,13 +1767,13 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let parser = new 
MultiTokenLookAheadForManySepParser([ + const parser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(parser.rule()).to.equal(0) - let oneIterationParser = new MultiTokenLookAheadForManySepParser([ + const oneIterationParser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1780,7 +1782,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForManySepParser([ + const twoIterationsParser = new MultiTokenLookAheadForManySepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1850,20 +1852,20 @@ describe("lookahead Regular Tokens Mode", () => { // ]) // expect(alt1Parser.orRule()).to.equal("alt1 Taken") - let alt2Parser = new MultiTokenLookAheadForOrParser([ + const alt2Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok) ]) expect(alt2Parser.orRule()).to.equal("alt2 Taken") - let alt3Parser = new MultiTokenLookAheadForOrParser([ + const alt3Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(OneTok), createRegularToken(TwoTok) ]) expect(alt3Parser.orRule()).to.equal("alt3 Taken") - let alt4Parser = new MultiTokenLookAheadForOrParser([ + const alt4Parser = new MultiTokenLookAheadForOrParser([ createRegularToken(FourTok) ]) expect(alt4Parser.orRule()).to.equal("alt4 Taken") @@ -1895,7 +1897,7 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let oneIterationParser = new MultiTokenLookAheadForAtLeastOneParser([ + const oneIterationParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1904,7 +1906,7 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ + const twoIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1917,7 +1919,7 @@ describe("lookahead Regular Tokens Mode", () => { expect(twoIterationsParser.rule()).to.equal(2) - let threeIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ + const threeIterationsParser = new MultiTokenLookAheadForAtLeastOneParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1961,7 +1963,7 @@ describe("lookahead Regular Tokens Mode", () => { }) } - let oneIterationParser = new MultiTokenLookAheadForAtLeastOneSepParser([ + const oneIterationParser = new MultiTokenLookAheadForAtLeastOneSepParser([ createRegularToken(OneTok), createRegularToken(TwoTok), createRegularToken(ThreeTok), @@ -1970,20 +1972,22 @@ describe("lookahead Regular Tokens Mode", () => { ]) expect(oneIterationParser.rule()).to.equal(1) - let twoIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser([ - createRegularToken(OneTok), - createRegularToken(TwoTok), - createRegularToken(ThreeTok), - createRegularToken(Comma), - createRegularToken(OneTok), - createRegularToken(TwoTok), - createRegularToken(ThreeTok), - createRegularToken(OneTok), - createRegularToken(TwoTok) - ]) + const twoIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( + [ + createRegularToken(OneTok), + createRegularToken(TwoTok), + createRegularToken(ThreeTok), 
+ createRegularToken(Comma), + createRegularToken(OneTok), + createRegularToken(TwoTok), + createRegularToken(ThreeTok), + createRegularToken(OneTok), + createRegularToken(TwoTok) + ] + ) expect(twoIterationsParser.rule()).to.equal(2) - let threeIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( + const threeIterationsParser = new MultiTokenLookAheadForAtLeastOneSepParser( [ createRegularToken(OneTok), createRegularToken(TwoTok), @@ -2042,7 +2046,7 @@ describe("lookahead Regular Tokens Mode", () => { }) it("Won't throw NoViableAltException when the repetition appears twice", () => { - let input = [ + const input = [ createRegularToken(Comma), createRegularToken(Comma), createRegularToken(TwoTok) diff --git a/packages/chevrotain/test/parse/recognizer_spec.ts b/packages/chevrotain/test/parse/recognizer_spec.ts index 9c34e1b3a..1b7c692d7 100644 --- a/packages/chevrotain/test/parse/recognizer_spec.ts +++ b/packages/chevrotain/test/parse/recognizer_spec.ts @@ -26,12 +26,12 @@ function defineRecognizerSpecs( tokenMatcher ) { context("Recognizer " + contextName, () => { - let PlusTok = createToken({ name: "PlusTok" }) + const PlusTok = createToken({ name: "PlusTok" }) PlusTok.LABEL = "+" - let MinusTok = createToken({ name: "MinusTok" }) - let IntTok = createToken({ name: "IntTok" }) - let DotTok = createToken({ name: "DotTok" }) - let IdentTok = createToken({ name: "IdentTok" }) + const MinusTok = createToken({ name: "MinusTok" }) + const IntTok = createToken({ name: "IntTok" }) + const DotTok = createToken({ name: "DotTok" }) + const IdentTok = createToken({ name: "IdentTok" }) const ALL_TOKENS = [PlusTok, MinusTok, IntTok, IdentTok, DotTok] augmentTokenTypes(ALL_TOKENS) @@ -68,15 +68,15 @@ function defineRecognizerSpecs( }) } - let input = [ + const input = [ createTokenInstance(PlusTok), createTokenInstance(PlusTok), createTokenInstance(PlusTok), createTokenInstance(PlusTok), createTokenInstance(PlusTok) ] - let parser = new SubRuleTestParser(input) - let result = parser.topRule() + const parser = new SubRuleTestParser(input) + const result = parser.topRule() expect(result).to.equal("12345") }) @@ -130,7 +130,7 @@ function defineRecognizerSpecs( ) } - let input = [ + const input = [ createTokenInstance(PlusTok), createTokenInstance(PlusTok), createTokenInstance(PlusTok), @@ -138,8 +138,8 @@ function defineRecognizerSpecs( createTokenInstance(PlusTok), createTokenInstance(PlusTok) ] - let parser = new SubRuleArgsParser(input) - let result = parser.topRule() + const parser = new SubRuleArgsParser(input) + const result = parser.topRule() expect(result.letters).to.equal("abcdef") expect(result.numbers).to.equal("654321") }) @@ -177,22 +177,22 @@ function defineRecognizerSpecs( } it("can match an non-empty alternative in an OR with an empty alternative", () => { - let input = [createTokenInstance(PlusTok)] - let parser = new EmptyAltParser(input) + const input = [createTokenInstance(PlusTok)] + const parser = new EmptyAltParser(input) expect(parser.orRule()).to.equal("+") }) it("can match an empty alternative", () => { - let input = [] - let parser = new EmptyAltParser(input) + const input = [] + const parser = new EmptyAltParser(input) expect(parser.orRule()).to.equal("EMPTY_ALT") }) it("has a utility function for defining EMPTY ALTERNATIVES", () => { - let noArgsEmptyAlt = EMPTY_ALT() + const noArgsEmptyAlt = EMPTY_ALT() expect(noArgsEmptyAlt()).to.be.undefined - let valueEmptyAlt = EMPTY_ALT(666) + const valueEmptyAlt = EMPTY_ALT(666) expect(valueEmptyAlt()).to.equal(666) }) }) 
@@ -200,9 +200,9 @@ function defineRecognizerSpecs( describe("Token categories support", () => { it("Can consume a Token that belongs to multiple categories", () => { - let Keyword = createToken({ name: "Keyword" }) - let Literal = createToken({ name: "Literal" }) - let TrueLiteral = createToken({ + const Keyword = createToken({ name: "Keyword" }) + const Literal = createToken({ name: "Literal" }) + const TrueLiteral = createToken({ name: "TrueLiteral", categories: [Keyword, Literal] }) @@ -256,7 +256,7 @@ function defineRecognizerSpecs( ) private parseQualifiedName(): string[] { - let idents = [] + const idents = [] idents.push(this.CONSUME1(IdentTok).image) this.MANY({ @@ -291,7 +291,7 @@ function defineRecognizerSpecs( ) private parseQualifiedName(): string[] { - let idents = [] + const idents = [] idents.push(this.CONSUME1(IdentTok).image) this.CONSUME1(DotTok) @@ -370,7 +370,7 @@ function defineRecognizerSpecs( ) private parseQualifiedName(): string[] { - let idents = [] + const idents = [] idents.push(this.CONSUME1(IdentTok).image) this.AT_LEAST_ONE({ @@ -406,7 +406,7 @@ function defineRecognizerSpecs( ) private parseQualifiedName(): string[] { - let idents = [] + const idents = [] this.AT_LEAST_ONE_SEP({ SEP: DotTok, @@ -422,11 +422,11 @@ function defineRecognizerSpecs( } it("can CONSUME tokens with an index specifying the occurrence for the specific token in the current rule", () => { - let parser: any = new EmbeddedActionsParser(ALL_TOKENS, { + const parser: any = new EmbeddedActionsParser(ALL_TOKENS, { recoveryEnabled: true }) parser.reset() - let testInput = [ + const testInput = [ createTokenInstance(IntTok, "1"), createTokenInstance(PlusTok), createTokenInstance(IntTok, "2"), @@ -445,7 +445,7 @@ function defineRecognizerSpecs( }) it("will not perform inRepetition recovery while in backtracking mode", () => { - let parser: any = new EmbeddedActionsParser([PlusTok], {}) + const parser: any = new EmbeddedActionsParser([PlusTok], {}) parser.isBackTrackingStack.push(1) expect(parser.shouldInRepetitionRecoveryBeTried(MinusTok, 1)).to.equal( false @@ -454,7 +454,7 @@ function defineRecognizerSpecs( it("can perform in-repetition recovery for MANY grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -462,14 +462,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new ManyRepetitionRecovery(input) + const parser = new ManyRepetitionRecovery(input) expect(parser.qualifiedName()).to.deep.equal(["a", "b", "c"]) expect(parser.errors.length).to.equal(1) }) it("can disable in-repetition recovery for MANY grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -477,14 +477,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new ManyRepetitionRecovery(input, false) + const parser = new ManyRepetitionRecovery(input, false) expect(parser.qualifiedName()).to.deep.equal(["666"]) expect(parser.errors.length).to.equal(1) }) it("can perform in-repetition recovery for MANY_SEP grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -492,14 +492,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), 
createTokenInstance(IdentTok, "c") ] - let parser = new ManySepRepetitionRecovery(input) + const parser = new ManySepRepetitionRecovery(input) expect(parser.qualifiedName()).to.deep.equal(["a", "b", "c"]) expect(parser.errors.length).to.equal(1) }) it("can disable in-repetition recovery for MANY_SEP grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -507,28 +507,28 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new ManySepRepetitionRecovery(input, false) + const parser = new ManySepRepetitionRecovery(input, false) expect(parser.qualifiedName()).to.deep.equal(["333"]) expect(parser.errors.length).to.equal(1) }) it("can perform in-repetition recovery for MANY_SEP grammar rule #2", () => { // a.b..c...d - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(DotTok), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b") ] - let parser = new ManySepSubRuleRepetitionRecovery(input) + const parser = new ManySepSubRuleRepetitionRecovery(input) expect(parser.qualifiedName()).to.deep.equal(["a", "b"]) expect(parser.errors.length).to.equal(2) }) it("can perform in-repetition recovery for AT_LEAST_ONE grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -536,14 +536,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new AtLeastOneRepetitionRecovery(input) + const parser = new AtLeastOneRepetitionRecovery(input) expect(parser.qualifiedName()).to.deep.equal(["a", "b", "c"]) expect(parser.errors.length).to.equal(1) }) it("can disable in-repetition recovery for AT_LEAST_ONE grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -551,14 +551,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new AtLeastOneRepetitionRecovery(input, false) + const parser = new AtLeastOneRepetitionRecovery(input, false) expect(parser.qualifiedName()).to.deep.equal(["777"]) expect(parser.errors.length).to.equal(1) }) it("can perform in-repetition recovery for AT_LEAST_ONE_SEP grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -566,14 +566,14 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new AtLeastOneSepRepetitionRecovery(input) + const parser = new AtLeastOneSepRepetitionRecovery(input) expect(parser.qualifiedName()).to.deep.equal(["a", "b", "c"]) expect(parser.errors.length).to.equal(1) }) it("can disable in-repetition recovery for AT_LEAST_ONE_SEP grammar rule", () => { // a.b+.c - let input = [ + const input = [ createTokenInstance(IdentTok, "a"), createTokenInstance(DotTok), createTokenInstance(IdentTok, "b"), @@ -581,18 +581,18 @@ function defineRecognizerSpecs( createTokenInstance(DotTok), createTokenInstance(IdentTok, "c") ] - let parser = new AtLeastOneSepRepetitionRecovery(input, false) + const parser = new AtLeastOneSepRepetitionRecovery(input, false) expect(parser.qualifiedName()).to.deep.equal(["999"]) 
expect(parser.errors.length).to.equal(1) }) it("can perform single Token insertion", () => { - let A = createToken({ name: "A", pattern: /A/ }) - let B = createToken({ name: "B", pattern: /B/ }) - let C = createToken({ name: "C", pattern: /C/ }) - let allTokens = [A, B, C] + const A = createToken({ name: "A", pattern: /A/ }) + const B = createToken({ name: "B", pattern: /B/ }) + const C = createToken({ name: "C", pattern: /C/ }) + const allTokens = [A, B, C] - let lexer = new Lexer(allTokens, { + const lexer = new Lexer(allTokens, { positionTracking: "onlyOffset" }) @@ -608,16 +608,16 @@ function defineRecognizerSpecs( public topRule = this.RULE("topRule", () => { this.CONSUME(A) - let insertedToken = this.CONSUME(B) + const insertedToken = this.CONSUME(B) this.CONSUME(C) return insertedToken }) } - let lexResult = lexer.tokenize("AC") - let parser = new SingleTokenInsertRegular(lexResult.tokens) - let insertedToken = parser.topRule() + const lexResult = lexer.tokenize("AC") + const parser = new SingleTokenInsertRegular(lexResult.tokens) + const insertedToken = parser.topRule() expect(insertedToken.isInsertedInRecovery).to.be.true expect(insertedToken.image).to.equal("") @@ -658,7 +658,7 @@ function defineRecognizerSpecs( }) } - let parser = new OrExpressionParser([]) + const parser = new OrExpressionParser([]) parser.input = [createTokenInstance(MinusTok)] expect(parser.orRule()).to.equal(666) @@ -684,7 +684,7 @@ function defineRecognizerSpecs( }) } - let parser = new OptionExpressionParser([]) + const parser = new OptionExpressionParser([]) parser.input = [createTokenInstance(IdentTok)] expect(parser.optionRule()).to.equal("bamba") @@ -739,7 +739,7 @@ function defineRecognizerSpecs( }) it("can only SAVE_ERROR for recognition exceptions", () => { - let parser: any = new EmbeddedActionsParser([IntTok]) + const parser: any = new EmbeddedActionsParser([IntTok]) expect(() => parser.SAVE_ERROR(new Error("I am some random Error")) ).to.throw( @@ -749,7 +749,7 @@ function defineRecognizerSpecs( }) it("when it runs out of input EOF will be returned", () => { - let parser: any = new EmbeddedActionsParser([IntTok, PlusTok], {}) + const parser: any = new EmbeddedActionsParser([IntTok, PlusTok], {}) const sampleInput = [ createTokenInstance(IntTok, "1"), createTokenInstance(PlusTok) @@ -798,16 +798,16 @@ function defineRecognizerSpecs( }) } - let successfulOption = new OptionsReturnValueParser().trueOptionRule() + const successfulOption = new OptionsReturnValueParser().trueOptionRule() expect(successfulOption).to.equal(true) - let failedOption = new OptionsReturnValueParser().falseOptionRule() + const failedOption = new OptionsReturnValueParser().falseOptionRule() expect(failedOption).to.equal(undefined) }) it("will return false if a RecognitionException is thrown during backtracking and rethrow any other kind of Exception", () => { - let parser: any = new EmbeddedActionsParser([IntTok]) - let backTrackingThrows = parser.BACKTRACK( + const parser: any = new EmbeddedActionsParser([IntTok]) + const backTrackingThrows = parser.BACKTRACK( () => { throw new Error("division by zero, boom") }, @@ -819,13 +819,13 @@ function defineRecognizerSpecs( "division by zero, boom" ) - let throwsRecogError = () => { + const throwsRecogError = () => { throw new NotAllInputParsedException( "sad sad panda", createTokenInstance(PlusTok) ) } - let backTrackingFalse = parser.BACKTRACK(throwsRecogError, () => { + const backTrackingFalse = parser.BACKTRACK(throwsRecogError, () => { return true }) 
expect(backTrackingFalse.call(parser)).to.equal(false) @@ -897,25 +897,29 @@ function defineRecognizerSpecs( }) it("can be initialized with a vector of Tokens", () => { - let parser: any = new EmbeddedActionsParser([PlusTok, MinusTok, IntTok]) - let tokensMap = (parser).tokensMap + const parser: any = new EmbeddedActionsParser([ + PlusTok, + MinusTok, + IntTok + ]) + const tokensMap = (parser).tokensMap expect(tokensMap.PlusTok).to.equal(PlusTok) expect(tokensMap.MinusTok).to.equal(MinusTok) expect(tokensMap.IntTok).to.equal(IntTok) }) it("can be initialized with a Dictionary of Tokens", () => { - let initTokenDictionary = { + const initTokenDictionary = { PlusTok: PlusTok, MinusTok: MinusTok, IntToken: IntTok } - let parser: any = new EmbeddedActionsParser({ + const parser: any = new EmbeddedActionsParser({ PlusTok: PlusTok, MinusTok: MinusTok, IntToken: IntTok }) - let tokensMap = (parser).tokensMap + const tokensMap = (parser).tokensMap // the implementation should clone the dictionary to avoid bugs caused by mutability expect(tokensMap).not.to.equal(initTokenDictionary) expect(tokensMap.PlusTok).to.equal(PlusTok) @@ -924,15 +928,15 @@ function defineRecognizerSpecs( }) it("can be initialized with a IMultiModeLexerDefinition of Tokens", () => { - let multiModeLexerDef: IMultiModeLexerDefinition = { + const multiModeLexerDef: IMultiModeLexerDefinition = { modes: { bamba: [PlusTok], bisli: [MinusTok, IntTok] }, defaultMode: "bisli" } - let parser: any = new EmbeddedActionsParser(multiModeLexerDef) - let tokensMap = (parser).tokensMap + const parser: any = new EmbeddedActionsParser(multiModeLexerDef) + const tokensMap = (parser).tokensMap // the implementation should clone the dictionary to avoid bugs caused by mutability expect(tokensMap).not.to.equal(multiModeLexerDef) expect(tokensMap.PlusTok).to.equal(PlusTok) @@ -969,7 +973,7 @@ function defineRecognizerSpecs( this.CONSUME1(DotTok) }) } - let parser: any = new NotSwallowInRuleParser([ + const parser: any = new NotSwallowInRuleParser([ createTokenInstance(IntTok, "1") ]) parser.tryInRuleRecovery = () => { @@ -993,7 +997,7 @@ function defineRecognizerSpecs( this.CONSUME1(DotTok) }) } - let parser: any = new NotSwallowInTokenConsumption([ + const parser: any = new NotSwallowInTokenConsumption([ createTokenInstance(IntTok, "1") ]) ;(parser as any).consumeInternal = () => { @@ -1037,7 +1041,7 @@ function defineRecognizerSpecs( } ) } - let parser: any = new RethrowOtherErrors([ + const parser: any = new RethrowOtherErrors([ createTokenInstance(IntTok, "1") ]) parser.someRule() @@ -1057,7 +1061,7 @@ function defineRecognizerSpecs( }) } - let parser = new LabelTokParser([createTokenInstance(MinusTok)]) + const parser = new LabelTokParser([createTokenInstance(MinusTok)]) parser.rule() expect(parser.errors[0]).to.be.an.instanceof(MismatchedTokenException) expect(parser.errors[0].message).to.include("+") @@ -1078,7 +1082,7 @@ function defineRecognizerSpecs( }) } - let parser = new NoLabelTokParser([createTokenInstance(PlusTok)]) + const parser = new NoLabelTokParser([createTokenInstance(PlusTok)]) parser.rule() expect(parser.errors[0]).to.be.an.instanceof(MismatchedTokenException) expect(parser.errors[0].message).to.include("MinusTok") @@ -1104,7 +1108,7 @@ function defineRecognizerSpecs( }) } - let parser = new CustomConsumeErrorParser([ + const parser = new CustomConsumeErrorParser([ createTokenInstance(PlusTok) ]) parser.myStatement() @@ -1139,7 +1143,7 @@ function defineRecognizerSpecs( }) } - let parser = new 
CustomOrErrorParser([createTokenInstance(DotTok)]) + const parser = new CustomOrErrorParser([createTokenInstance(DotTok)]) parser.myStatement() expect(parser.errors[0]).to.be.an.instanceof(NoViableAltException) expect(parser.errors[0].message).to.include( @@ -1175,7 +1179,7 @@ function defineRecognizerSpecs( }) } - let parser = new LabelAltParser([]) + const parser = new LabelAltParser([]) parser.rule() expect(parser.errors[0]).to.be.an.instanceof(NoViableAltException) expect(parser.errors[0].context.ruleStack).to.deep.equal(["rule"]) @@ -1209,7 +1213,7 @@ function defineRecognizerSpecs( }) } - let parser = new MaxlookaheadOneAlt([]) + const parser = new MaxlookaheadOneAlt([]) parser.rule() expect(parser.errors[0]).to.be.an.instanceof(NoViableAltException) expect(parser.errors[0].context.ruleStack).to.deep.equal(["rule"]) @@ -1246,7 +1250,7 @@ function defineRecognizerSpecs( }) } - let parser = new LabelAltParser2([]) + const parser = new LabelAltParser2([]) parser.rule() expect(parser.errors[0]).to.be.an.instanceof(NoViableAltException) expect(parser.errors[0].context.ruleStack).to.deep.equal(["rule"]) @@ -1282,7 +1286,7 @@ function defineRecognizerSpecs( }) } - let parser = new NestedRulesParser([ + const parser = new NestedRulesParser([ createTokenInstance(MinusTok), createTokenInstance(MinusTok) ]) @@ -1331,7 +1335,7 @@ function defineRecognizerSpecs( }) } - let parser = new ImplicitAtLeastOneErrParser([ + const parser = new ImplicitAtLeastOneErrParser([ createTokenInstance(IntTok, "666"), createTokenInstance(MinusTok), createTokenInstance(MinusTok) @@ -1381,7 +1385,7 @@ function defineRecognizerSpecs( }) } - let parser = new ExplicitAtLeastOneErrParser([ + const parser = new ExplicitAtLeastOneErrParser([ createTokenInstance(IntTok, "666"), createTokenInstance(MinusTok), createTokenInstance(MinusTok) @@ -1426,7 +1430,7 @@ function defineRecognizerSpecs( }) } - let parser = new ImplicitAtLeastOneSepErrParser([ + const parser = new ImplicitAtLeastOneSepErrParser([ createTokenInstance(IntTok, "666"), createTokenInstance(MinusTok), createTokenInstance(MinusTok) @@ -1477,8 +1481,8 @@ function defineRecognizerSpecs( }) } - let parser = new SomeParser([]) - let serializedGrammar = parser.getSerializedGastProductions() + const parser = new SomeParser([]) + const serializedGrammar = parser.getSerializedGastProductions() // not bothering with more in-depth checks as those unit tests exist elsewhere expect(serializedGrammar).to.have.lengthOf(2) expect(serializedGrammar[0].type).to.equal("Rule") @@ -1516,7 +1520,7 @@ function defineRecognizerSpecs( }) } - let parser = new ContentAssistParser([]) + const parser = new ContentAssistParser([]) setEquality(parser.computeContentAssist("topRule", []), [ { nextTokenType: MinusTok, diff --git a/packages/chevrotain/test/scan/lexer_errors_public_spec.ts b/packages/chevrotain/test/scan/lexer_errors_public_spec.ts index 0c12fb3b7..ec76ca20b 100644 --- a/packages/chevrotain/test/scan/lexer_errors_public_spec.ts +++ b/packages/chevrotain/test/scan/lexer_errors_public_spec.ts @@ -3,7 +3,7 @@ import { IToken } from "../../api" describe("The Chevrotain default lexer error message provider", () => { it("Will build unexpected character message", () => { - let input = "1 LETTERS EXIT_LETTERS +" + const input = "1 LETTERS EXIT_LETTERS +" const msg = defaultLexerErrorProvider.buildUnexpectedCharactersMessage( input, 23, diff --git a/packages/chevrotain/test/scan/lexer_spec.ts b/packages/chevrotain/test/scan/lexer_spec.ts index 813ea3383..d244897e6 100644 --- 
a/packages/chevrotain/test/scan/lexer_spec.ts +++ b/packages/chevrotain/test/scan/lexer_spec.ts @@ -63,7 +63,7 @@ function defineLexerSpecs( BambaTok.LONGER_ALT = IdentifierTok - let testLexer = new Lexer( + const testLexer = new Lexer( [ SingleCharacterWithIgnoreCaseFlagTok, BambaTok, @@ -78,16 +78,16 @@ function defineLexerSpecs( describe("The Chevrotain Lexers", () => { it("can create a token from a string with priority to the First Token Type with the longest match #1", () => { // this can match either IdentifierTok or BambaTok but should match BambaTok has its pattern is defined before IdentifierTok - let input = "bamba" - let result = testLexer.tokenize(input) + const input = "bamba" + const result = testLexer.tokenize(input) expect(tokenMatcher(result.tokens[0], BambaTok)).to.be.true expect(result.tokens[0].image).to.equal("bamba") expect(result.tokens[0].startOffset).to.equal(0) }) it("can create a token from a string with priority to the First Token Type with the longest match #2", () => { - let input = "bambaMIA" - let result = testLexer.tokenize(input) + const input = "bambaMIA" + const result = testLexer.tokenize(input) expect(tokenMatcher(result.tokens[0], IdentifierTok)).to.be.true expect(result.tokens[0].image).to.equal("bambaMIA") expect(result.tokens[0].startOffset).to.equal(0) @@ -112,8 +112,8 @@ function defineLexerSpecs( const myLexer = new Lexer([BambaTok, IntegerTok, IdentTok], { positionTracking: "onlyOffset" }) - let input = "_bamba123" - let result = myLexer.tokenize(input) + const input = "_bamba123" + const result = myLexer.tokenize(input) expect(tokenMatcher(result.tokens[0], BambaTok)).to.be.true expect(result.tokens[0].image).to.equal("_bamba") @@ -122,8 +122,8 @@ function defineLexerSpecs( }) it("can create a token from a string", () => { - let input = "6666543221231" - let result = testLexer.tokenize(input) + const input = "6666543221231" + const result = testLexer.tokenize(input) expect(tokenMatcher(result.tokens[0], IntegerTok)).to.be.true expect(result.tokens[0].image).to.equal("6666543221231") expect(result.tokens[0].startOffset).to.equal(0) @@ -209,7 +209,7 @@ function defineLexerSpecs( if (!skipValidationChecks) { describe("The Simple Lexer Validations", () => { it("won't detect valid patterns as missing", () => { - let result = findMissingPatterns([ + const result = findMissingPatterns([ BambaTok, IntegerTok, IdentifierTok @@ -223,8 +223,8 @@ function defineLexerSpecs( }) it("will detect missing patterns", () => { - let tokenClasses = [ValidNaPattern, MissingPattern] - let result = findMissingPatterns(tokenClasses) + const tokenClasses = [ValidNaPattern, MissingPattern] + const result = findMissingPatterns(tokenClasses) expect(result.errors.length).to.equal(1) expect(result.errors[0].tokenTypes).to.deep.equal([MissingPattern]) expect(result.errors[0].type).to.equal( @@ -235,7 +235,7 @@ function defineLexerSpecs( }) it("won't detect valid patterns as invalid", () => { - let result = findInvalidPatterns([ + const result = findInvalidPatterns([ BambaTok, IntegerTok, IdentifierTok, @@ -251,8 +251,8 @@ function defineLexerSpecs( }) it("will detect invalid patterns as invalid", () => { - let tokenClasses = [ValidNaPattern, InvalidPattern] - let result = findInvalidPatterns(tokenClasses) + const tokenClasses = [ValidNaPattern, InvalidPattern] + const result = findInvalidPatterns(tokenClasses) expect(result.errors.length).to.equal(1) expect(result.errors[0].tokenTypes).to.deep.equal([InvalidPattern]) expect(result.errors[0].type).to.equal( @@ -263,7 +263,7 @@ 
function defineLexerSpecs( }) it("won't detect valid patterns as using unsupported flags", () => { - let errors = findUnsupportedFlags([ + const errors = findUnsupportedFlags([ BambaTok, IntegerTok, IdentifierTok, @@ -273,8 +273,8 @@ function defineLexerSpecs( }) it("will detect patterns using unsupported multiline flag", () => { - let tokenClasses = [ValidNaPattern, MultiLinePattern] - let errors = findUnsupportedFlags(tokenClasses) + const tokenClasses = [ValidNaPattern, MultiLinePattern] + const errors = findUnsupportedFlags(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([MultiLinePattern]) expect(errors[0].type).to.equal( @@ -284,8 +284,8 @@ function defineLexerSpecs( }) it("will detect patterns using unsupported global flag", () => { - let tokenClasses = [ValidNaPattern, GlobalPattern] - let errors = findUnsupportedFlags(tokenClasses) + const tokenClasses = [ValidNaPattern, GlobalPattern] + const errors = findUnsupportedFlags(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([GlobalPattern]) expect(errors[0].type).to.equal( @@ -295,22 +295,25 @@ function defineLexerSpecs( }) it("won't detect valid patterns as duplicates", () => { - let errors = findDuplicatePatterns([MultiLinePattern, IntegerValid]) + const errors = findDuplicatePatterns([MultiLinePattern, IntegerValid]) expect(errors).to.be.empty }) it("won't detect NA patterns as duplicates", () => { - let errors = findDuplicatePatterns([ValidNaPattern, ValidNaPattern2]) + const errors = findDuplicatePatterns([ + ValidNaPattern, + ValidNaPattern2 + ]) expect(errors).to.be.empty }) it("will detect patterns using unsupported end of input anchor", () => { - let InvalidToken = createToken({ + const InvalidToken = createToken({ name: "InvalidToken", pattern: /BAMBA$/ }) - let tokenClasses = [ValidNaPattern, InvalidToken] - let errors = findEndOfInputAnchor(tokenClasses) + const tokenClasses = [ValidNaPattern, InvalidToken] + const errors = findEndOfInputAnchor(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([InvalidToken]) expect(errors[0].type).to.equal( @@ -320,17 +323,17 @@ function defineLexerSpecs( }) it("won't detect valid patterns as using unsupported end of input anchor", () => { - let errors = findEndOfInputAnchor([IntegerTok, IntegerValid]) + const errors = findEndOfInputAnchor([IntegerTok, IntegerValid]) expect(errors).to.be.empty }) it("will detect patterns using unsupported start of input anchor", () => { - let InvalidToken = createToken({ + const InvalidToken = createToken({ name: "InvalidToken", pattern: /^BAMBA/ }) - let tokenClasses = [ValidNaPattern, InvalidToken] - let errors = findStartOfInputAnchor(tokenClasses) + const tokenClasses = [ValidNaPattern, InvalidToken] + const errors = findStartOfInputAnchor(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([InvalidToken]) expect(errors[0].type).to.equal( @@ -350,8 +353,8 @@ function defineLexerSpecs( pattern: /\w+/ }) - let tokenClasses = [Identifier, ClassKeyword] - let errors = findUnreachablePatterns(tokenClasses) + const tokenClasses = [Identifier, ClassKeyword] + const errors = findUnreachablePatterns(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([Identifier, ClassKeyword]) expect(errors[0].type).to.equal( @@ -361,22 +364,22 @@ function defineLexerSpecs( }) it("won't detect negation as using unsupported start of input anchor", () => { - let 
negationPattern = createToken({ + const negationPattern = createToken({ name: "negationPattern", pattern: /[^\\]/ }) - let errors = findStartOfInputAnchor([negationPattern]) + const errors = findStartOfInputAnchor([negationPattern]) expect(errors).to.be.empty }) it("won't detect valid patterns as using unsupported start of input anchor", () => { - let errors = findStartOfInputAnchor([IntegerTok, IntegerValid]) + const errors = findStartOfInputAnchor([IntegerTok, IntegerValid]) expect(errors).to.be.empty }) it("will detect identical patterns for different Token Types", () => { - let tokenClasses = [DecimalInvalid, IntegerValid] - let errors = findDuplicatePatterns(tokenClasses) + const tokenClasses = [DecimalInvalid, IntegerValid] + const errors = findDuplicatePatterns(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([ DecimalInvalid, @@ -396,8 +399,8 @@ function defineLexerSpecs( pattern: /\d*/ }) - let tokenClasses = [emptyMatch] - let errors = findEmptyMatchRegExps(tokenClasses) + const tokenClasses = [emptyMatch] + const errors = findEmptyMatchRegExps(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([emptyMatch]) expect(errors[0].type).to.equal( @@ -408,14 +411,14 @@ function defineLexerSpecs( }) it("won't detect valid groups as unsupported", () => { - let errors = findInvalidGroupType([IntegerTok, Skipped, Special]) + const errors = findInvalidGroupType([IntegerTok, Skipped, Special]) //noinspection BadExpressionStatementJS expect(errors).to.be.empty }) it("will detect unsupported group types", () => { - let tokenClasses = [InvalidGroupNumber] - let errors = findInvalidGroupType(tokenClasses) + const tokenClasses = [InvalidGroupNumber] + const errors = findInvalidGroupType(tokenClasses) expect(errors.length).to.equal(1) expect(errors[0].tokenTypes).to.deep.equal([InvalidGroupNumber]) expect(errors[0].type).to.equal( @@ -496,22 +499,22 @@ function defineLexerSpecs( describe("The Simple Lexer transformations", () => { it("can transform a pattern to one with startOfInput mark ('^') #1 (NO OP)", () => { - let orgSource = (BambaTok.PATTERN).source - let transPattern = addStartOfInput(BambaTok.PATTERN) + const orgSource = (BambaTok.PATTERN).source + const transPattern = addStartOfInput(BambaTok.PATTERN) expect(transPattern.source).to.equal("^(?:" + orgSource + ")") expect(/^\^/.test(transPattern.source)).to.equal(true) }) it("can transform a pattern to one with startOfInput mark ('^') #2", () => { - let orgSource = PatternNoStart.PATTERN.source - let transPattern = addStartOfInput(PatternNoStart.PATTERN) + const orgSource = PatternNoStart.PATTERN.source + const transPattern = addStartOfInput(PatternNoStart.PATTERN) expect(transPattern.source).to.equal("^(?:" + orgSource + ")") expect(/^\^/.test(transPattern.source)).to.equal(true) }) if (!skipValidationChecks) { it("can transform/analyze an array of Token Typees into matched/ignored/patternToClass", () => { - let tokenClasses = [ + const tokenClasses = [ Keyword, If, Else, @@ -523,17 +526,17 @@ function defineLexerSpecs( Whitespace, NewLine ] - let analyzeResult = analyzeTokenTypes(tokenClasses, { + const analyzeResult = analyzeTokenTypes(tokenClasses, { useSticky: false }) - let allPatterns = map( + const allPatterns = map( analyzeResult.patternIdxToConfig, (currConfig) => currConfig.pattern ) expect(allPatterns.length).to.equal(8) - let allPatternsString = map(allPatterns, (pattern) => { + const allPatternsString = map(allPatterns, (pattern) => { return 
isString(pattern) ? pattern : pattern.source }) setEquality(allPatternsString, [ @@ -547,7 +550,7 @@ function defineLexerSpecs( "^(?:return)" ]) - let patternIdxToClass = map( + const patternIdxToClass = map( analyzeResult.patternIdxToConfig, (currConfig) => currConfig.tokenType ) @@ -565,7 +568,7 @@ function defineLexerSpecs( if (!skipValidationChecks && ORG_SUPPORT_STICKY) { it("can transform/analyze an array of Token Typees into matched/ignored/patternToClass - sticky", () => { - let tokenClasses = [ + const tokenClasses = [ Keyword, If, Else, @@ -578,15 +581,15 @@ function defineLexerSpecs( NewLine ] // on newer node.js this will run with the 2nd argument as true. - let analyzeResult = analyzeTokenTypes(tokenClasses, { + const analyzeResult = analyzeTokenTypes(tokenClasses, { useSticky: true }) - let allPatterns = map( + const allPatterns = map( analyzeResult.patternIdxToConfig, (currConfig) => currConfig.pattern ) expect(allPatterns.length).to.equal(8) - let allPatternsString = map(allPatterns, (pattern) => { + const allPatternsString = map(allPatterns, (pattern) => { return isString(pattern) ? pattern : pattern.source }) setEquality(allPatternsString, [ @@ -605,7 +608,7 @@ function defineLexerSpecs( expect(currPattern.sticky).to.be.true } }) - let patternIdxToClass = map( + const patternIdxToClass = map( analyzeResult.patternIdxToConfig, (currConfig) => currConfig.tokenType ) @@ -622,7 +625,7 @@ function defineLexerSpecs( } it("can count the number of line terminators in a string", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: /\s+/ @@ -632,19 +635,19 @@ function defineLexerSpecs( pattern: /\d+/ }) ]) - let lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) + const lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) expect(lastToken.startLine).to.equal(3) - let lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) + const lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) expect(lastToken2.startLine).to.equal(5) expect(lastToken2.startColumn).to.equal(1) - let lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) + const lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) expect(lastToken3.startLine).to.equal(5) }) it("can count the number of line terminators in a string - with lookahead", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: /\s+/ @@ -655,19 +658,19 @@ function defineLexerSpecs( pattern: /\d+(?=|\n)/ }) ]) - let lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) + const lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) expect(lastToken.startLine).to.equal(3) - let lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) + const lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) expect(lastToken2.startLine).to.equal(5) expect(lastToken2.startColumn).to.equal(1) - let lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) + const lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) expect(lastToken3.startLine).to.equal(5) }) it("can count the number of line terminators in a string - with negative lookahead", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: /\s+/ @@ -679,19 +682,19 @@ function defineLexerSpecs( pattern: /\d+(?!a\n)/ }) ]) - let lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) + const lastToken = last(ltCounter.tokenize("1\r\n1\r1").tokens) expect(lastToken.startLine).to.equal(3) - let 
lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) + const lastToken2 = last(ltCounter.tokenize("\r\r\r1234\r\n1").tokens) expect(lastToken2.startLine).to.equal(5) expect(lastToken2.startColumn).to.equal(1) - let lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) + const lastToken3 = last(ltCounter.tokenize("2\r3\n\r4\n5").tokens) expect(lastToken3.startLine).to.equal(5) }) it("can count the number of line terminators in a string - string literal patterns", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: "\n", @@ -702,12 +705,12 @@ function defineLexerSpecs( pattern: /\d+/ }) ]) - let lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) + const lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) expect(lastToken.startLine).to.equal(3) }) it("can count the number of line terminators in a string - string literal patterns - implicit prop", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: "\n" @@ -717,26 +720,26 @@ function defineLexerSpecs( pattern: /\d+/ }) ]) - let lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) + const lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) expect(lastToken.startLine).to.equal(3) }) it("Supports custom Line Terminators", () => { - let WS = createToken({ + const WS = createToken({ name: "WS", pattern: /\u2028/, line_breaks: true, group: Lexer.SKIPPED }) - let ifElseLexer = new Lexer([WS, If, Else], { + const ifElseLexer = new Lexer([WS, If, Else], { lineTerminatorsPattern: /\u2028/g, lineTerminatorCharacters: ["\u2028"] }) - let input = "if\u2028elseif" + const input = "if\u2028elseif" - let lexResult = ifElseLexer.tokenize(input) - let tokens: any = lexResult.tokens + const lexResult = ifElseLexer.tokenize(input) + const tokens: any = lexResult.tokens expect(tokens[0].image).to.equal("if") expect(tokens[0].startLine).to.equal(1) expect(tokens[0].startColumn).to.equal(1) @@ -749,22 +752,22 @@ function defineLexerSpecs( }) it("Supports custom Line Terminators with numerical lineTerminatorCharacters", () => { - let WS = createToken({ + const WS = createToken({ name: "WS", pattern: /\u2028/, line_breaks: true, group: Lexer.SKIPPED }) - let ifElseLexer = new Lexer([WS, If, Else], { + const ifElseLexer = new Lexer([WS, If, Else], { lineTerminatorsPattern: /\u2028/g, // "\u2028".charCodeAt(0) === 8232 lineTerminatorCharacters: [8232] }) - let input = "if\u2028elseif" + const input = "if\u2028elseif" - let lexResult = ifElseLexer.tokenize(input) - let tokens: any = lexResult.tokens + const lexResult = ifElseLexer.tokenize(input) + const tokens: any = lexResult.tokens expect(tokens[0].image).to.equal("if") expect(tokens[0].startLine).to.equal(1) expect(tokens[0].startColumn).to.equal(1) @@ -787,30 +790,30 @@ function defineLexerSpecs( const lexer = new Lexer([workflow], { positionTracking: "onlyOffset" }) - let lexResult = lexer.tokenize(input) - let tokens: any = lexResult.tokens + const lexResult = lexer.tokenize(input) + const tokens: any = lexResult.tokens expect(tokens[0].image).to.equal("worKFloW") expect(tokens[0].tokenType).to.equal(workflow) }) it("can run a simpleLexer without optimizing meta chars", () => { - let Tab = createToken({ + const Tab = createToken({ name: "Tab", pattern: /\t/, group: "spaces" }) - let ifElseLexer = new Lexer([Tab, If, Else], { + const ifElseLexer = new Lexer([Tab, If, Else], { positionTracking: "onlyOffset" }) - let input = "if\telse" + const input = "if\telse" - let lexResult = 
ifElseLexer.tokenize(input) - let tokens: any = lexResult.tokens + const lexResult = ifElseLexer.tokenize(input) + const tokens: any = lexResult.tokens expect(tokens[0].image).to.equal("if") expect(tokens[1].image).to.equal("else") - let spacesGroups: any = lexResult.groups.spaces + const spacesGroups: any = lexResult.groups.spaces expect(spacesGroups[0].image).to.equal("\t") }) @@ -820,14 +823,14 @@ function defineLexerSpecs( pattern: /if|else/, start_chars_hint: ["i", "e".charCodeAt(0)] }) - let ifElseLexer = new Lexer([IfOrElse], { + const ifElseLexer = new Lexer([IfOrElse], { positionTracking: "onlyOffset" }) - let input = "ifelse" + const input = "ifelse" - let lexResult = ifElseLexer.tokenize(input) - let tokens: any = lexResult.tokens + const lexResult = ifElseLexer.tokenize(input) + const tokens: any = lexResult.tokens expect(tokens[0].image).to.equal("if") expect(tokens[1].image).to.equal("else") }) @@ -837,7 +840,7 @@ function defineLexerSpecs( pattern: /BAMBA$/ }) it("can create a simple Lexer from a List of Token Typees", () => { - let ifElseLexer = new Lexer( + const ifElseLexer = new Lexer( [ Keyword, If, @@ -855,9 +858,9 @@ function defineLexerSpecs( //noinspection BadExpressionStatementJS expect(ifElseLexer.lexerDefinitionErrors).to.be.empty - let input = "if (666) reTurn 1\n" + "\telse return 2" + const input = "if (666) reTurn 1\n" + "\telse return 2" - let lexResult = ifElseLexer.tokenize(input) + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.groups).to.be.empty expect(lexResult.tokens[0].image).to.equal("if") @@ -966,7 +969,7 @@ function defineLexerSpecs( if (!skipValidationChecks) { // This test must not be performed in custom mode it("can count the number of line terminators in a string - complement prop", () => { - let ltCounter = new Lexer([ + const ltCounter = new Lexer([ createToken({ name: "lt", pattern: /[^\d]+/ @@ -976,7 +979,7 @@ function defineLexerSpecs( pattern: /\d+/ }) ]) - let lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) + const lastToken = last(ltCounter.tokenize("1\n1\n1").tokens) expect(lastToken.startLine).to.equal(3) }) @@ -1043,7 +1046,7 @@ function defineLexerSpecs( }) ).to.not.throw(/EndOfInputAnchor/) - let lexerWithErrs = new Lexer([EndOfInputAnchor, If, Else], { + const lexerWithErrs = new Lexer([EndOfInputAnchor, If, Else], { positionTracking: "onlyOffset", deferDefinitionErrorsHandling: true }) @@ -1060,7 +1063,7 @@ function defineLexerSpecs( } it("can skip invalid character inputs and only report one error per sequence of characters skipped", () => { - let ifElseLexer = new Lexer( + const ifElseLexer = new Lexer( [ Keyword, If, @@ -1076,9 +1079,9 @@ function defineLexerSpecs( lexerConfig ) - let input = "if (666) return 1@#$@#$\n" + "\telse return 2" + const input = "if (666) return 1@#$@#$\n" + "\telse return 2" - let lexResult = ifElseLexer.tokenize(input) + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.errors.length).to.equal(1) expect(lexResult.errors[0].message).to.contain("@") expect(lexResult.errors[0].length).to.equal(6) @@ -1164,7 +1167,7 @@ function defineLexerSpecs( }) it("won't go into infinite loops when skipping at end of input", () => { - let ifElseLexer = new Lexer( + const ifElseLexer = new Lexer( [ Keyword, If, @@ -1180,8 +1183,8 @@ function defineLexerSpecs( lexerConfig ) - let input = "if&&&&&&&&&&&&&&&&&&&&&&&&&&&&" - let lexResult = ifElseLexer.tokenize(input) + const input = "if&&&&&&&&&&&&&&&&&&&&&&&&&&&&" + const lexResult = ifElseLexer.tokenize(input) 
expect(lexResult.errors.length).to.equal(1) expect(lexResult.errors[0].message).to.contain("&") if (testStart) { @@ -1203,13 +1206,13 @@ function defineLexerSpecs( }) it("can deal with line terminators inside multi-line Tokens", () => { - let ifElseLexer = new Lexer( + const ifElseLexer = new Lexer( [If, Else, WhitespaceNotSkipped], lexerConfig ) - let input = "if\r\r\telse\rif\n" - let lexResult = ifElseLexer.tokenize(input) + const input = "if\r\r\telse\rif\n" + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.tokens[0].image).to.equal("if") expect(lexResult.tokens[0].startOffset).to.equal(0) @@ -1288,10 +1291,10 @@ function defineLexerSpecs( }) it("can deal with Tokens which may or may not be a lineTerminator", () => { - let ifElseLexer = new Lexer([If, Else, WhitespaceOrAmp], lexerConfig) + const ifElseLexer = new Lexer([If, Else, WhitespaceOrAmp], lexerConfig) - let input = "if\r\r\telse&if" - let lexResult = ifElseLexer.tokenize(input) + const input = "if\r\r\telse&if" + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.tokens[0].image).to.equal("if") expect(lexResult.tokens[0].startOffset).to.equal(0) @@ -1355,9 +1358,9 @@ function defineLexerSpecs( }) it("supports Token groups", () => { - let ifElseLexer = new Lexer([If, Else, Comment, NewLine], lexerConfig) - let input = "if//else" - let lexResult = ifElseLexer.tokenize(input) + const ifElseLexer = new Lexer([If, Else, Comment, NewLine], lexerConfig) + const input = "if//else" + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.tokens[0].image).to.equal("if") expect(lexResult.tokens[0].startOffset).to.equal(0) @@ -1374,7 +1377,7 @@ function defineLexerSpecs( expect(lexResult.groups).to.have.property("comments") // tslint:disable expect(lexResult.groups["comments"]).to.have.length(1) - let comment = lexResult.groups["comments"][0] + const comment = lexResult.groups["comments"][0] // tslint:enable expect(comment.image).to.equal("//else") expect(comment.startOffset).to.equal(2) @@ -1390,8 +1393,8 @@ function defineLexerSpecs( }) it("won't have leftover state when using token groups", () => { - let ifElseLexer = new Lexer([If, Else, Comment, NewLine], lexerConfig) - let input = "if//else" + const ifElseLexer = new Lexer([If, Else, Comment, NewLine], lexerConfig) + const input = "if//else" let lexResult = ifElseLexer.tokenize(input) expect(lexResult.groups).to.have.property("comments") @@ -1408,9 +1411,9 @@ function defineLexerSpecs( }) it("can lex a pile of poo", () => { - let ifElseLexer = new Lexer([If, PileOfPoo, NewLine], lexerConfig) - let input = "if💩" - let lexResult = ifElseLexer.tokenize(input) + const ifElseLexer = new Lexer([If, PileOfPoo, NewLine], lexerConfig) + const input = "if💩" + const lexResult = ifElseLexer.tokenize(input) expect(lexResult.tokens[0].image).to.equal("if") expect(lexResult.tokens[0].tokenType).to.equal(If) @@ -1477,7 +1480,7 @@ function defineLexerSpecs( }) Whitespace.GROUP = Lexer.SKIPPED - let modeLexerDefinition: IMultiModeLexerDefinition = { + const modeLexerDefinition: IMultiModeLexerDefinition = { modes: { numbers: [One, Two, Three, ExitNumbers, LETTERS, Whitespace], letters: [ @@ -1494,14 +1497,14 @@ function defineLexerSpecs( defaultMode: "numbers" } - let ModeLexer = new Lexer(modeLexerDefinition, lexerConfig) + const ModeLexer = new Lexer(modeLexerDefinition, lexerConfig) it("supports 'context' lexer modes full flow", () => { - let input = "1 LETTERS G A G SIGNS & EXIT_SIGNS B EXIT_LETTERS 3" - let lexResult = ModeLexer.tokenize(input) + const input = 
"1 LETTERS G A G SIGNS & EXIT_SIGNS B EXIT_LETTERS 3" + const lexResult = ModeLexer.tokenize(input) expect(lexResult.errors).to.be.empty - let images = map(lexResult.tokens, (currTok) => currTok.image) + const images = map(lexResult.tokens, (currTok) => currTok.image) expect(images).to.deep.equal([ "1", "LETTERS", @@ -1518,8 +1521,8 @@ function defineLexerSpecs( }) it("supports lexer error reporting with modes", () => { - let input = "1 LETTERS EXIT_LETTERS +" - let lexResult = ModeLexer.tokenize(input) + const input = "1 LETTERS EXIT_LETTERS +" + const lexResult = ModeLexer.tokenize(input) expect(lexResult.errors).to.have.lengthOf(1) expect(lexResult.errors[0].message).to.equal( "unexpected character: ->+<- at offset: 23, skipped 1 characters." @@ -1527,22 +1530,22 @@ function defineLexerSpecs( }) it("allows choosing the initial Mode", () => { - let input = "A G SIGNS ^" - let lexResult = ModeLexer.tokenize(input, "letters") + const input = "A G SIGNS ^" + const lexResult = ModeLexer.tokenize(input, "letters") expect(lexResult.errors).to.be.empty - let images = map(lexResult.tokens, (currTok) => currTok.image) + const images = map(lexResult.tokens, (currTok) => currTok.image) expect(images).to.deep.equal(["A", "G", "SIGNS", "^"]) }) it("won't allow lexing tokens that are not in the current mode's set", () => { - let input = "1 LETTERS 1A" - let lexResult = ModeLexer.tokenize(input) + const input = "1 LETTERS 1A" + const lexResult = ModeLexer.tokenize(input) expect(lexResult.errors).to.have.lengthOf(1) expect(lexResult.errors[0].message).to.include("skipped 1") expect(lexResult.errors[0].message).to.include(">1<") - let images = map(lexResult.tokens, (currTok) => currTok.image) + const images = map(lexResult.tokens, (currTok) => currTok.image) expect(images).to.deep.equal([ "1", @@ -1552,8 +1555,8 @@ function defineLexerSpecs( }) it("Will create a lexer error and skip the mode popping when there is no lexer mode to pop", () => { - let input = "1 EXIT_NUMBERS 2" - let lexResult = ModeLexer.tokenize(input) + const input = "1 EXIT_NUMBERS 2" + const lexResult = ModeLexer.tokenize(input) expect(lexResult.errors).to.have.lengthOf(1) expect(lexResult.errors[0].message).to.include(">EXIT_NUMBERS<") expect(lexResult.errors[0].message).to.include("Unable to pop") @@ -1567,16 +1570,16 @@ function defineLexerSpecs( expect(lexResult.errors[0].length).to.equal(12) - let images = map(lexResult.tokens, (currTok) => currTok.image) + const images = map(lexResult.tokens, (currTok) => currTok.image) expect(images).to.deep.equal(["1", "EXIT_NUMBERS", "2"]) }) it("Will pop the lexer mode and push a new one if both are defined on the token", () => { - let input = "LETTERS SIGNS_AND_EXIT_LETTERS &" - let lexResult = ModeLexer.tokenize(input) + const input = "LETTERS SIGNS_AND_EXIT_LETTERS &" + const lexResult = ModeLexer.tokenize(input) expect(lexResult.errors).to.be.empty - let images = map(lexResult.tokens, (currTok) => currTok.image) + const images = map(lexResult.tokens, (currTok) => currTok.image) expect(images).to.deep.equal([ "LETTERS", "SIGNS_AND_EXIT_LETTERS", @@ -1598,7 +1601,7 @@ function defineLexerSpecs( }) EnterNumbers.PUSH_MODE = "numbers" - let lexerDef: IMultiModeLexerDefinition = { + const lexerDef: IMultiModeLexerDefinition = { modes: { letters: [Alpha, Beta, Gamma, Whitespace, EnterNumbers], // the numbers mode has a typo! 
so the PUSH_MODE in the 'EnterNumbers' is invalid @@ -1608,7 +1611,7 @@ function defineLexerSpecs( defaultMode: "letters" } - let badLexer = new Lexer(lexerDef, { + const badLexer = new Lexer(lexerDef, { deferDefinitionErrorsHandling: true }) expect(badLexer.lexerDefinitionErrors).to.have.lengthOf(1) @@ -1630,7 +1633,7 @@ function defineLexerSpecs( }) it("Will detect a multiMode Lexer definition which is missing the property", () => { - let lexerDef: any = { + const lexerDef: any = { modes___: { // typo in 'modes' property name }, @@ -1638,7 +1641,7 @@ function defineLexerSpecs( defaultMode: "" } - let badLexer = new Lexer(lexerDef, { + const badLexer = new Lexer(lexerDef, { deferDefinitionErrorsHandling: true, positionTracking: "onlyOffset" }) @@ -1655,13 +1658,13 @@ function defineLexerSpecs( }) it("Will detect a multiMode Lexer definition which is missing the property", () => { - let lexerDef: any = { + const lexerDef: any = { modes: {}, defaultMode___: "" // typo in 'defaultMode' property name } - let badLexer = new Lexer(lexerDef, { + const badLexer = new Lexer(lexerDef, { deferDefinitionErrorsHandling: true, positionTracking: "onlyOffset" }) @@ -1681,14 +1684,14 @@ function defineLexerSpecs( "Will detect a multiMode Lexer definition " + "which has an invalid (missing the value) of the property", () => { - let lexerDef: any = { + const lexerDef: any = { modes: { bamba: [] }, defaultMode: "bisli" } - let badLexer = new Lexer(lexerDef, { + const badLexer = new Lexer(lexerDef, { deferDefinitionErrorsHandling: true, positionTracking: "onlyOffset" }) @@ -1709,8 +1712,8 @@ function defineLexerSpecs( ) it("Will detect a Lexer definition which has undefined Token Typees", () => { - let lexerDef: any = [Alpha, Beta /* this is undefined */, , Gamma] - let badLexer = new Lexer(lexerDef, { + const lexerDef: any = [Alpha, Beta /* this is undefined */, , Gamma] + const badLexer = new Lexer(lexerDef, { deferDefinitionErrorsHandling: true, positionTracking: "onlyOffset" }) @@ -1748,8 +1751,8 @@ function defineLexerSpecs( }) it("supports custom unexpected characters lexer error message", () => { - let input = "1 LETTERS EXIT_LETTERS +" - let lexResult = ModeLexerWithCustomErrors.tokenize(input) + const input = "1 LETTERS EXIT_LETTERS +" + const lexResult = ModeLexerWithCustomErrors.tokenize(input) expect(lexResult.errors).to.have.lengthOf(1) expect(lexResult.errors[0].message).to.equal( "[1, 24] Unknown character + at position 23 skipped 1" @@ -1757,8 +1760,8 @@ function defineLexerSpecs( }) it("supports custom unable to pop lexer mode error message", () => { - let input = "1 EXIT_NUMBERS 2" - let lexResult = ModeLexerWithCustomErrors.tokenize(input) + const input = "1 EXIT_NUMBERS 2" + const lexResult = ModeLexerWithCustomErrors.tokenize(input) expect(lexResult.errors).to.have.lengthOf(1) expect(lexResult.errors[0].message).to.equal( "No pop for you EXIT_NUMBERS" @@ -1772,7 +1775,7 @@ function defineLexerSpecs( let time = 1 function extraContextValidator(text, offset, tokens, groups) { - let result = isFunction(customPattern) + const result = isFunction(customPattern) ? 
customPattern(text, offset) : customPattern.exec(text, offset) if (result !== null) { @@ -1791,17 +1794,17 @@ function defineLexerSpecs( return result } - let A = createToken({ + const A = createToken({ name: "A", pattern: "A" }) - let B = createToken({ + const B = createToken({ name: "B", pattern: extraContextValidator, line_breaks: false }) - let WS = createToken({ + const WS = createToken({ name: "WS", pattern: { exec: (text, offset) => /^\s+/.exec(text.substring(offset)) @@ -1810,15 +1813,15 @@ function defineLexerSpecs( line_breaks: true }) - let lexerDef: any = [WS, A, B] - let myLexer = new Lexer(lexerDef, lexerConfig) - let lexResult = myLexer.tokenize("B A\n B ") + const lexerDef: any = [WS, A, B] + const myLexer = new Lexer(lexerDef, lexerConfig) + const lexResult = myLexer.tokenize("B A\n B ") expect(lexResult.tokens).to.have.length(3) expect(tokenMatcher(lexResult.tokens[0], B)).to.be.true expect(tokenMatcher(lexResult.tokens[1], A)).to.be.true expect(tokenMatcher(lexResult.tokens[2], B)).to.be.true - let lastToken = lexResult.tokens[2] + const lastToken = lexResult.tokens[2] expect(lastToken.startOffset).to.equal(5) if (testStart) { @@ -1981,10 +1984,10 @@ skipOnBrowser("debugging and messages and optimizations", () => { }) function wrapWithCustom(baseExtendToken) { - return function () { - let newToken = baseExtendToken.apply(null, arguments) + return function (...args) { + const newToken = baseExtendToken(...args) - let pattern = newToken.PATTERN + const pattern = newToken.PATTERN if ( isRegExp(pattern) && !/\\n|\\r|\\s/g.test(pattern.source) && @@ -1992,8 +1995,8 @@ function wrapWithCustom(baseExtendToken) { ) { newToken.PATTERN = function (text, offset) { // can't use sticky here because tests on node.js version 4 won't pass. - let withStart = addStartOfInput(pattern) - let execResult = withStart.exec(text.substring(offset)) + const withStart = addStartOfInput(pattern) + const execResult = withStart.exec(text.substring(offset)) return execResult } diff --git a/packages/chevrotain/test/scan/token_spec.ts b/packages/chevrotain/test/scan/token_spec.ts index c4e782a3e..e3fed8c41 100644 --- a/packages/chevrotain/test/scan/token_spec.ts +++ b/packages/chevrotain/test/scan/token_spec.ts @@ -10,7 +10,7 @@ import { singleAssignCategoriesToksMap } from "../../src/scan/tokens" describe("The Chevrotain Tokens namespace", () => { context("createToken", () => { - let TrueLiteral = createToken({ name: "TrueLiteral" }) + const TrueLiteral = createToken({ name: "TrueLiteral" }) class FalseLiteral {} it("assigns `name` property to tokenTypes", () => { @@ -21,26 +21,26 @@ describe("The Chevrotain Tokens namespace", () => { expect(tokenName(TrueLiteral)).to.equal("TrueLiteral") }) - let A = createToken({ name: "A" }) - let B = createToken({ name: "B", categories: A }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B", categories: A }) B.GROUP = "Special" - let C = createToken({ + const C = createToken({ name: "C", pattern: /\d+/, categories: B }) - let D = createToken({ + const D = createToken({ name: "D", pattern: /\w+/, categories: B }) - let Plus = createToken({ name: "Plus", pattern: /\+/ }) + const Plus = createToken({ name: "Plus", pattern: /\+/ }) Plus.LABEL = "+" it("provides an createTokenInstance utility - creating an instance", () => { - let aInstance = createTokenInstance(A, "Hello", 0, 4, 1, 1, 1, 5) + const aInstance = createTokenInstance(A, "Hello", 0, 4, 1, 1, 1, 5) expect(aInstance.image).to.equal("Hello") expect(aInstance.startOffset).to.equal(0) 
expect(aInstance.endOffset).to.equal(4) @@ -51,7 +51,7 @@ describe("The Chevrotain Tokens namespace", () => { }) it("provides an extendToken utility - creating a subclass instance", () => { - let aInstance = createTokenInstance(A, "World", 0, 4, 1, 1, 1, 5) + const aInstance = createTokenInstance(A, "World", 0, 4, 1, 1, 1, 5) expect(aInstance.image).to.equal("World") expect(aInstance.startOffset).to.equal(0) expect(aInstance.endOffset).to.equal(4) @@ -69,13 +69,13 @@ describe("The Chevrotain Tokens namespace", () => { }) it("provides a utility to verify if a token instance matches a Token Type", () => { - let ATokRegular = createToken({ + const ATokRegular = createToken({ name: "ATokRegular" }) - let BTokRegular = createToken({ + const BTokRegular = createToken({ name: "BTokRegular" }) - let AInstanceRegular = createTokenInstance( + const AInstanceRegular = createTokenInstance( ATokRegular, "a", -1, @@ -85,7 +85,7 @@ describe("The Chevrotain Tokens namespace", () => { -1, -1 ) - let BInstanceRegular = createTokenInstance( + const BInstanceRegular = createTokenInstance( BTokRegular, "b", -1, @@ -103,8 +103,8 @@ describe("The Chevrotain Tokens namespace", () => { }) it("Will augment Token Constructors with additional metadata basic", () => { - let A = createToken({ name: "A" }) - let B = createToken({ name: "B" }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B" }) expect(A.tokenTypeIdx).to.be.greaterThan(0) expect(B.tokenTypeIdx).to.be.greaterThan(A.tokenTypeIdx) @@ -116,7 +116,7 @@ describe("The Chevrotain Tokens namespace", () => { }) it("can define a token Label via the createToken utilities", () => { - let A = createToken({ + const A = createToken({ name: "A", label: "bamba" }) @@ -124,7 +124,7 @@ describe("The Chevrotain Tokens namespace", () => { }) it("can define a POP_MODE via the createToken utilities", () => { - let A = createToken({ + const A = createToken({ name: "A", pop_mode: true }) @@ -133,7 +133,7 @@ describe("The Chevrotain Tokens namespace", () => { }) it("can define a PUSH_MODE via the createToken utilities", () => { - let A = createToken({ + const A = createToken({ name: "A", push_mode: "attribute" }) @@ -142,14 +142,14 @@ describe("The Chevrotain Tokens namespace", () => { }) it("can define a LONGER_ALT via the createToken utilities", () => { - let A = createToken({ name: "A" }) - let B = createToken({ name: "B", longer_alt: A }) + const A = createToken({ name: "A" }) + const B = createToken({ name: "B", longer_alt: A }) expect(B).to.haveOwnProperty("LONGER_ALT") expect(B.LONGER_ALT).to.equal(A) }) it("can define a token group via the createToken utilities", () => { - let A = createToken({ + const A = createToken({ name: "A", group: Lexer.SKIPPED }) diff --git a/packages/chevrotain/test/test.config.js b/packages/chevrotain/test/test.config.js index a905dfcef..180e11ee1 100644 --- a/packages/chevrotain/test/test.config.js +++ b/packages/chevrotain/test/test.config.js @@ -1,3 +1,4 @@ +/* eslint-disable no-undef -- config file */ if (typeof global === "object") { global.expect = require("chai").expect require("chai").use(require("sinon-chai")) @@ -5,3 +6,4 @@ if (typeof global === "object") { } else if (typeof window === "object") { window.expect = chai.expect } +/* eslint-enable no-undef -- config file */ diff --git a/packages/chevrotain/test/text/range_spec.ts b/packages/chevrotain/test/text/range_spec.ts index 289bc16a7..c34d6eaec 100644 --- a/packages/chevrotain/test/text/range_spec.ts +++ b/packages/chevrotain/test/text/range_spec.ts @@ 
-11,7 +11,7 @@ describe("The Chevrotain Range namespace", () => { }) it("can check if a number is contained in a give range", () => { - let r = new Range(90, 110) + const r = new Range(90, 110) expect(r.contains(-4)).to.equal(false) expect(r.contains(30)).to.equal(false) expect(r.contains(89)).to.equal(false) @@ -24,12 +24,12 @@ describe("The Chevrotain Range namespace", () => { }) it("can check if it is contained in another range", () => { - let _10_50 = new Range(10, 50) - let _1_6 = new Range(1, 6) - let _5_15 = new Range(5, 15) - let _20_35 = new Range(20, 35) - let _45_55 = new Range(45, 55) - let _51_100 = new Range(51, 100) + const _10_50 = new Range(10, 50) + const _1_6 = new Range(1, 6) + const _5_15 = new Range(5, 15) + const _20_35 = new Range(20, 35) + const _45_55 = new Range(45, 55) + const _51_100 = new Range(51, 100) expect(_1_6.isContainedInRange(_10_50)).to.equal(false) expect(_5_15.isContainedInRange(_10_50)).to.equal(false) @@ -40,15 +40,15 @@ describe("The Chevrotain Range namespace", () => { }) it("can check if it is strictly contained in another range", () => { - let _10_50 = new Range(10, 50) + const _10_50 = new Range(10, 50) - let _1_6 = new Range(1, 6) - let _10_11 = new Range(10, 11) - let _5_15 = new Range(5, 15) - let _20_35 = new Range(20, 35) - let _45_55 = new Range(45, 55) - let _49_50 = new Range(49, 50) - let _51_100 = new Range(51, 100) + const _1_6 = new Range(1, 6) + const _10_11 = new Range(10, 11) + const _5_15 = new Range(5, 15) + const _20_35 = new Range(20, 35) + const _45_55 = new Range(45, 55) + const _49_50 = new Range(49, 50) + const _51_100 = new Range(51, 100) expect(_1_6.isStrictlyContainedInRange(_10_50)).to.equal(false) expect(_10_11.isStrictlyContainedInRange(_10_50)).to.equal(false) diff --git a/packages/chevrotain/test/utils/utils_spec.ts b/packages/chevrotain/test/utils/utils_spec.ts index 735234d80..e69c75ce1 100644 --- a/packages/chevrotain/test/utils/utils_spec.ts +++ b/packages/chevrotain/test/utils/utils_spec.ts @@ -150,7 +150,7 @@ describe("The Utils functions namespace", () => { it("exports a cloneArr utility", () => { expect(cloneArr([1, 2, 3])).to.deep.equal([1, 2, 3]) expect(cloneArr([])).to.deep.equal([]) - let arr = [] + const arr = [] expect(cloneArr(arr)).to.not.equal(arr) }) @@ -159,7 +159,7 @@ describe("The Utils functions namespace", () => { bamba: 666, bisli: 777 }) - let obj = { bamba: 666, bisli: 777 } + const obj = { bamba: 666, bisli: 777 } expect(cloneObj(obj)).to.not.equal(obj) expect(cloneObj(["bamba"])).to.not.have.property("length") expect(cloneObj(["bamba"])).to.deep.equal({ "0": "bamba" }) @@ -168,8 +168,8 @@ describe("The Utils functions namespace", () => { it("exports a find utility", () => { expect(find([1, 2, 3], (item) => item === 2)).to.equal(2) expect(find([], (item) => item === 2)).to.be.undefined - let a = {} - let b = {} + const a = {} + const b = {} expect(find([a, b], (item) => item === b)).to.equal(b) }) @@ -220,7 +220,7 @@ describe("The Utils functions namespace", () => { }) it("exports a partial utility", () => { - let add = function (x, y) { + const add = function (x, y) { return x + y } expect(partial(add)(2, 3)).to.equal(5) diff --git a/packages/chevrotain/test_integration/sanity/json_parser_spec.js b/packages/chevrotain/test_integration/sanity/json_parser_spec.js index 345a305ea..2bc793253 100644 --- a/packages/chevrotain/test_integration/sanity/json_parser_spec.js +++ b/packages/chevrotain/test_integration/sanity/json_parser_spec.js @@ -1,4 +1,4 @@ -;(function (root, factory) { 
+(function (root, factory) { if (typeof define === "function" && define.amd) { // AMD. Register as an anonymous module. define(["chevrotain"], factory) diff --git a/tslint.json b/tslint.json deleted file mode 100644 index 14aed4293..000000000 --- a/tslint.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "rules": { - "class-name": true, - "curly": true, - "forin": true, - "label-position": true, - "no-arg": true, - "no-bitwise": true, - "no-console": [true, "debug", "info", "time", "timeEnd", "trace"], - "no-construct": true, - "no-debugger": true, - "no-var-keyword": true, - "no-duplicate-variable": true, - "no-eval": true, - "no-string-literal": false, - "no-switch-case-fall-through": true, - "no-unused-expression": false, - "no-use-before-declare": false, - "radix": true, - "triple-equals": [true, "allow-null-check"], - "variable-name": false - } -} diff --git a/yarn.lock b/yarn.lock index 10a9a22d3..e3dc7dfa8 100644 --- a/yarn.lock +++ b/yarn.lock @@ -970,6 +970,22 @@ resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.2.tgz#8f03a22a04de437254e8ce8cc84ba39689288752" integrity sha512-HyYEUDeIj5rRQU2Hk5HTB2uHsbRQpF70nvMhVzi+VJR0X+xNEhjPui4/kBf3VeH/wqD28PT4sVOm8qqLjBrSZg== +"@eslint/eslintrc@^0.3.0": + version "0.3.0" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-0.3.0.tgz#d736d6963d7003b6514e6324bec9c602ac340318" + integrity sha512-1JTKgrOKAHVivSvOYw+sJOunkBjUOvjqWk1DPja7ZFhIS2mX/4EgTT8M7eTK9jrKhL/FvXXEbQwIs3pg1xp3dg== + dependencies: + ajv "^6.12.4" + debug "^4.1.1" + espree "^7.3.0" + globals "^12.1.0" + ignore "^4.0.6" + import-fresh "^3.2.1" + js-yaml "^3.13.1" + lodash "^4.17.20" + minimatch "^3.0.4" + strip-json-comments "^3.1.1" + "@evocateur/libnpmaccess@^3.1.2": version "3.1.2" resolved "https://registry.yarnpkg.com/@evocateur/libnpmaccess/-/libnpmaccess-3.1.2.tgz#ecf7f6ce6b004e9f942b098d92200be4a4b1c845" @@ -1752,11 +1768,32 @@ call-me-maybe "^1.0.1" glob-to-regexp "^0.3.0" +"@nodelib/fs.scandir@2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" + integrity sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA== + dependencies: + "@nodelib/fs.stat" "2.0.4" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.4", "@nodelib/fs.stat@^2.0.2": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz#a3f2dd61bab43b8db8fa108a121cfffe4c676655" + integrity sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q== + "@nodelib/fs.stat@^1.1.2": version "1.1.3" resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b" integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw== +"@nodelib/fs.walk@^1.2.3": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz#cce9396b30aa5afe9e3756608f5831adcb53d063" + integrity sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow== + dependencies: + "@nodelib/fs.scandir" "2.1.4" + fastq "^1.6.0" + "@octokit/endpoint@^5.5.0": version "5.5.1" resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-5.5.1.tgz#2eea81e110ca754ff2de11c79154ccab4ae16b3f" @@ -1909,6 +1946,11 @@ resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.6.tgz#f4c7ec43e81b319a9815115031709f26987891f0" integrity 
sha512-3c+yGKvVP5Y9TYBEibGNR+kLtijnj7mYrXRg+WpFb2X9xm04g/DXYkfg4hmzJQosc9snFNUPkbYIhu+KAm6jJw== +"@types/json-schema@^7.0.3": + version "7.0.7" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.7.tgz#98a993516c859eb0d5c4c8f098317a9ea68db9ad" + integrity sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA== + "@types/minimatch@*": version "3.0.3" resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" @@ -1947,6 +1989,111 @@ resolved "https://registry.yarnpkg.com/@types/sinon/-/sinon-7.5.1.tgz#d27b81af0d1cfe1f9b24eebe7a24f74ae40f5b7c" integrity sha512-EZQUP3hSZQyTQRfiLqelC9NMWd1kqLcmQE0dMiklxBkgi84T+cHOhnKpgk4NnOWpGX863yE6+IaGnOXUNFqDnQ== +"@typescript-eslint/eslint-plugin@4.15.0": + version "4.15.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.15.0.tgz#13a5a07cf30d0d5781e43480aa2a8d38d308b084" + integrity sha512-DJgdGZW+8CFUTz5C/dnn4ONcUm2h2T0itWD85Ob5/V27Ndie8hUoX5HKyGssvR8sUMkAIlUc/AMK67Lqa3kBIQ== + dependencies: + "@typescript-eslint/experimental-utils" "4.15.0" + "@typescript-eslint/scope-manager" "4.15.0" + debug "^4.1.1" + functional-red-black-tree "^1.0.1" + lodash "^4.17.15" + regexpp "^3.0.0" + semver "^7.3.2" + tsutils "^3.17.1" + +"@typescript-eslint/experimental-utils@4.15.0": + version "4.15.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/experimental-utils/-/experimental-utils-4.15.0.tgz#b87c36410a9b23f637689427be85007a2ec1a9c6" + integrity sha512-V4vaDWvxA2zgesg4KPgEGiomWEBpJXvY4ZX34Y3qxK8LUm5I87L+qGIOTd9tHZOARXNRt9pLbblSKiYBlGMawg== + dependencies: + "@types/json-schema" "^7.0.3" + "@typescript-eslint/scope-manager" "4.15.0" + "@typescript-eslint/types" "4.15.0" + "@typescript-eslint/typescript-estree" "4.15.0" + eslint-scope "^5.0.0" + eslint-utils "^2.0.0" + +"@typescript-eslint/parser@4.14.0": + version "4.14.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-4.14.0.tgz#62d4cd2079d5c06683e9bfb200c758f292c4dee7" + integrity sha512-sUDeuCjBU+ZF3Lzw0hphTyScmDDJ5QVkyE21pRoBo8iDl7WBtVFS+WDN3blY1CH3SBt7EmYCw6wfmJjF0l/uYg== + dependencies: + "@typescript-eslint/scope-manager" "4.14.0" + "@typescript-eslint/types" "4.14.0" + "@typescript-eslint/typescript-estree" "4.14.0" + debug "^4.1.1" + +"@typescript-eslint/scope-manager@4.14.0": + version "4.14.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-4.14.0.tgz#55a4743095d684e1f7b7180c4bac2a0a3727f517" + integrity sha512-/J+LlRMdbPh4RdL4hfP1eCwHN5bAhFAGOTsvE6SxsrM/47XQiPSgF5MDgLyp/i9kbZV9Lx80DW0OpPkzL+uf8Q== + dependencies: + "@typescript-eslint/types" "4.14.0" + "@typescript-eslint/visitor-keys" "4.14.0" + +"@typescript-eslint/scope-manager@4.15.0": + version "4.15.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-4.15.0.tgz#c42703558ea6daaaba51a9c3a86f2902dbab9432" + integrity sha512-CSNBZnCC2jEA/a+pR9Ljh8Y+5TY5qgbPz7ICEk9WCpSEgT6Pi7H2RIjxfrrbUXvotd6ta+i27sssKEH8Azm75g== + dependencies: + "@typescript-eslint/types" "4.15.0" + "@typescript-eslint/visitor-keys" "4.15.0" + +"@typescript-eslint/types@4.14.0": + version "4.14.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-4.14.0.tgz#d8a8202d9b58831d6fd9cee2ba12f8a5a5dd44b6" + integrity sha512-VsQE4VvpldHrTFuVPY1ZnHn/Txw6cZGjL48e+iBxTi2ksa9DmebKjAeFmTVAYoSkTk7gjA7UqJ7pIsyifTsI4A== + +"@typescript-eslint/types@4.15.0": + version "4.15.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/types/-/types-4.15.0.tgz#3011ae1ac3299bb9a5ac56bdd297cccf679d3662" + integrity sha512-su4RHkJhS+iFwyqyXHcS8EGPlUVoC+XREfy5daivjLur9JP8GhvTmDipuRpcujtGC4M+GYhUOJCPDE3rC5NJrg== + +"@typescript-eslint/typescript-estree@4.14.0": + version "4.14.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-4.14.0.tgz#4bcd67486e9acafc3d0c982b23a9ab8ac8911ed7" + integrity sha512-wRjZ5qLao+bvS2F7pX4qi2oLcOONIB+ru8RGBieDptq/SudYwshveORwCVU4/yMAd4GK7Fsf8Uq1tjV838erag== + dependencies: + "@typescript-eslint/types" "4.14.0" + "@typescript-eslint/visitor-keys" "4.14.0" + debug "^4.1.1" + globby "^11.0.1" + is-glob "^4.0.1" + lodash "^4.17.15" + semver "^7.3.2" + tsutils "^3.17.1" + +"@typescript-eslint/typescript-estree@4.15.0": + version "4.15.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-4.15.0.tgz#402c86a7d2111c1f7a2513022f22a38a395b7f93" + integrity sha512-jG6xTmcNbi6xzZq0SdWh7wQ9cMb2pqXaUp6bUZOMsIlu5aOlxGxgE/t6L/gPybybQGvdguajXGkZKSndZJpksA== + dependencies: + "@typescript-eslint/types" "4.15.0" + "@typescript-eslint/visitor-keys" "4.15.0" + debug "^4.1.1" + globby "^11.0.1" + is-glob "^4.0.1" + semver "^7.3.2" + tsutils "^3.17.1" + +"@typescript-eslint/visitor-keys@4.14.0": + version "4.14.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-4.14.0.tgz#b1090d9d2955b044b2ea2904a22496849acbdf54" + integrity sha512-MeHHzUyRI50DuiPgV9+LxcM52FCJFYjJiWHtXlbyC27b80mfOwKeiKI+MHOTEpcpfmoPFm/vvQS88bYIx6PZTA== + dependencies: + "@typescript-eslint/types" "4.14.0" + eslint-visitor-keys "^2.0.0" + +"@typescript-eslint/visitor-keys@4.15.0": + version "4.15.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-4.15.0.tgz#2a07768df30c8a5673f1bce406338a07fdec38ca" + integrity sha512-RnDtJwOwFucWFAMjG3ghCG/ikImFJFEg20DI7mn4pHEx3vC48lIAoyjhffvfHmErRDboUPC7p9Z2il4CLb7qxA== + dependencies: + "@typescript-eslint/types" "4.15.0" + eslint-visitor-keys "^2.0.0" + "@ungap/promise-all-settled@1.1.2": version "1.1.2" resolved "https://registry.yarnpkg.com/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz#aa58042711d6e3275dd37dc597e5d31e8c290a44" @@ -2516,6 +2663,11 @@ acorn-globals@^6.0.0: acorn "^7.1.1" acorn-walk "^7.1.1" +acorn-jsx@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.1.tgz#fc8661e11b7ac1539c47dbfea2e72b3af34d267b" + integrity sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng== + acorn-walk@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-7.1.1.tgz#345f0dffad5c735e7373d2fec9a1023e6a44b83e" @@ -2531,6 +2683,11 @@ acorn@^7.1.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.2.0.tgz#17ea7e40d7c8640ff54a694c889c26f31704effe" integrity sha512-apwXVmYVpQ34m/i71vrApRrRKCWQnZZF1+npOD0WV5xZFfwWOmKGQ2RWlfdy9vWITsenisM8M0Qeq8agcFHNiQ== +acorn@^7.4.0: + version "7.4.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" + integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== + acorn@^8.0.1, acorn@^8.0.4: version "8.0.4" resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.0.4.tgz#7a3ae4191466a6984eee0fe3407a4f3aa9db8354" @@ -2580,7 +2737,7 @@ ajv-keywords@^3.1.0, ajv-keywords@^3.4.1, ajv-keywords@^3.5.2: resolved 
"https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d" integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ== -ajv@^6.1.0, ajv@^6.10.2, ajv@^6.12.0, ajv@^6.12.5, ajv@^6.5.5: +ajv@^6.1.0, ajv@^6.10.0, ajv@^6.10.2, ajv@^6.12.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.5.5: version "6.12.6" resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== @@ -2590,6 +2747,16 @@ ajv@^6.1.0, ajv@^6.10.2, ajv@^6.12.0, ajv@^6.12.5, ajv@^6.5.5: json-schema-traverse "^0.4.1" uri-js "^4.2.2" +ajv@^7.0.2: + version "7.1.1" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-7.1.1.tgz#1e6b37a454021fa9941713f38b952fc1c8d32a84" + integrity sha512-ga/aqDYnUy/o7vbsRTFhhTsNeXiYb5JWDIcRIeZfwRNCefwjNTVYCGdGSUrEmiu3yDK3vFvNbgJxvrQW4JXrYQ== + dependencies: + fast-deep-equal "^3.1.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.2.2" + algoliasearch@^3.24.5: version "3.35.1" resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-3.35.1.tgz#297d15f534a3507cab2f5dfb996019cac7568f0c" @@ -2800,6 +2967,11 @@ array-union@^1.0.1, array-union@^1.0.2: dependencies: array-uniq "^1.0.1" +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + array-uniq@^1.0.1: version "1.0.3" resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" @@ -3223,11 +3395,6 @@ buffer@^4.3.0: ieee754 "^1.1.4" isarray "^1.0.0" -builtin-modules@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" - integrity sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8= - builtin-status-codes@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" @@ -3461,7 +3628,7 @@ chalk@^1.1.3: strip-ansi "^3.0.0" supports-color "^2.0.0" -chalk@^2.0.0, chalk@^2.3.0, chalk@^2.3.1, chalk@^2.3.2, chalk@^2.4.1, chalk@^2.4.2: +chalk@^2.0.0, chalk@^2.3.1, chalk@^2.3.2, chalk@^2.4.1, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -3777,7 +3944,7 @@ commander@2.17.x: resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf" integrity sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg== -commander@^2.12.1, commander@^2.20.0, commander@~2.20.3: +commander@^2.20.0, commander@~2.20.3: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== @@ -4215,7 +4382,7 @@ cross-spawn@^7.0.0: shebang-command "^2.0.0" which "^2.0.1" -cross-spawn@^7.0.3: +cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved 
"https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -4528,7 +4695,7 @@ debug@3.1.0, debug@~3.1.0: dependencies: ms "2.0.0" -debug@4.3.1, debug@^4.1.0, debug@^4.1.1, debug@^4.2.0: +debug@4.3.1, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.2.0: version "4.3.1" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== @@ -4611,7 +4778,7 @@ deep-extend@^0.6.0: resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== -deep-is@~0.1.3: +deep-is@^0.1.3, deep-is@~0.1.3: version "0.1.3" resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= @@ -4761,7 +4928,7 @@ diff@5.0.0: resolved "https://registry.yarnpkg.com/diff/-/diff-5.0.0.tgz#7ed6ad76d859d030787ec35855f5b1daf31d852b" integrity sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w== -diff@^4.0.1, diff@^4.0.2: +diff@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== @@ -4782,6 +4949,13 @@ dir-glob@^2.0.0, dir-glob@^2.2.2: dependencies: path-type "^3.0.0" +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + dns-equal@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" @@ -4815,6 +4989,13 @@ docsearch.js@^2.5.2: to-factory "^1.0.0" zepto "^1.2.0" +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== + dependencies: + esutils "^2.0.2" + dom-converter@^0.2: version "0.2.0" resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" @@ -5015,7 +5196,7 @@ enhanced-resolve@^5.7.0: graceful-fs "^4.2.4" tapable "^2.2.0" -enquirer@^2.3.6: +enquirer@^2.3.5, enquirer@^2.3.6: version "2.3.6" resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg== @@ -5178,6 +5359,19 @@ escodegen@^1.14.1: optionalDependencies: source-map "~0.6.1" +eslint-config-prettier@7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-7.2.0.tgz#f4a4bd2832e810e8cc7c1411ec85b3e85c0c53f9" + integrity sha512-rV4Qu0C3nfJKPOAhFujFxB7RMP+URFyQqqOZW9DMRD7ZDTFyjaIlETU3xzHELt++4ugC0+Jm084HQYkkJe+Ivg== + +eslint-plugin-eslint-comments@3.2.0: + version "3.2.0" + resolved 
"https://registry.yarnpkg.com/eslint-plugin-eslint-comments/-/eslint-plugin-eslint-comments-3.2.0.tgz#9e1cd7b4413526abb313933071d7aba05ca12ffa" + integrity sha512-0jkOl0hfojIHHmEHgmNdqv4fmh7300NdpA9FFpF7zaoLvB/QeXOGNLIo86oAveJFrfB1p05kC8hpEMHM8DwWVQ== + dependencies: + escape-string-regexp "^1.0.5" + ignore "^5.0.5" + eslint-scope@^4.0.3: version "4.0.3" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" @@ -5186,7 +5380,7 @@ eslint-scope@^4.0.3: esrecurse "^4.1.0" estraverse "^4.1.1" -eslint-scope@^5.1.1: +eslint-scope@^5.0.0, eslint-scope@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== @@ -5194,16 +5388,92 @@ eslint-scope@^5.1.1: esrecurse "^4.3.0" estraverse "^4.1.1" +eslint-utils@^2.0.0, eslint-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27" + integrity sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg== + dependencies: + eslint-visitor-keys "^1.1.0" + +eslint-visitor-keys@^1.1.0, eslint-visitor-keys@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e" + integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ== + +eslint-visitor-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz#21fdc8fbcd9c795cc0321f0563702095751511a8" + integrity sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ== + +eslint@7.18.0: + version "7.18.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-7.18.0.tgz#7fdcd2f3715a41fe6295a16234bd69aed2c75e67" + integrity sha512-fbgTiE8BfUJZuBeq2Yi7J3RB3WGUQ9PNuNbmgi6jt9Iv8qrkxfy19Ds3OpL1Pm7zg3BtTVhvcUZbIRQ0wmSjAQ== + dependencies: + "@babel/code-frame" "^7.0.0" + "@eslint/eslintrc" "^0.3.0" + ajv "^6.10.0" + chalk "^4.0.0" + cross-spawn "^7.0.2" + debug "^4.0.1" + doctrine "^3.0.0" + enquirer "^2.3.5" + eslint-scope "^5.1.1" + eslint-utils "^2.1.0" + eslint-visitor-keys "^2.0.0" + espree "^7.3.1" + esquery "^1.2.0" + esutils "^2.0.2" + file-entry-cache "^6.0.0" + functional-red-black-tree "^1.0.1" + glob-parent "^5.0.0" + globals "^12.1.0" + ignore "^4.0.6" + import-fresh "^3.0.0" + imurmurhash "^0.1.4" + is-glob "^4.0.0" + js-yaml "^3.13.1" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.4.1" + lodash "^4.17.20" + minimatch "^3.0.4" + natural-compare "^1.4.0" + optionator "^0.9.1" + progress "^2.0.0" + regexpp "^3.1.0" + semver "^7.2.1" + strip-ansi "^6.0.0" + strip-json-comments "^3.1.0" + table "^6.0.4" + text-table "^0.2.0" + v8-compile-cache "^2.0.3" + esm@^3.2.25: version "3.2.25" resolved "https://registry.yarnpkg.com/esm/-/esm-3.2.25.tgz#342c18c29d56157688ba5ce31f8431fbb795cc10" integrity sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA== +espree@^7.3.0, espree@^7.3.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/espree/-/espree-7.3.1.tgz#f2df330b752c6f55019f8bd89b7660039c1bbbb6" + integrity sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g== + dependencies: + acorn 
"^7.4.0" + acorn-jsx "^5.3.1" + eslint-visitor-keys "^1.3.0" + esprima@^4.0.0, esprima@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== +esquery@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.4.0.tgz#2148ffc38b82e8c7057dfed48425b3e61f0f24a5" + integrity sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w== + dependencies: + estraverse "^5.1.0" + esrecurse@^4.1.0: version "4.2.1" resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" @@ -5223,7 +5493,7 @@ estraverse@^4.1.0, estraverse@^4.1.1, estraverse@^4.2.0: resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== -estraverse@^5.2.0: +estraverse@^5.1.0, estraverse@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.2.0.tgz#307df42547e6cc7324d3cf03c155d5cdb8c53880" integrity sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ== @@ -5442,12 +5712,24 @@ fast-glob@^2.2.6: merge2 "^1.2.3" micromatch "^3.1.10" +fast-glob@^3.1.1: + version "3.2.5" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.5.tgz#7939af2a656de79a4f1901903ee8adcaa7cb9661" + integrity sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.0" + merge2 "^1.3.0" + micromatch "^4.0.2" + picomatch "^2.2.1" + fast-json-stable-stringify@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== -fast-levenshtein@~2.0.6: +fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6: version "2.0.6" resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= @@ -5457,6 +5739,13 @@ fastest-levenshtein@^1.0.12: resolved "https://registry.yarnpkg.com/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz#9990f7d3a88cc5a9ffd1f1745745251700d497e2" integrity sha512-On2N+BpYJ15xIC974QNVuYGMOlEVt4s0EOI3wwMqOmK1fdDY+FN/zltPV8vosq4ad4c/gJ1KHScUn/6AWIgiow== +fastq@^1.6.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.11.0.tgz#bb9fb955a07130a918eb63c1f5161cc32a5d0858" + integrity sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g== + dependencies: + reusify "^1.0.4" + faye-websocket@^0.10.0: version "0.10.0" resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" @@ -5490,6 +5779,13 @@ figures@^3.0.0, figures@^3.2.0: dependencies: escape-string-regexp "^1.0.5" +file-entry-cache@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" + integrity 
sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg== + dependencies: + flat-cache "^3.0.4" + file-loader@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-3.0.1.tgz#f8e0ba0b599918b51adfe45d66d1e771ad560faa" @@ -5619,11 +5915,24 @@ findup-sync@^3.0.0: micromatch "^3.0.4" resolve-dir "^1.0.1" +flat-cache@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.0.4.tgz#61b0338302b2fe9f957dcc32fc2a87f1c3048b11" + integrity sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg== + dependencies: + flatted "^3.1.0" + rimraf "^3.0.2" + flat@^5.0.2: version "5.0.2" resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== +flatted@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.1.1.tgz#c4b489e80096d9df1dfc97c79871aea7c617c469" + integrity sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA== + flush-write-stream@^1.0.0: version "1.1.1" resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8" @@ -5767,6 +6076,11 @@ function-bind@^1.1.1: resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= + gauge@~2.7.3: version "2.7.4" resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" @@ -5953,6 +6267,13 @@ glob-parent@^5.0.0, glob-parent@~5.1.0: dependencies: is-glob "^4.0.1" +glob-parent@^5.1.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.1.tgz#b6c1ef417c4e5663ea498f1c45afac6916bbc229" + integrity sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ== + dependencies: + is-glob "^4.0.1" + glob-to-regexp@^0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" @@ -6034,6 +6355,25 @@ globals@^11.1.0: resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== +globals@^12.1.0: + version "12.4.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-12.4.0.tgz#a18813576a41b00a24a97e7f815918c2e19925f8" + integrity sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg== + dependencies: + type-fest "^0.8.1" + +globby@^11.0.1: + version "11.0.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.2.tgz#1af538b766a3b540ebfb58a32b2e2d5897321d83" + integrity sha512-2ZThXDvvV8fYFRVIxnrMQBipZQDr7MxKAmQK1vujaj9/7eF0efG7BPUKJ7jP7G5SLF37xKDXvO4S/KKLj/Z0og== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + globby@^6.1.0: version "6.1.0" 
resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" @@ -6542,11 +6882,16 @@ ignore@^3.3.5: resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" integrity sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug== -ignore@^4.0.3: +ignore@^4.0.3, ignore@^4.0.6: version "4.0.6" resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== +ignore@^5.0.5, ignore@^5.1.4: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + immediate@^3.2.3: version "3.2.3" resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" @@ -7326,11 +7671,21 @@ json-schema-traverse@^0.4.1: resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + json-schema@0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM= +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= + json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" @@ -7490,6 +7845,14 @@ levenary@^1.1.1: dependencies: leven "^3.1.0" +levn@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade" + integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== + dependencies: + prelude-ls "^1.2.1" + type-check "~0.4.0" + levn@~0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" @@ -7721,7 +8084,7 @@ lodash@4.17.15: resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== -lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.21, lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5, lodash@^4.2.1, lodash@~4.17.10: +lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5, lodash@^4.2.1, lodash@~4.17.10: version "4.17.21" resolved 
"https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -8053,6 +8416,11 @@ merge2@^1.2.3: resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.3.0.tgz#5b366ee83b2f1582c48f87e47cf1a9352103ca81" integrity sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw== +merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + merge@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.1.tgz#38bebf80c3220a8a487b6fcfb3941bb11720c145" @@ -8392,6 +8760,11 @@ nanomatch@^1.2.9: snapdragon "^0.8.1" to-regex "^3.0.1" +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= + negotiator@0.6.2: version "0.6.2" resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" @@ -8883,6 +9256,18 @@ optionator@^0.8.1: type-check "~0.3.2" word-wrap "~1.2.3" +optionator@^0.9.1: + version "0.9.1" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.1.tgz#4f236a6373dae0566a6d43e1326674f50c291499" + integrity sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw== + dependencies: + deep-is "^0.1.3" + fast-levenshtein "^2.0.6" + levn "^0.4.1" + prelude-ls "^1.2.1" + type-check "^0.4.0" + word-wrap "^1.2.3" + original@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f" @@ -9748,6 +10133,11 @@ postcss@^7.0.14: source-map "^0.6.1" supports-color "^6.1.0" +prelude-ls@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== + prelude-ls@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" @@ -9810,7 +10200,7 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= -progress@^2.0.3: +progress@^2.0.0, progress@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== @@ -9973,6 +10363,11 @@ querystringify@^2.1.1: resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.1.1.tgz#60e5a5fd64a7f8bfa4d2ab2ed6fdf4c85bad154e" integrity sha512-w7fLxIRCRT7U8Qu53jQnJyPkYZIaR4n5151KMfcJlO/A9397Wxb1amJvROTK6TOnp7PfoAmg/qXiNHI+08jRfA== +queue-microtask@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.2.tgz#abf64491e6ecf0f38a6502403d4cda04f372dfd3" + integrity sha512-dB15eXv3p2jDlbOiNLyMabYg1/sXvppd8DP2J3EOCQ0AkuSXCW2tP7mnVouVLJKgUMY6yP0kcQDVpLCN13h4Xg== + quick-lru@^1.0.0: version "1.1.0" resolved 
"https://registry.yarnpkg.com/quick-lru/-/quick-lru-1.1.0.tgz#4360b17c61136ad38078397ff11416e186dcfbb8" @@ -10227,6 +10622,11 @@ regexp.prototype.flags@^1.2.0: define-properties "^1.1.3" es-abstract "^1.17.0-next.1" +regexpp@^3.0.0, regexpp@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.1.0.tgz#206d0ad0a5648cffbdb8ae46438f3dc51c9f78e2" + integrity sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q== + regexpu-core@^4.7.0: version "4.7.0" resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.7.0.tgz#fcbf458c50431b0bb7b45d6967b8192d91f3d938" @@ -10479,6 +10879,11 @@ retry@^0.12.0: resolved "https://registry.yarnpkg.com/retry/-/retry-0.12.0.tgz#1b42a6266a21f07421d1b0b54b7dc167b01c013b" integrity sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs= +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + rgb-regex@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" @@ -10501,7 +10906,7 @@ rimraf@^2.5.4, rimraf@^2.6.2, rimraf@^2.6.3: dependencies: glob "^7.1.3" -rimraf@^3.0.0: +rimraf@^3.0.0, rimraf@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== @@ -10523,6 +10928,13 @@ run-async@^2.2.0: dependencies: is-promise "^2.1.0" +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + run-queue@^1.0.0, run-queue@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47" @@ -10646,7 +11058,7 @@ semver-regex@^3.1.2: resolved "https://registry.yarnpkg.com/semver-regex/-/semver-regex-3.1.2.tgz#34b4c0d361eef262e07199dbef316d0f2ab11807" integrity sha512-bXWyL6EAKOJa81XG1OZ/Yyuq+oT0b2YLlxx7c+mrdYPaPbnj6WgVULXhinMIeZGufuUBu/eVRqXEhiv4imfwxA== -"semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0, semver@^5.7.0, semver@^5.7.1: +"semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", semver@^5.1.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0, semver@^5.7.0, semver@^5.7.1: version "5.7.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== @@ -10656,7 +11068,7 @@ semver@7.0.0: resolved "https://registry.yarnpkg.com/semver/-/semver-7.0.0.tgz#5f3ca35761e47e05b206c6daff2cf814f0316b8e" integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== -semver@7.3.4: +semver@7.3.4, semver@^7.2.1, semver@^7.3.2: version "7.3.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.4.tgz#27aaa7d2e4ca76452f98d3add093a72c943edc97" integrity sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw== @@ 
-11384,7 +11796,7 @@ strip-json-comments@3.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.0.1.tgz#85713975a91fb87bf1b305cca77395e40d2a64a7" integrity sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw== -strip-json-comments@3.1.1: +strip-json-comments@3.1.1, strip-json-comments@^3.1.0, strip-json-comments@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== @@ -11497,6 +11909,16 @@ symbol-tree@^3.2.4: resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== +table@^6.0.4: + version "6.0.7" + resolved "https://registry.yarnpkg.com/table/-/table-6.0.7.tgz#e45897ffbcc1bcf9e8a87bf420f2c9e5a7a52a34" + integrity sha512-rxZevLGTUzWna/qBLObOe16kB2RTnnbhciwgPbMMlazz1yZGVEgnZK762xyVdVznhqxrfCeBMmMkgOOaPwjH7g== + dependencies: + ajv "^7.0.2" + lodash "^4.17.20" + slice-ansi "^4.0.0" + string-width "^4.2.0" + tapable@^1.0.0, tapable@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" @@ -11782,34 +12204,15 @@ trim-off-newlines@^1.0.0: resolved "https://registry.yarnpkg.com/trim-off-newlines/-/trim-off-newlines-1.0.1.tgz#9f9ba9d9efa8764c387698bcbfeb2c848f11adb3" integrity sha1-n5up2e+odkw4dpi8v+sshI8RrbM= -tslib@^1.13.0, tslib@^1.8.1, tslib@^1.9.0: +tslib@^1.8.1, tslib@^1.9.0: version "1.13.0" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.13.0.tgz#c881e13cc7015894ed914862d276436fa9a47043" integrity sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q== -tslint@^6.0.0: - version "6.1.3" - resolved "https://registry.yarnpkg.com/tslint/-/tslint-6.1.3.tgz#5c23b2eccc32487d5523bd3a470e9aa31789d904" - integrity sha512-IbR4nkT96EQOvKE2PW/djGz8iGNeJ4rF2mBfiYaR/nvUWYKJhLwimoJKgjIFEIDibBtOevj7BqCRL4oHeWWUCg== - dependencies: - "@babel/code-frame" "^7.0.0" - builtin-modules "^1.1.1" - chalk "^2.3.0" - commander "^2.12.1" - diff "^4.0.1" - glob "^7.1.1" - js-yaml "^3.13.1" - minimatch "^3.0.4" - mkdirp "^0.5.3" - resolve "^1.3.2" - semver "^5.3.0" - tslib "^1.13.0" - tsutils "^2.29.0" - -tsutils@^2.29.0: - version "2.29.0" - resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-2.29.0.tgz#32b488501467acbedd4b85498673a0812aca0b99" - integrity sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA== +tsutils@^3.17.1: + version "3.20.0" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.20.0.tgz#ea03ea45462e146b53d70ce0893de453ff24f698" + integrity sha512-RYbuQuvkhuqVeXweWT3tJLKOEJ/UUw9GjNEZGWdrLLlM+611o1gwLHBpxoFJKKl25fLprp2eVthtKs5JOrNeXg== dependencies: tslib "^1.8.1" @@ -11830,6 +12233,13 @@ tweetnacl@^0.14.3, tweetnacl@~0.14.0: resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= +type-check@^0.4.0, type-check@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== + dependencies: + 
prelude-ls "^1.2.1" + type-check@~0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" @@ -12175,7 +12585,7 @@ uuid@^3.3.3: resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== -v8-compile-cache@^2.2.0: +v8-compile-cache@^2.0.3, v8-compile-cache@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.2.0.tgz#9471efa3ef9128d2f7c6a7ca39c4dd6b5055b132" integrity sha512-gTpR5XQNKFwOd4clxfnhaqvfqMpqEwr4tOtCyz4MtYZX2JYhfr1JvBFKdS+7K/9rfpZR3VLX+YWBbKoxCgS43Q== @@ -12680,7 +13090,7 @@ windows-release@^3.1.0: dependencies: execa "^1.0.0" -word-wrap@^1.0.3, word-wrap@~1.2.3: +word-wrap@^1.0.3, word-wrap@^1.2.3, word-wrap@~1.2.3: version "1.2.3" resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==