diff --git a/Aptfile b/Aptfile
new file mode 100644
index 0000000..6db3ac7
--- /dev/null
+++ b/Aptfile
@@ -0,0 +1 @@
+swi-prolog
diff --git a/Procfile b/Procfile
new file mode 100644
index 0000000..e1d4131
--- /dev/null
+++ b/Procfile
@@ -0,0 +1 @@
+web: node app.js
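
Context for the two new files above: `Aptfile` is consumed by Heroku's community apt buildpack (assuming that buildpack is configured for this app) to install `swi-prolog`, the Prolog system the EYE reasoner runs on, and `Procfile` declares the web dyno that starts the Node server.
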
diff --git a/app.js b/app.js
index 7424fb4..f85cae9 100644
--- a/app.js
+++ b/app.js
@@ -6,8 +6,11 @@ const path = require('path')
const { config } = require('./config.js')
const tmp = require('./lib/tmp.js')
const eye = require('./lib/eye/eye.js')
-const cwm = require('./lib/cwm/cwm.js')
+// const cwm = require('./lib/cwm/cwm.js')
const jen3 = require('./lib/jen3/jen3.js')
+const jena = require('./lib/jena/jena.js')
+const triplify = require('./lib/triplify/triplify.js')
+const spin3 = require('./lib/spin3/spin3.js')
const { generateLink, resolveLink } = require('./lib/gen_link.js')
const { checkBuiltinInput } = require('./lib/check_builtin_input.js')
@@ -17,7 +20,15 @@ app.use(bodyParser.json())
app.use(bodyParser.urlencoded({ extended: true }))
app.use('/n3/editor/s/*', (req, res) => {
- res.sendFile(path.join(__dirname, "editor/index.html"));
+ res.sendFile(path.join(__dirname, "editor/index.html"));
+});
+app.use('/n3/out', express.static("out"));
+app.use('/n3/editor/out', express.static("out"));
+app.use('/n3/spin3*', (req, res) => {
+ res.sendFile(path.join(__dirname, "editor/spin3.html"));
+});
+app.use('/n3/sparql*', (req, res) => {
+ res.sendFile(path.join(__dirname, "editor/sparql.html"));
});
app.use('/n3/editor', express.static(path.join(__dirname, "editor")));
app.use('/n3/lib/eyebrow', express.static(path.join(__dirname, "lib/eyebrow")));
@@ -46,8 +57,9 @@ app.post('/n3', (request, response) => {
const data = request.body
// console.log("data:", data);
console.log(
- "task:", data.task,
- (data.system? ", system: " + data.system : "")
+ "task:", data.task,
+ (data.subTask ? ", subTask: " + data.subTask : ""),
+ (data.system ? ", system: " + data.system : "")
);
function ctu(ret) {
@@ -73,18 +85,30 @@ app.post('/n3', (request, response) => {
doImperating(data, ctu)
break
- case 'generate_link':
- doGenerateLink(data, ctu)
+ case 'triplify':
+ doTriplify(data, ctu)
break
- case 'resolve_link':
- doResolveLink(data, ctu)
+ case 'query':
+ doQuery(data, ctu)
break
+ case 'spin3':
+ doSpin3(data, ctu);
+ break
+
case 'check_builtin_input':
doCheckBuiltinInput(data, ctu)
break
+ case 'generate_link':
+ doGenerateLink(data, ctu)
+ break
+
+ case 'resolve_link':
+ doResolveLink(data, ctu)
+ break
+
default:
ctu({ error: 'unknown task: ' + data.task })
}
@@ -93,15 +117,12 @@ app.post('/n3', (request, response) => {
app.listen(config.http.port)
console.log(`Listening at ${config.http.hostname}:${config.http.port}`)
-function doReasoning(options, ctu) {
- tmp.save(options.formula, (file) => {
+async function doReasoning(options, ctu) {
+ let file;
+ try {
+ file = await tmp.save(options.formula)
- function end(ret) {
- tmp.del(file)
- ctu(ret)
- }
-
- var reasoner = null;
+ let reasoner = null;
switch (options.system) {
case "eye":
reasoner = eye
@@ -116,46 +137,130 @@ function doReasoning(options, ctu) {
break
default:
- end({ error: `unsupported system: "${options.system}"` })
- break
+ throw `unsupported system: "${options.system}"`
}
- if (reasoner)
- reasoner.exec(options, file, end)
- })
+
+ const output = await reasoner.exec(options, file)
+ ctu({ success: output })
+
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(file)
+ }
}
-function doExplaining(options, ctu) {
- tmp.save(options.formula, (file) => {
+async function doExplaining(options, ctu) {
+ let file
+ try {
+ file = await tmp.save(options.formula)
- var reasoner = null;
+ let reasoner = null;
switch (options.system) {
case "eye":
reasoner = eye
break
default:
- end({ error: `unsupported system: "${options.system}"` })
- break
+ throw `unsupported system: "${options.system}"`
}
- reasoner.exec(options, file, (explanation) => {
- tmp.del(file)
+ const explanation = await reasoner.exec(options, file)
+ ctu({ success: explanation })
- ctu(explanation)
- })
- })
+ } catch(e) {
+ console.log(e)
+    ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(file)
+ }
}
-function doImperating(options, ctu) {
- tmp.save(options.formula, (file) => {
+async function doImperating(options, ctu) {
+ let file
+ try {
+ file = await tmp.save(options.formula)
+
+ const reasoner = jen3;
+ const code = await reasoner.exec(options, file)
+ ctu({ success: code })
+
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(file)
+ }
+}
- var reasoner = jen3;
- reasoner.exec(options, file, (code) => {
- tmp.del(file)
+async function doTriplify(options, ctu) {
+ let file
+ try {
+    file = await tmp.save(options.formula)
- ctu(code)
- })
- })
+ const code = await triplify.exec(options, file)
+ ctu({ success: code })
+
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(file)
+ }
+}
+
+async function doQuery(options, ctu) {
+ let data, query
+ try {
+ data = await tmp.save(options.data)
+ query = await tmp.save(options.query)
+
+ let engine = null;
+ switch (options.system) {
+
+ case "jena":
+ engine = jena
+ break
+
+ default:
+ throw `unsupported system: "${options.system}"`
+ }
+
+ const output = await engine.exec(options, data, query)
+ ctu({ success: output })
+
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(data)
+ await tmp.del(query)
+ }
+}
+
+async function doSpin3(options, ctu) {
+ let data, query
+ try {
+ data = await tmp.save(options.data)
+ query = await tmp.save(options.query)
+
+ const output = await spin3.exec(options, data, query);
+ ctu({ success: output })
+
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(data)
+ await tmp.del(query)
+ }
}
function doGenerateLink(options, ctu) {
@@ -166,24 +271,28 @@ function doGenerateLink(options, ctu) {
function doResolveLink(options, ctu) {
resolveLink(options.id)
- .then((data) => {
+ .then((data) => {
// console.log("resolved link:", data);
ctu({ success: data })
})
.catch((error) => { ctu({ error: error }) })
}
-function doCheckBuiltinInput(options, ctu) {
- tmp.save(options.definitions, (defFile) => {
- tmp.save(options.test, (testFile) => {
+async function doCheckBuiltinInput(options, ctu) {
+ let defFile, testFile
+ try {
+ defFile = await tmp.save(options.definitions)
+ testFile = await tmp.save(options.test)
- function end(ret) {
- tmp.del(defFile)
- tmp.del(testFile)
- ctu(ret)
- }
+ const output = await checkBuiltinInput(defFile, testFile)
+ ctu({ success: output })
- checkBuiltinInput(defFile, testFile, end)
- })
- })
+ } catch (e) {
+ console.log(e)
+ ctu({ error: e + "" })
+
+ } finally {
+ await tmp.del(defFile)
+ await tmp.del(testFile)
+ }
}
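
The handlers above consistently `await tmp.save(...)` and `await tmp.del(...)`, so `lib/tmp.js` must now expose promise-returning functions instead of the old callback style (`tmp.save(formula, (file) => ...)`). That file is not part of this diff; a minimal sketch of the assumed interface:

```js
// lib/tmp.js — assumed shape, not shown in this diff. save() writes the
// payload to a unique temp file and resolves with its path; del() removes it.
const fs = require('fs/promises');
const os = require('os');
const path = require('path');
const crypto = require('crypto');

exports.save = async function (contents) {
  const file = path.join(os.tmpdir(), `n3-${crypto.randomUUID()}.n3`);
  await fs.writeFile(file, contents, 'utf8');
  return file;
};

exports.del = async function (file) {
  if (!file) return; // the finally blocks may call del() before save() succeeded
  await fs.rm(file, { force: true });
};
```
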
diff --git a/config.js b/config.js
index 4b8bbd3..7e7337d 100644
--- a/config.js
+++ b/config.js
@@ -1,38 +1,50 @@
config = {
http: {
- hostname: 'http://127.0.0.1',
- port: 3002
+ hostname: 'https://n3-editor.herokuapp.com',
+ port: ((typeof process) != "undefined" ? process.env.PORT : undefined)
},
- reasoners: {
+ out: "/app/out",
+
+ tools: {
eye: {
- exec: "/Users/wvw/git/n3/n3-editor-js/opt/eye/bin/eye",
- folder: "/Users/wvw/git/n3/n3-editor-js/lib/eye"
+ exec: "eye",
+ folder: "/app/lib/eye"
},
cwm: {
// (use python2 for cwm)
- pythonCmd: "python",
- exec: "/Users/wvw/cwm-1.2.1/swap/cwm.py"
+ pythonCmd: "", // (python2 is not available)
+ exec: ""
},
jen3: {
- exec: "/Users/wvw/git/n3/n3-editor-js/lib/jen3/jen3.jar",
- codegen: "/Users/wvw/git/n3/n3-editor-js/lib/jen3/codegen.jar",
- folder: "/Users/wvw/git/n3/n3-editor-js/lib/jen3"
- }
+ exec: "/app/lib/jen3/jen3.jar",
+ codegen: "/app/lib/jen3/codegen.jar",
+ folder: "/app/lib/jen3"
+ },
+ jena: {
+ exec: "/app/lib/jena/sparql.jar",
+ },
+ triplify: {
+ exec: "/app/lib/triplify/sparql2spin.jar"
+ },
+ spin3: {
+ folder: "/app/lib/spin3"
+ },
},
link: {
max_len: 50000,
db: {
+ // mysql://b4837d17c012f1:e023e78d@us-cdbr-east-06.cleardb.net/heroku_e750abd160bbcaf?reconnect=true
port: '33060',
- host: 'localhost',
- db: "n3_links",
- user: 'root',
- pwd: ''
+ host: "us-cdbr-east-06.cleardb.net",
+ db: "heroku_e750abd160bbcaf",
+ user: "b4837d17c012f1",
+ pwd: "e023e78d"
}
},
- path: "/Users/wvw/git/n3/n3-editor-js"
+ path: "/Users/wvw/git/n3/n3-editor-js" // ??
}
if (typeof exports === 'object' && typeof module === 'object')
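
For reference, the new `triplify`/`query`/`spin3` tasks added to the `/n3` endpoint are exercised with a JSON POST. A hypothetical client call (field names taken from the handlers in app.js; the response shape assumes the `ctu()` payload is returned as JSON):

```js
// Requires Node 18+ for a global fetch, or a browser context.
const res = await fetch('https://n3-editor.herokuapp.com/n3', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    task: 'query',
    system: 'jena', // the only system doQuery() accepts
    data: '<urn:ex:a> <urn:ex:b> <urn:ex:c> .',
    query: 'SELECT * WHERE { ?s ?p ?o }'
  })
});
const { success, error } = await res.json();
```
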
diff --git a/config_heroku.js b/config_heroku.js
new file mode 100644
index 0000000..7e7337d
--- /dev/null
+++ b/config_heroku.js
@@ -0,0 +1,53 @@
+config = {
+ http: {
+ hostname: 'https://n3-editor.herokuapp.com',
+ port: ((typeof process) != "undefined" ? process.env.PORT : undefined)
+ },
+
+ out: "/app/out",
+
+ tools: {
+ eye: {
+ exec: "eye",
+ folder: "/app/lib/eye"
+ },
+ cwm: {
+ // (use python2 for cwm)
+ pythonCmd: "", // (python2 is not available)
+ exec: ""
+ },
+ jen3: {
+ exec: "/app/lib/jen3/jen3.jar",
+ codegen: "/app/lib/jen3/codegen.jar",
+ folder: "/app/lib/jen3"
+ },
+ jena: {
+ exec: "/app/lib/jena/sparql.jar",
+ },
+ triplify: {
+ exec: "/app/lib/triplify/sparql2spin.jar"
+ },
+ spin3: {
+ folder: "/app/lib/spin3"
+ },
+ },
+
+ link: {
+ max_len: 50000,
+ db: {
+ // mysql://b4837d17c012f1:e023e78d@us-cdbr-east-06.cleardb.net/heroku_e750abd160bbcaf?reconnect=true
+ port: '33060',
+ host: "us-cdbr-east-06.cleardb.net",
+ db: "heroku_e750abd160bbcaf",
+ user: "b4837d17c012f1",
+ pwd: "e023e78d"
+ }
+ },
+
+ path: "/Users/wvw/git/n3/n3-editor-js" // ??
+}
+
+if (typeof exports === 'object' && typeof module === 'object')
+ module.exports = {
+ config
+ };
\ No newline at end of file
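
Note that `config_heroku.js` duplicates the new `config.js` verbatim: the repo keeps one `config_<env>.js` per deployment target (`config_local.js` and `config_ppr.js` below follow the same pattern), and presumably the matching file is copied over `config.js` when deploying — the selection mechanism itself is not shown in this diff.
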
diff --git a/config_local.js b/config_local.js
index 4b8bbd3..9fd7ffe 100644
--- a/config_local.js
+++ b/config_local.js
@@ -4,9 +4,11 @@ config = {
port: 3002
},
- reasoners: {
+ out: "/Users/wvw/git/n3/n3-editor-js/out",
+
+ tools: {
eye: {
- exec: "/Users/wvw/git/n3/n3-editor-js/opt/eye/bin/eye",
+ exec: "eye",
folder: "/Users/wvw/git/n3/n3-editor-js/lib/eye"
},
cwm: {
@@ -18,7 +20,16 @@ config = {
exec: "/Users/wvw/git/n3/n3-editor-js/lib/jen3/jen3.jar",
codegen: "/Users/wvw/git/n3/n3-editor-js/lib/jen3/codegen.jar",
folder: "/Users/wvw/git/n3/n3-editor-js/lib/jen3"
- }
+ },
+ jena: {
+ exec: "/Users/wvw/git/n3/n3-editor-js/lib/jena/sparql.jar",
+ },
+ triplify: {
+ exec: "/Users/wvw/git/n3/n3-editor-js/lib/triplify/sparql2spin.jar"
+ },
+ spin3: {
+ folder: "/Users/wvw/git/n3/n3-editor-js/lib/spin3"
+ },
},
link: {
diff --git a/config_ppr.js b/config_ppr.js
index e9d75e3..7ddf607 100644
--- a/config_ppr.js
+++ b/config_ppr.js
@@ -4,7 +4,7 @@ config = {
port: 3002
},
- reasoners: {
+ tools: {
eye: {
exec: "/home/woensel/projects/n3-editor-js/opt/eye/bin/eye",
folder: "/home/woensel/projects/n3-editor-js/lib/eye"
@@ -18,6 +18,9 @@ config = {
exec: "/home/woensel/projects/n3-editor-js/lib/jen3/jen3.jar",
codegen: "/home/woensel/projects/n3-editor-js/lib/jen3/codegen.jar",
folder: "/home/woensel/projects/n3-editor-js/lib/jen3"
+ },
+ triplify: {
+ exec: "/home/woensel/projects/n3-editor-js/lib/triplify/sparql2spin.jar"
}
},
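
None of the new `lib/` modules (`jena`, `triplify`, `spin3`) appear in this diff, but app.js awaits `engine.exec(options, data, query)` and `config.tools.jena.exec` points at a jar, so each wrapper presumably shells out to Java. A minimal sketch of the assumed shape of `lib/jena/jena.js` (the jar's argument names are a guess):

```js
// lib/jena/jena.js — hypothetical wrapper; app.js awaits exec() and expects
// the engine's stdout as the result string.
const { execFile } = require('child_process');
const { promisify } = require('util');
const { config } = require('../../config.js');

const execFileP = promisify(execFile);

exports.exec = async function (options, dataFile, queryFile) {
  const { stdout } = await execFileP('java',
    ['-jar', config.tools.jena.exec, '--data', dataFile, '--query', queryFile]);
  return stdout;
};
```
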
diff --git a/editor/dist/n3Main.js b/editor/dist/n3Main.js
index 6a63970..ed05e29 100644
--- a/editor/dist/n3Main.js
+++ b/editor/dist/n3Main.js
@@ -1,16722 +1,2 @@
-var n3;
-/******/ (() => { // webpackBootstrap
-/******/ var __webpack_modules__ = ({
-
-/***/ 262:
-/***/ (() => {
-
-/* (ignored) */
-
-/***/ })
-
-/******/ });
-/************************************************************************/
-/******/ // The module cache
-/******/ var __webpack_module_cache__ = {};
-/******/
-/******/ // The require function
-/******/ function __webpack_require__(moduleId) {
-/******/ // Check if module is in cache
-/******/ var cachedModule = __webpack_module_cache__[moduleId];
-/******/ if (cachedModule !== undefined) {
-/******/ return cachedModule.exports;
-/******/ }
-/******/ // Create a new module (and put it into the cache)
-/******/ var module = __webpack_module_cache__[moduleId] = {
-/******/ // no module.id needed
-/******/ // no module.loaded needed
-/******/ exports: {}
-/******/ };
-/******/
-/******/ // Execute the module function
-/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
-/******/
-/******/ // Return the exports of the module
-/******/ return module.exports;
-/******/ }
-/******/
-/************************************************************************/
-/******/ /* webpack/runtime/define property getters */
-/******/ (() => {
-/******/ // define getter functions for harmony exports
-/******/ __webpack_require__.d = (exports, definition) => {
-/******/ for(var key in definition) {
-/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
-/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
-/******/ }
-/******/ }
-/******/ };
-/******/ })();
-/******/
-/******/ /* webpack/runtime/hasOwnProperty shorthand */
-/******/ (() => {
-/******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))
-/******/ })();
-/******/
-/******/ /* webpack/runtime/make namespace object */
-/******/ (() => {
-/******/ // define __esModule on exports
-/******/ __webpack_require__.r = (exports) => {
-/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
-/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
-/******/ }
-/******/ Object.defineProperty(exports, '__esModule', { value: true });
-/******/ };
-/******/ })();
-/******/
-/************************************************************************/
-var __webpack_exports__ = {};
-// This entry need to be wrapped in an IIFE because it need to be in strict mode.
-(() => {
-"use strict";
-// ESM COMPAT FLAG
-__webpack_require__.r(__webpack_exports__);
-
-// EXPORTS
-__webpack_require__.d(__webpack_exports__, {
- "parse": () => (/* binding */ parse)
-});
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Token.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-/**
- * A token has properties: text, type, line, character position in the line
- * (so we can ignore tabs), token channel, index, and source from which
- * we obtained this token.
- */
-class Token {
- constructor() {
- this.source = null;
- this.type = null; // token type of the token
- this.channel = null; // The parser ignores everything not on DEFAULT_CHANNEL
- this.start = null; // optional; return -1 if not implemented.
- this.stop = null; // optional; return -1 if not implemented.
- this.tokenIndex = null; // from 0..n-1 of the token object in the input stream
- this.line = null; // line=1..n of the 1st character
- this.column = null; // beginning of the line at which it occurs, 0..n-1
- this._text = null; // text of the token.
- }
-
- getTokenSource() {
- return this.source[0];
- }
-
- getInputStream() {
- return this.source[1];
- }
-
- get text(){
- return this._text;
- }
-
- set text(text) {
- this._text = text;
- }
-}
-
-Token.INVALID_TYPE = 0;
-
-/**
- * During lookahead operations, this "token" signifies we hit rule end ATN state
- * and did not follow it despite needing to.
- */
-Token.EPSILON = -2;
-
-Token.MIN_USER_TOKEN_TYPE = 1;
-
-Token.EOF = -1;
-
-/**
- * All tokens go to the parser (unless skip() is called in that rule)
- * on a particular "channel". The parser tunes to a particular channel
- * so that whitespace etc... can go to the parser on a "hidden" channel.
- */
-Token.DEFAULT_CHANNEL = 0;
-
-/**
- * Anything on different channel than DEFAULT_CHANNEL is not parsed
- * by parser.
- */
-Token.HIDDEN_CHANNEL = 1;
-
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/polyfills/codepointat.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-/*! https://mths.be/codepointat v0.2.0 by @mathias */
-if (!String.prototype.codePointAt) {
- (function() {
- 'use strict'; // needed to support `apply`/`call` with `undefined`/`null`
- var defineProperty = (function() {
- // IE 8 only supports `Object.defineProperty` on DOM elements
- let result;
- try {
- const object = {};
- const $defineProperty = Object.defineProperty;
- result = $defineProperty(object, object, object) && $defineProperty;
- } catch(error) {
- /* eslint no-empty: [ "off" ] */
- }
- return result;
- }());
- const codePointAt = function(position) {
- if (this == null) {
- throw TypeError();
- }
- const string = String(this);
- const size = string.length;
- // `ToInteger`
- let index = position ? Number(position) : 0;
- if (index !== index) { // better `isNaN`
- index = 0;
- }
- // Account for out-of-bounds indices:
- if (index < 0 || index >= size) {
- return undefined;
- }
- // Get the first code unit
- const first = string.charCodeAt(index);
- let second;
- if ( // check if it’s the start of a surrogate pair
- first >= 0xD800 && first <= 0xDBFF && // high surrogate
- size > index + 1 // there is a next code unit
- ) {
- second = string.charCodeAt(index + 1);
- if (second >= 0xDC00 && second <= 0xDFFF) { // low surrogate
- // https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
- return (first - 0xD800) * 0x400 + second - 0xDC00 + 0x10000;
- }
- }
- return first;
- };
- if (defineProperty) {
- defineProperty(String.prototype, 'codePointAt', {
- 'value': codePointAt,
- 'configurable': true,
- 'writable': true
- });
- } else {
- String.prototype.codePointAt = codePointAt;
- }
- }());
-}
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/polyfills/fromcodepoint.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-/*! https://mths.be/fromcodepoint v0.2.1 by @mathias */
-if (!String.fromCodePoint) {
- (function() {
- const defineProperty = (function() {
- // IE 8 only supports `Object.defineProperty` on DOM elements
- let result;
- try {
- const object = {};
- const $defineProperty = Object.defineProperty;
- result = $defineProperty(object, object, object) && $defineProperty;
- } catch(error) {
- /* eslint no-empty: [ "off" ] */
- }
- return result;
- }());
- const stringFromCharCode = String.fromCharCode;
- const floor = Math.floor;
- const fromCodePoint = function(_) {
- const MAX_SIZE = 0x4000;
- const codeUnits = [];
- let highSurrogate;
- let lowSurrogate;
- let index = -1;
- const length = arguments.length;
- if (!length) {
- return '';
- }
- let result = '';
- while (++index < length) {
- let codePoint = Number(arguments[index]);
- if (
- !isFinite(codePoint) || // `NaN`, `+Infinity`, or `-Infinity`
- codePoint < 0 || // not a valid Unicode code point
- codePoint > 0x10FFFF || // not a valid Unicode code point
- floor(codePoint) !== codePoint // not an integer
- ) {
- throw RangeError('Invalid code point: ' + codePoint);
- }
- if (codePoint <= 0xFFFF) { // BMP code point
- codeUnits.push(codePoint);
- } else { // Astral code point; split in surrogate halves
- // https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
- codePoint -= 0x10000;
- highSurrogate = (codePoint >> 10) + 0xD800;
- lowSurrogate = (codePoint % 0x400) + 0xDC00;
- codeUnits.push(highSurrogate, lowSurrogate);
- }
- if (index + 1 === length || codeUnits.length > MAX_SIZE) {
- result += stringFromCharCode.apply(null, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- };
- if (defineProperty) {
- defineProperty(String, 'fromCodePoint', {
- 'value': fromCodePoint,
- 'configurable': true,
- 'writable': true
- });
- } else {
- String.fromCodePoint = fromCodePoint;
- }
- }());
-}
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/InputStream.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-
-
-
-
-/**
- * If decodeToUnicodeCodePoints is true, the input is treated
- * as a series of Unicode code points.
- *
- * Otherwise, the input is treated as a series of 16-bit UTF-16 code
- * units.
- */
-class InputStream {
- constructor(data, decodeToUnicodeCodePoints) {
- this.name = "
- * This implementation prints messages to {@link System//err} containing the
- * values of {@code line}, {@code charPositionInLine}, and {@code msg} using
- * the following format.
- * line line:charPositionInLine msg
- *
- *
- */
-class ConsoleErrorListener extends ErrorListener {
- constructor() {
- super();
- }
-
- syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
- console.error("line " + line + ":" + column + " " + msg);
- }
-}
-
-
-/**
- * Provides a default instance of {@link ConsoleErrorListener}.
- */
-ConsoleErrorListener.INSTANCE = new ConsoleErrorListener();
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/ProxyErrorListener.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-
-class ProxyErrorListener extends ErrorListener {
- constructor(delegates) {
- super();
- if (delegates===null) {
- throw "delegates";
- }
- this.delegates = delegates;
- return this;
- }
-
- syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
- this.delegates.map(d => d.syntaxError(recognizer, offendingSymbol, line, column, msg, e));
- }
-
- reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
- this.delegates.map(d => d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs));
- }
-
- reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) {
- this.delegates.map(d => d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs));
- }
-
- reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) {
- this.delegates.map(d => d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs));
- }
-}
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Recognizer.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-
-
-
-
-class Recognizer {
- constructor() {
- this._listeners = [ ConsoleErrorListener.INSTANCE ];
- this._interp = null;
- this._stateNumber = -1;
- }
-
- checkVersion(toolVersion) {
- const runtimeVersion = "4.10.1";
- if (runtimeVersion!==toolVersion) {
- console.log("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion);
- }
- }
-
- addErrorListener(listener) {
- this._listeners.push(listener);
- }
-
- removeErrorListeners() {
- this._listeners = [];
- }
-
- getLiteralNames() {
- return Object.getPrototypeOf(this).constructor.literalNames || [];
- }
-
- getSymbolicNames() {
- return Object.getPrototypeOf(this).constructor.symbolicNames || [];
- }
-
- getTokenNames() {
- if(!this.tokenNames) {
- const literalNames = this.getLiteralNames();
- const symbolicNames = this.getSymbolicNames();
- const length = literalNames.length > symbolicNames.length ? literalNames.length : symbolicNames.length;
- this.tokenNames = [];
- for(let i=0; i
- * If {@code oldToken} is also a {@link CommonToken} instance, the newly - * constructed token will share a reference to the {@link //text} field and - * the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will - * be assigned the result of calling {@link //getText}, and {@link //source} - * will be constructed from the result of {@link Token//getTokenSource} and - * {@link Token//getInputStream}.
- * - * @param oldToken The token to copy. - */ - clone() { - const t = new CommonToken(this.source, this.type, this.channel, this.start, this.stop); - t.tokenIndex = this.tokenIndex; - t.line = this.line; - t.column = this.column; - t.text = this.text; - return t; - } - - toString() { - let txt = this.text; - if (txt !== null) { - txt = txt.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t"); - } else { - txt = "- * The default value is {@code false} to avoid the performance and memory - * overhead of copying text for every token unless explicitly requested.
- */ - this.copyText = copyText===undefined ? false : copyText; - } - - create(source, type, text, channel, start, stop, line, column) { - const t = new CommonToken(source, type, channel, start, stop); - t.line = line; - t.column = column; - if (text !==null) { - t.text = text; - } else if (this.copyText && source[1] !==null) { - t.text = source[1].getText(start,stop); - } - return t; - } - - createThin(type, text) { - const t = new CommonToken(null, type); - t.text = text; - return t; - } -} - -/** - * The default {@link CommonTokenFactory} instance. - * - *- * This token factory does not explicitly copy token text when constructing - * tokens.
- */ -CommonTokenFactory.DEFAULT = new CommonTokenFactory(); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/RecognitionException.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -/** - * The root of the ANTLR exception hierarchy. In general, ANTLR tracks just - * 3 kinds of errors: prediction errors, failed predicate errors, and - * mismatched input errors. In each case, the parser knows where it is - * in the input, where it is in the ATN, the rule invocation stack, - * and what kind of problem occurred. - */ - -class RecognitionException extends Error { - constructor(params) { - super(params.message); - if (Error.captureStackTrace) - Error.captureStackTrace(this, RecognitionException); - this.message = params.message; - this.recognizer = params.recognizer; - this.input = params.input; - this.ctx = params.ctx; - /** - * The current {@link Token} when an error occurred. Since not all streams - * support accessing symbols by index, we have to track the {@link Token} - * instance itself - */ - this.offendingToken = null; - /** - * Get the ATN state number the parser was in at the time the error - * occurred. For {@link NoViableAltException} and - * {@link LexerNoViableAltException} exceptions, this is the - * {@link DecisionState} number. For others, it is the state whose outgoing - * edge we couldn't match. - */ - this.offendingState = -1; - if (this.recognizer!==null) { - this.offendingState = this.recognizer.state; - } - } - - /** - * Gets the set of input symbols which could potentially follow the - * previously matched symbol at the time this exception was thrown. - * - *If the set of expected tokens is not known and could not be computed, - * this method returns {@code null}.
- * - * @return The set of token types that could potentially follow the current - * state in the ATN, or {@code null} if the information is not available. - */ - getExpectedTokens() { - if (this.recognizer!==null) { - return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx); - } else { - return null; - } - } - - //If the state number is not known, this method returns -1.
- toString() { - return this.message; - } -} - - - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/Interval.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -/* stop is not included! */ -class Interval { - - constructor(start, stop) { - this.start = start; - this.stop = stop; - } - - clone() { - return new Interval(this.start, this.stop); - } - - contains(item) { - return item >= this.start && item < this.stop; - } - - toString() { - if(this.start===this.stop-1) { - return this.start.toString(); - } else { - return this.start.toString() + ".." + (this.stop-1).toString(); - } - } - - get length(){ - return this.stop - this.start; - } -} - -Interval.INVALID_INTERVAL = new Interval(-1, -2); - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/LexerNoViableAltException.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -class LexerNoViableAltException extends RecognitionException { - constructor(lexer, input, startIndex, deadEndConfigs) { - super({message: "", recognizer: lexer, input: input, ctx: null}); - this.startIndex = startIndex; - this.deadEndConfigs = deadEndConfigs; - } - - toString() { - let symbol = ""; - if (this.startIndex >= 0 && this.startIndex < this.input.size) { - symbol = this.input.getText(new Interval(this.startIndex,this.startIndex)); - } - return "LexerNoViableAltException" + symbol; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Lexer.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - -/** - * A lexer is recognizer that draws input symbols from a character stream. - * lexer grammars result in a subclass of this object. A Lexer object - * uses simplified match() and error recovery mechanisms in the interest of speed. - */ -class Lexer extends Recognizer { - constructor(input) { - super(); - this._input = input; - this._factory = CommonTokenFactory.DEFAULT; - this._tokenFactorySourcePair = [ this, input ]; - - this._interp = null; // child classes must populate this - - /** - * The goal of all lexer rules/methods is to create a token object. - * this is an instance variable as multiple rules may collaborate to - * create a single token. nextToken will return this object after - * matching lexer rule(s). If you subclass to allow multiple token - * emissions, then set this to the last token to be matched or - * something nonnull so that the auto token emit mechanism will not - * emit another token. - */ - this._token = null; - - /** - * What character index in the stream did the current token start at? - * Needed, for example, to get the text for current token. Set at - * the start of nextToken. - */ - this._tokenStartCharIndex = -1; - - // The line on which the first character of the token resides/// - this._tokenStartLine = -1; - - // The character position of first character within the line/// - this._tokenStartColumn = -1; - - // Once we see EOF on char stream, next token will be EOF. - // If you have DONE : EOF ; then you see DONE EOF. 
- this._hitEOF = false; - - // The channel number for the current token/// - this._channel = Token.DEFAULT_CHANNEL; - - // The token type for the current token/// - this._type = Token.INVALID_TYPE; - - this._modeStack = []; - this._mode = Lexer.DEFAULT_MODE; - - /** - * You can set the text for the current token to override what is in - * the input char buffer. Use setText() or can set this instance var. - */ - this._text = null; - } - - reset() { - // wack Lexer state variables - if (this._input !== null) { - this._input.seek(0); // rewind the input - } - this._token = null; - this._type = Token.INVALID_TYPE; - this._channel = Token.DEFAULT_CHANNEL; - this._tokenStartCharIndex = -1; - this._tokenStartColumn = -1; - this._tokenStartLine = -1; - this._text = null; - - this._hitEOF = false; - this._mode = Lexer.DEFAULT_MODE; - this._modeStack = []; - - this._interp.reset(); - } - -// Return a token from this source; i.e., match a token on the char stream. - nextToken() { - if (this._input === null) { - throw "nextToken requires a non-null input stream."; - } - - /** - * Mark start location in char stream so unbuffered streams are - * guaranteed at least have text of current token - */ - const tokenStartMarker = this._input.mark(); - try { - for (;;) { - if (this._hitEOF) { - this.emitEOF(); - return this._token; - } - this._token = null; - this._channel = Token.DEFAULT_CHANNEL; - this._tokenStartCharIndex = this._input.index; - this._tokenStartColumn = this._interp.column; - this._tokenStartLine = this._interp.line; - this._text = null; - let continueOuter = false; - for (;;) { - this._type = Token.INVALID_TYPE; - let ttype = Lexer.SKIP; - try { - ttype = this._interp.match(this._input, this._mode); - } catch (e) { - if(e instanceof RecognitionException) { - this.notifyListeners(e); // report error - this.recover(e); - } else { - console.log(e.stack); - throw e; - } - } - if (this._input.LA(1) === Token.EOF) { - this._hitEOF = true; - } - if (this._type === Token.INVALID_TYPE) { - this._type = ttype; - } - if (this._type === Lexer.SKIP) { - continueOuter = true; - break; - } - if (this._type !== Lexer.MORE) { - break; - } - } - if (continueOuter) { - continue; - } - if (this._token === null) { - this.emit(); - } - return this._token; - } - } finally { - // make sure we release marker after match or - // unbuffered char stream will keep buffering - this._input.release(tokenStartMarker); - } - } - - /** - * Instruct the lexer to skip creating a token for current lexer rule - * and look for another token. nextToken() knows to keep looking when - * a lexer rule finishes with token set to SKIP_TOKEN. Recall that - * if token==null at end of any token rule, it creates one for you - * and emits it. - */ - skip() { - this._type = Lexer.SKIP; - } - - more() { - this._type = Lexer.MORE; - } - - mode(m) { - this._mode = m; - } - - pushMode(m) { - if (this._interp.debug) { - console.log("pushMode " + m); - } - this._modeStack.push(this._mode); - this.mode(m); - } - - popMode() { - if (this._modeStack.length === 0) { - throw "Empty Stack"; - } - if (this._interp.debug) { - console.log("popMode back to " + this._modeStack.slice(0, -1)); - } - this.mode(this._modeStack.pop()); - return this._mode; - } - - /** - * By default does not support multiple emits per nextToken invocation - * for efficiency reasons. Subclass and override this method, nextToken, - * and getToken (to push tokens into a list and pull from that list - * rather than a single variable as this implementation does). 
- */ - emitToken(token) { - this._token = token; - } - - /** - * The standard method called to automatically emit a token at the - * outermost lexical rule. The token object should point into the - * char buffer start..stop. If there is a text override in 'text', - * use that to set the token's text. Override this method to emit - * custom Token objects or provide a new factory. - */ - emit() { - const t = this._factory.create(this._tokenFactorySourcePair, this._type, - this._text, this._channel, this._tokenStartCharIndex, this - .getCharIndex() - 1, this._tokenStartLine, - this._tokenStartColumn); - this.emitToken(t); - return t; - } - - emitEOF() { - const cpos = this.column; - const lpos = this.line; - const eof = this._factory.create(this._tokenFactorySourcePair, Token.EOF, - null, Token.DEFAULT_CHANNEL, this._input.index, - this._input.index - 1, lpos, cpos); - this.emitToken(eof); - return eof; - } - -// What is the index of the current character of lookahead?/// - getCharIndex() { - return this._input.index; - } - - /** - * Return a list of all Token objects in input char stream. - * Forces load of all tokens. Does not include EOF token. - */ - getAllTokens() { - const tokens = []; - let t = this.nextToken(); - while (t.type !== Token.EOF) { - tokens.push(t); - t = this.nextToken(); - } - return tokens; - } - - notifyListeners(e) { - const start = this._tokenStartCharIndex; - const stop = this._input.index; - const text = this._input.getText(start, stop); - const msg = "token recognition error at: '" + this.getErrorDisplay(text) + "'"; - const listener = this.getErrorListenerDispatch(); - listener.syntaxError(this, null, this._tokenStartLine, - this._tokenStartColumn, msg, e); - } - - getErrorDisplay(s) { - const d = []; - for (let i = 0; i < s.length; i++) { - d.push(s[i]); - } - return d.join(''); - } - - getErrorDisplayForChar(c) { - if (c.charCodeAt(0) === Token.EOF) { - return "- * This token stream ignores the value of {@link Token//getChannel}. If your - * parser requires the token stream filter tokens to only those on a particular - * channel, such as {@link Token//DEFAULT_CHANNEL} or - * {@link Token//HIDDEN_CHANNEL}, use a filtering token stream such a - * {@link CommonTokenStream}.
- */ -class BufferedTokenStream extends TokenStream { - constructor(tokenSource) { - - super(); - // The {@link TokenSource} from which tokens for this stream are fetched. - this.tokenSource = tokenSource; - /** - * A collection of all tokens fetched from the token source. The list is - * considered a complete view of the input once {@link //fetchedEOF} is set - * to {@code true}. - */ - this.tokens = []; - - /** - * The index into {@link //tokens} of the current token (next token to - * {@link //consume}). {@link //tokens}{@code [}{@link //p}{@code ]} should - * be - * {@link //LT LT(1)}. - * - *This field is set to -1 when the stream is first constructed or when - * {@link //setTokenSource} is called, indicating that the first token has - * not yet been fetched from the token source. For additional information, - * see the documentation of {@link IntStream} for a description of - * Initializing Methods.
- */ - this.index = -1; - - /** - * Indicates whether the {@link Token//EOF} token has been fetched from - * {@link //tokenSource} and added to {@link //tokens}. This field improves - * performance for the following cases: - * - *For example, {@link CommonTokenStream} overrides this method to ensure - * that - * the seek target is always an on-channel token.
- * - * @param {Number} i The target token index. - * @return {Number} The adjusted target token index. - */ - adjustSeekIndex(i) { - return i; - } - - lazyInit() { - if (this.index === -1) { - this.setup(); - } - } - - setup() { - this.sync(0); - this.index = this.adjustSeekIndex(0); - } - -// Reset this token stream by setting its token source./// - setTokenSource(tokenSource) { - this.tokenSource = tokenSource; - this.tokens = []; - this.index = -1; - this.fetchedEOF = false; - } - - /** - * Given a starting index, return the index of the next token on channel. - * Return i if tokens[i] is on channel. Return -1 if there are no tokens - * on channel between i and EOF. - */ - nextTokenOnChannel(i, channel) { - this.sync(i); - if (i >= this.tokens.length) { - return -1; - } - let token = this.tokens[i]; - while (token.channel !== this.channel) { - if (token.type === Token.EOF) { - return -1; - } - i += 1; - this.sync(i); - token = this.tokens[i]; - } - return i; - } - - /** - * Given a starting index, return the index of the previous token on channel. - * Return i if tokens[i] is on channel. Return -1 if there are no tokens - * on channel between i and 0. - */ - previousTokenOnChannel(i, channel) { - while (i >= 0 && this.tokens[i].channel !== channel) { - i -= 1; - } - return i; - } - - /** - * Collect all tokens on specified channel to the right of - * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or - * EOF. If channel is -1, find any non default channel token. - */ - getHiddenTokensToRight(tokenIndex, - channel) { - if (channel === undefined) { - channel = -1; - } - this.lazyInit(); - if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { - throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1; - } - const nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL); - const from_ = tokenIndex + 1; - // if none onchannel to right, nextOnChannel=-1 so set to = last token - const to = nextOnChannel === -1 ? this.tokens.length - 1 : nextOnChannel; - return this.filterForChannel(from_, to, channel); - } - - /** - * Collect all tokens on specified channel to the left of - * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. - * If channel is -1, find any non default channel token. - */ - getHiddenTokensToLeft(tokenIndex, - channel) { - if (channel === undefined) { - channel = -1; - } - this.lazyInit(); - if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { - throw "" + tokenIndex + " not in 0.." 
+ this.tokens.length - 1; - } - const prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL); - if (prevOnChannel === tokenIndex - 1) { - return null; - } - // if none on channel to left, prevOnChannel=-1 then from=0 - const from_ = prevOnChannel + 1; - const to = tokenIndex - 1; - return this.filterForChannel(from_, to, channel); - } - - filterForChannel(left, right, channel) { - const hidden = []; - for (let i = left; i < right + 1; i++) { - const t = this.tokens[i]; - if (channel === -1) { - if (t.channel !== Lexer.DEFAULT_TOKEN_CHANNEL) { - hidden.push(t); - } - } else if (t.channel === channel) { - hidden.push(t); - } - } - if (hidden.length === 0) { - return null; - } - return hidden; - } - - getSourceName() { - return this.tokenSource.getSourceName(); - } - -// Get the text of all tokens in this buffer./// - getText(interval) { - this.lazyInit(); - this.fill(); - if (interval === undefined || interval === null) { - interval = new Interval(0, this.tokens.length - 1); - } - let start = interval.start; - if (start instanceof Token) { - start = start.tokenIndex; - } - let stop = interval.stop; - if (stop instanceof Token) { - stop = stop.tokenIndex; - } - if (start === null || stop === null || start < 0 || stop < 0) { - return ""; - } - if (stop >= this.tokens.length) { - stop = this.tokens.length - 1; - } - let s = ""; - for (let i = start; i < stop + 1; i++) { - const t = this.tokens[i]; - if (t.type === Token.EOF) { - break; - } - s = s + t.text; - } - return s; - } - -// Get all tokens from lexer until EOF/// - fill() { - this.lazyInit(); - while (this.fetch(1000) === 1000) { - continue; - } - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/CommonTokenStream.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - -/** - * This class extends {@link BufferedTokenStream} with functionality to filter - * token streams to tokens on a particular channel (tokens where - * {@link Token//getChannel} returns a particular value). - * - *- * This token stream provides access to all tokens by index or when calling - * methods like {@link //getText}. The channel filtering is only used for code - * accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and - * {@link //LB}.
- * - *- * By default, tokens are placed on the default channel - * ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the - * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to - * call {@link Lexer//setChannel}. - *
- * - *- * Note: lexer rules which use the {@code ->skip} lexer command or call - * {@link Lexer//skip} do not produce tokens at all, so input text matched by - * such a rule will not be available as part of the token stream, regardless of - * channel.
- */ -class CommonTokenStream extends BufferedTokenStream { - constructor(lexer, channel) { - super(lexer); - this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel; - } - - adjustSeekIndex(i) { - return this.nextTokenOnChannel(i, this.channel); - } - - LB(k) { - if (k===0 || this.index-k<0) { - return null; - } - let i = this.index; - let n = 1; - // find k good tokens looking backwards - while (n <= k) { - // skip off-channel tokens - i = this.previousTokenOnChannel(i - 1, this.channel); - n += 1; - } - if (i < 0) { - return null; - } - return this.tokens[i]; - } - - LT(k) { - this.lazyInit(); - if (k === 0) { - return null; - } - if (k < 0) { - return this.LB(-k); - } - let i = this.index; - let n = 1; // we know tokens[pos] is a good one - // find k good tokens - while (n < k) { - // skip off-channel tokens, but make sure to not look past EOF - if (this.sync(i + 1)) { - i = this.nextTokenOnChannel(i + 1, this.channel); - } - n += 1; - } - return this.tokens[i]; - } - - // Count EOF just once. - getNumberOfOnChannelTokens() { - let n = 0; - this.fill(); - for (let i =0; i< this.tokens.length;i++) { - const t = this.tokens[i]; - if( t.channel===this.channel) { - n += 1; - } - if( t.type===Token.EOF) { - break; - } - } - return n; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/stringHashCode.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -String.prototype.seed = String.prototype.seed || Math.round(Math.random() * Math.pow(2, 32)); - -String.prototype.hashCode = function () { - const key = this.toString(); - let h1b, k1; - - const remainder = key.length & 3; // key.length % 4 - const bytes = key.length - remainder; - let h1 = String.prototype.seed; - const c1 = 0xcc9e2d51; - const c2 = 0x1b873593; - let i = 0; - - while (i < bytes) { - k1 = - ((key.charCodeAt(i) & 0xff)) | - ((key.charCodeAt(++i) & 0xff) << 8) | - ((key.charCodeAt(++i) & 0xff) << 16) | - ((key.charCodeAt(++i) & 0xff) << 24); - ++i; - - k1 = ((((k1 & 0xffff) * c1) + ((((k1 >>> 16) * c1) & 0xffff) << 16))) & 0xffffffff; - k1 = (k1 << 15) | (k1 >>> 17); - k1 = ((((k1 & 0xffff) * c2) + ((((k1 >>> 16) * c2) & 0xffff) << 16))) & 0xffffffff; - - h1 ^= k1; - h1 = (h1 << 13) | (h1 >>> 19); - h1b = ((((h1 & 0xffff) * 5) + ((((h1 >>> 16) * 5) & 0xffff) << 16))) & 0xffffffff; - h1 = (((h1b & 0xffff) + 0x6b64) + ((((h1b >>> 16) + 0xe654) & 0xffff) << 16)); - } - - k1 = 0; - - switch (remainder) { - case 3: - k1 ^= (key.charCodeAt(i + 2) & 0xff) << 16; - // no-break - case 2: - k1 ^= (key.charCodeAt(i + 1) & 0xff) << 8; - // no-break - case 1: - k1 ^= (key.charCodeAt(i) & 0xff); - k1 = (((k1 & 0xffff) * c1) + ((((k1 >>> 16) * c1) & 0xffff) << 16)) & 0xffffffff; - k1 = (k1 << 15) | (k1 >>> 17); - k1 = (((k1 & 0xffff) * c2) + ((((k1 >>> 16) * c2) & 0xffff) << 16)) & 0xffffffff; - h1 ^= k1; - } - - h1 ^= key.length; - - h1 ^= h1 >>> 16; - h1 = (((h1 & 0xffff) * 0x85ebca6b) + ((((h1 >>> 16) * 0x85ebca6b) & 0xffff) << 16)) & 0xffffffff; - h1 ^= h1 >>> 13; - h1 = ((((h1 & 0xffff) * 0xc2b2ae35) + ((((h1 >>> 16) * 0xc2b2ae35) & 0xffff) << 16))) & 0xffffffff; - h1 ^= h1 >>> 16; - - return h1 >>> 0; -}; - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/equalArrays.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. 
- * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -function equalArrays(a, b) { - if (!Array.isArray(a) || !Array.isArray(b)) - return false; - if (a === b) - return true; - if (a.length !== b.length) - return false; - for (let i = 0; i < a.length; i++) { - if (a[i] === b[i]) - continue; - if (!a[i].equals || !a[i].equals(b[i])) - return false; - } - return true; -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/HashCode.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -class HashCode { - - constructor() { - this.count = 0; - this.hash = 0; - } - - update() { - for(let i=0;iI have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of - * {@link SemanticContext} within the scope of this outer class.
- */ -class SemanticContext { - - hashCode() { - const hash = new HashCode(); - this.updateHashCode(hash); - return hash.finish(); - } - - /** - * For context independent predicates, we evaluate them without a local - * context (i.e., null context). That way, we can evaluate them without - * having to create proper rule-specific context during prediction (as - * opposed to the parser, which creates them naturally). In a practical - * sense, this avoids a cast exception from RuleContext to myruleContext. - * - *For context dependent predicates, we must pass in a local context so that - * references such as $arg evaluate properly as _localctx.arg. We only - * capture context dependent predicates in the context in which we begin - * prediction, so we passed in the outer context here in case of context - * dependent predicate evaluation.
- */ - evaluate(parser, outerContext) {} - - /** - * Evaluate the precedence predicates for the context and reduce the result. - * - * @param parser The parser instance. - * @param outerContext The current parser context object. - * @return The simplified semantic context after precedence predicates are - * evaluated, which will be one of the following values. - *- * The evaluation of predicates by this context is short-circuiting, but - * unordered.
- */ - evaluate(parser, outerContext) { - for (let i = 0; i < this.opnds.length; i++) { - if (this.opnds[i].evaluate(parser, outerContext)) { - return true; - } - } - return false; - } - - evalPrecedence(parser, outerContext) { - let differs = false; - const operands = []; - for (let i = 0; i < this.opnds.length; i++) { - const context = this.opnds[i]; - const evaluated = context.evalPrecedence(parser, outerContext); - differs |= (evaluated !== context); - if (evaluated === SemanticContext.NONE) { - // The OR context is true if any element is true - return SemanticContext.NONE; - } else if (evaluated !== null) { - // Reduce the result by skipping false elements - operands.push(evaluated); - } - } - if (!differs) { - return this; - } - if (operands.length === 0) { - // all elements were false, so the OR context is false - return null; - } - const result = null; - operands.map(function(o) { - return result === null ? o : SemanticContext.orContext(result, o); - }); - return result; - } - - toString() { - const s = this.opnds.map(o => o.toString()); - return (s.length > 3 ? s.slice(3) : s).join("||"); - } -} - -function filterPrecedencePredicates(set) { - const result = []; - set.values().map( function(context) { - if (context instanceof SemanticContext.PrecedencePredicate) { - result.push(context); - } - }); - return result; -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNConfig.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -function checkParams(params, isCfg) { - if(params===null) { - const result = { state:null, alt:null, context:null, semanticContext:null }; - if(isCfg) { - result.reachesIntoOuterContext = 0; - } - return result; - } else { - const props = {}; - props.state = params.state || null; - props.alt = (params.alt === undefined) ? null : params.alt; - props.context = params.context || null; - props.semanticContext = params.semanticContext || null; - if(isCfg) { - props.reachesIntoOuterContext = params.reachesIntoOuterContext || 0; - props.precedenceFilterSuppressed = params.precedenceFilterSuppressed || false; - } - return props; - } -} - -class ATNConfig { - /** - * @param {Object} params A tuple: (ATN state, predicted alt, syntactic, semantic context). - * The syntactic context is a graph-structured stack node whose - * path(s) to the root is the rule invocation(s) - * chain used to arrive at the state. The semantic context is - * the tree of semantic predicates encountered before reaching - * an ATN state - */ - constructor(params, config) { - this.checkContext(params, config); - params = checkParams(params); - config = checkParams(config, true); - // The ATN state associated with this configuration/// - this.state = params.state!==null ? params.state : config.state; - // What alt (or lexer rule) is predicted by this configuration/// - this.alt = params.alt!==null ? params.alt : config.alt; - /** - * The stack of invoking states leading to the rule/states associated - * with this config. We track only those contexts pushed during - * execution of the ATN simulator - */ - this.context = params.context!==null ? params.context : config.context; - this.semanticContext = params.semanticContext!==null ? params.semanticContext : - (config.semanticContext!==null ? 
config.semanticContext : SemanticContext.NONE); - // TODO: make it a boolean then - /** - * We cannot execute predicates dependent upon local context unless - * we know for sure we are in the correct context. Because there is - * no way to do this efficiently, we simply cannot evaluate - * dependent predicates unless we are in the rule that initially - * invokes the ATN simulator. - * closure() tracks the depth of how far we dip into the - * outer context: depth > 0. Note that it may not be totally - * accurate depth since I don't ever decrement - */ - this.reachesIntoOuterContext = config.reachesIntoOuterContext; - this.precedenceFilterSuppressed = config.precedenceFilterSuppressed; - } - - checkContext(params, config) { - if((params.context===null || params.context===undefined) && - (config===null || config.context===null || config.context===undefined)) { - this.context = null; - } - } - - hashCode() { - const hash = new HashCode(); - this.updateHashCode(hash); - return hash.finish(); - } - - updateHashCode(hash) { - hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext); - } - - /** - * An ATN configuration is equal to another if both have - * the same state, they predict the same alternative, and - * syntactic/semantic contexts are the same - */ - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof ATNConfig)) { - return false; - } else { - return this.state.stateNumber===other.state.stateNumber && - this.alt===other.alt && - (this.context===null ? other.context===null : this.context.equals(other.context)) && - this.semanticContext.equals(other.semanticContext) && - this.precedenceFilterSuppressed===other.precedenceFilterSuppressed; - } - } - - hashCodeForConfigSet() { - const hash = new HashCode(); - hash.update(this.state.stateNumber, this.alt, this.semanticContext); - return hash.finish(); - } - - equalsForConfigSet(other) { - if (this === other) { - return true; - } else if (! (other instanceof ATNConfig)) { - return false; - } else { - return this.state.stateNumber===other.state.stateNumber && - this.alt===other.alt && - this.semanticContext.equals(other.semanticContext); - } - } - - toString() { - return "(" + this.state + "," + this.alt + - (this.context!==null ? ",[" + this.context.toString() + "]" : "") + - (this.semanticContext !== SemanticContext.NONE ? - ("," + this.semanticContext.toString()) - : "") + - (this.reachesIntoOuterContext>0 ? - (",up=" + this.reachesIntoOuterContext) - : "") + ")"; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/IntervalSet.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - - - - -class IntervalSet { - constructor() { - this.intervals = null; - this.readOnly = false; - } - - first(v) { - if (this.intervals === null || this.intervals.length===0) { - return Token.INVALID_TYPE; - } else { - return this.intervals[0].start; - } - } - - addOne(v) { - this.addInterval(new Interval(v, v + 1)); - } - - addRange(l, h) { - this.addInterval(new Interval(l, h + 1)); - } - - addInterval(toAdd) { - if (this.intervals === null) { - this.intervals = []; - this.intervals.push(toAdd.clone()); - } else { - // find insert pos - for (let pos = 0; pos < this.intervals.length; pos++) { - const existing = this.intervals[pos]; - // distinct range -> insert - if (toAdd.stop < existing.start) { - this.intervals.splice(pos, 0, toAdd); - return; - } - // contiguous range -> adjust - else if (toAdd.stop === existing.start) { - this.intervals[pos] = new Interval(toAdd.start, existing.stop) - return; - } - // overlapping range -> adjust and reduce - else if (toAdd.start <= existing.stop) { - this.intervals[pos] = new Interval(Math.min(existing.start, toAdd.start), Math.max(existing.stop, toAdd.stop)); - this.reduce(pos); - return; - } - } - // greater than any existing - this.intervals.push(toAdd.clone()); - } - } - - addSet(other) { - if (other.intervals !== null) { - other.intervals.forEach( toAdd => this.addInterval(toAdd), this); - } - return this; - } - - reduce(pos) { - // only need to reduce if pos is not the last - if (pos < this.intervals.length - 1) { - const current = this.intervals[pos]; - const next = this.intervals[pos + 1]; - // if next contained in current - if (current.stop >= next.stop) { - this.intervals.splice(pos + 1, 1); - this.reduce(pos); - } else if (current.stop >= next.start) { - this.intervals[pos] = new Interval(current.start, next.stop); - this.intervals.splice(pos + 1, 1); - } - } - } - - complement(start, stop) { - const result = new IntervalSet(); - result.addInterval(new Interval(start, stop + 1)); - if(this.intervals !== null) - this.intervals.forEach(toRemove => result.removeRange(toRemove)); - return result; - } - - contains(item) { - if (this.intervals === null) { - return false; - } else { - for (let k = 0; k < this.intervals.length; k++) { - if(this.intervals[k].contains(item)) { - return true; - } - } - return false; - } - } - - removeRange(toRemove) { - if(toRemove.start===toRemove.stop-1) { - this.removeOne(toRemove.start); - } else if (this.intervals !== null) { - let pos = 0; - for(let n=0; nThis is a one way link. It emanates from a state (usually via a list of - * transitions) and has a target state.
- *
- * Since we never have to change the ATN transitions once we construct it,
- * we can fix these transitions as specific classes. The DFA transitions
- * on the other hand need to update the labels as they add transitions to
- * the states. We'll use the term Edge for the DFA to distinguish them from
- * ATN transitions.
- */ -class Transition { - constructor(target) { - // The target of this transition. - if (target===undefined || target===null) { - throw "target cannot be null."; - } - this.target = target; - // Are we epsilon, action, sempred? - this.isEpsilon = false; - this.label = null; - } -} - -// constants for serialization - -Transition.EPSILON = 1; -Transition.RANGE = 2; -Transition.RULE = 3; -// e.g., {isType(input.LT(1))}? -Transition.PREDICATE = 4; -Transition.ATOM = 5; -Transition.ACTION = 6; -// ~(A|B) or ~atom, wildcard, which convert to next 2 -Transition.SET = 7; -Transition.NOT_SET = 8; -Transition.WILDCARD = 9; -Transition.PRECEDENCE = 10; - -Transition.serializationNames = [ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE" - ]; - -Transition.serializationTypes = { - EpsilonTransition: Transition.EPSILON, - RangeTransition: Transition.RANGE, - RuleTransition: Transition.RULE, - PredicateTransition: Transition.PREDICATE, - AtomTransition: Transition.ATOM, - ActionTransition: Transition.ACTION, - SetTransition: Transition.SET, - NotSetTransition: Transition.NOT_SET, - WildcardTransition: Transition.WILDCARD, - PrecedencePredicateTransition: Transition.PRECEDENCE - }; - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/RuleTransition.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class RuleTransition extends Transition { - constructor(ruleStart, ruleIndex, precedence, followState) { - super(ruleStart); - // ptr to the rule definition object for this rule ref - this.ruleIndex = ruleIndex; - this.precedence = precedence; - // what node to begin computations following ref to rule - this.followState = followState; - this.serializationType = Transition.RULE; - this.isEpsilon = true; - } - - matches(symbol, minVocabSymbol, maxVocabSymbol) { - return false; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/SetTransition.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -// A transition containing a set of values. - - - - -class SetTransition extends Transition { - constructor(target, set) { - super(target); - this.serializationType = Transition.SET; - if (set !==undefined && set !==null) { - this.label = set; - } else { - this.label = new IntervalSet(); - this.label.addOne(Token.INVALID_TYPE); - } - } - - matches(symbol, minVocabSymbol, maxVocabSymbol) { - return this.label.contains(symbol); - } - - toString() { - return this.label.toString(); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/NotSetTransition.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
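// A hypothetical helper (not in the bundle) illustrating the serialization tables above:
// each transition's serializationType code (1..10) indexes into serializationNames.
function describeTransition(t) {
    const name = Transition.serializationNames[t.serializationType] || "INVALID";
    return name + " -> state " + t.target.stateNumber + (t.isEpsilon ? " (epsilon)" : "");
}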
- */ - - - -class NotSetTransition extends SetTransition { - constructor(target, set) { - super(target, set); - this.serializationType = Transition.NOT_SET; - } - - matches(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && - !super.matches(symbol, minVocabSymbol, maxVocabSymbol); - } - - toString() { - return '~' + super.toString(); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/WildcardTransition.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class WildcardTransition extends Transition { - constructor(target) { - super(target); - this.serializationType = Transition.WILDCARD; - } - - matches(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; - } - - toString() { - return "."; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/AbstractPredicateTransition.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class AbstractPredicateTransition extends Transition { - constructor(target) { - super(target); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/Tree.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -/** - * The basic notion of a tree has a parent, a payload, and a list of children. - * It is the most abstract interface for all the trees used by ANTLR. - */ -class Tree {} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/SyntaxTree.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class SyntaxTree extends Tree { -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/ParseTree.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class ParseTree extends SyntaxTree { -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/RuleNode.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class RuleNode extends ParseTree { - - getRuleContext(){ - throw new Error("missing interface implementation") - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/TerminalNode.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class TerminalNode extends ParseTree { -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/ErrorNode.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. 
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-
-class ErrorNode extends TerminalNode {
-}
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/escapeWhitespace.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-function escapeWhitespace(s, escapeSpaces) {
-    s = s.replace(/\t/g, "\\t")
-        .replace(/\n/g, "\\n")
-        .replace(/\r/g, "\\r");
-    if (escapeSpaces) {
-        s = s.replace(/ /g, "\u00B7");
-    }
-    return s;
-}
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/Trees.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-
-
-
-
-
-
-/** A set of utility routines useful for all kinds of ANTLR trees. */
-const Trees = {
-    /**
-     * Print out a whole tree in LISP form. {@link //getNodeText} is used on the
-     * node payloads to get the text for the nodes. Detect
-     * parse trees and extract data appropriately.
-     */
-    toStringTree: function(tree, ruleNames, recog) {
-        ruleNames = ruleNames || null;
-        recog = recog || null;
-        if(recog!==null) {
-            ruleNames = recog.ruleNames;
-        }
-        let s = Trees.getNodeText(tree, ruleNames);
-        s = escapeWhitespace(s, false);
-        const c = tree.getChildCount();
-        if(c===0) {
-            return s;
-        }
-        let res = "(" + s + ' ';
-        if(c>0) {
-            s = Trees.toStringTree(tree.getChild(0), ruleNames);
-            res = res.concat(s);
-        }
-        for(let i=1;i<c;i++) {
-            s = Trees.toStringTree(tree.getChild(i), ruleNames);
-            res = res.concat(' ' + s);
-        }
-        res = res.concat(")");
-        return res;
-    }
-};
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/PredictionContext.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-class PredictionContext {
-
-    constructor(cachedHashCode) {
-        this.cachedHashCode = cachedHashCode;
-    }
-
-    /**
-     * Stores the computed hash code of this {@link PredictionContext}. The hash
-     * code is computed in parts to match the following reference algorithm.
-     *
-     * private int referenceHashCode() {
-     *     int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link
-     *     //INITIAL_HASH});
-     *
-     *     for (int i = 0; i < {@link //size()}; i++) {
-     *         hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent
-     *         getParent}(i));
-     *     }
-     *
-     *     for (int i = 0; i < {@link //size()}; i++) {
-     *         hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link
-     *         //getReturnState getReturnState}(i));
-     *     }
-     *
-     *     hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2// {@link
-     *     //size()});
-     *     return hash;
-     * }
-     */
-
-    /**
-     * This means only the {@link //EMPTY} context is in set.
-     */
-    isEmpty() {
-        return this === PredictionContext.EMPTY;
-    }
-
-    hasEmptyPath() {
-        return this.getReturnState(this.length - 1) === PredictionContext.EMPTY_RETURN_STATE;
-    }
-
-    hashCode() {
-        return this.cachedHashCode;
-    }
-
-    updateHashCode(hash) {
-        hash.update(this.cachedHashCode);
-    }
-}
-
-/**
- * Represents {@code $} in local context prediction, which means wildcard.
- * {@code//+x =//}.
- */
-PredictionContext.EMPTY = null;
-
-/**
- * Represents {@code $} in an array in full context mode, when {@code $}
- * doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
- * {@code $} = {@link //EMPTY_RETURN_STATE}.
- */
-PredictionContext.EMPTY_RETURN_STATE = 0x7FFFFFFF;
-
-PredictionContext.globalNodeCount = 1;
-PredictionContext.id = PredictionContext.globalNodeCount;
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/ArrayPredictionContext.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
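// A small sketch (not bundle code) of the Trees/escapeWhitespace helpers above. The names
// `ruleCtx` and `parser` are hypothetical stand-ins for a parse produced elsewhere:
//   Trees.toStringTree(ruleCtx, null, parser)  =>  e.g. "(n3Doc (n3Statement ...))"
console.log(escapeWhitespace("a\tb c", true)); // prints a\tb·c (tab spelled out, space as U+00B7)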
- */ - - - - -class ArrayPredictionContext extends PredictionContext { - - constructor(parents, returnStates) { - /** - * Parent can be null only if full ctx mode and we make an array - * from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - * null parent and - * returnState == {@link //EMPTY_RETURN_STATE}. - */ - const h = new HashCode(); - h.update(parents, returnStates); - const hashCode = h.finish(); - super(hashCode); - this.parents = parents; - this.returnStates = returnStates; - return this; - } - - isEmpty() { - // since EMPTY_RETURN_STATE can only appear in the last position, we - // don't need to verify that size==1 - return this.returnStates[0] === PredictionContext.EMPTY_RETURN_STATE; - } - - getParent(index) { - return this.parents[index]; - } - - getReturnState(index) { - return this.returnStates[index]; - } - - equals(other) { - if (this === other) { - return true; - } else if (!(other instanceof ArrayPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - return equalArrays(this.returnStates, other.returnStates) && - equalArrays(this.parents, other.parents); - } - } - - toString() { - if (this.isEmpty()) { - return "[]"; - } else { - let s = "["; - for (let i = 0; i < this.returnStates.length; i++) { - if (i > 0) { - s = s + ", "; - } - if (this.returnStates[i] === PredictionContext.EMPTY_RETURN_STATE) { - s = s + "$"; - continue; - } - s = s + this.returnStates[i]; - if (this.parents[i] !== null) { - s = s + " " + this.parents[i]; - } else { - s = s + "null"; - } - } - return s + "]"; - } - } - - get length(){ - return this.returnStates.length; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/SingletonPredictionContext.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -class SingletonPredictionContext extends PredictionContext { - - constructor(parent, returnState) { - let hashCode = 0; - const hash = new HashCode(); - if(parent !== null) { - hash.update(parent, returnState); - } else { - hash.update(1); - } - hashCode = hash.finish(); - super(hashCode); - this.parentCtx = parent; - this.returnState = returnState; - } - - getParent(index) { - return this.parentCtx; - } - - getReturnState(index) { - return this.returnState; - } - - equals(other) { - if (this === other) { - return true; - } else if (!(other instanceof SingletonPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - if(this.returnState !== other.returnState) - return false; - else if(this.parentCtx==null) - return other.parentCtx==null - else - return this.parentCtx.equals(other.parentCtx); - } - } - - toString() { - const up = this.parentCtx === null ? 
"" : this.parentCtx.toString(); - if (up.length === 0) { - if (this.returnState === PredictionContext.EMPTY_RETURN_STATE) { - return "$"; - } else { - return "" + this.returnState; - } - } else { - return "" + this.returnState + " " + up; - } - } - - get length(){ - return 1; - } - - static create(parent, returnState) { - if (returnState === PredictionContext.EMPTY_RETURN_STATE && parent === null) { - // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; - } else { - return new SingletonPredictionContext(parent, returnState); - } - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/EmptyPredictionContext.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -class EmptyPredictionContext extends SingletonPredictionContext { - - constructor() { - super(null, PredictionContext.EMPTY_RETURN_STATE); - } - - isEmpty() { - return true; - } - - getParent(index) { - return null; - } - - getReturnState(index) { - return this.returnState; - } - - equals(other) { - return this === other; - } - - toString() { - return "$"; - } -} - - -PredictionContext.EMPTY = new EmptyPredictionContext(); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/HashMap.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -const HashMap_HASH_KEY_PREFIX = "h-"; - -class HashMap_HashMap { - - constructor(hashFunction, equalsFunction) { - this.data = {}; - this.hashFunction = hashFunction || standardHashCodeFunction; - this.equalsFunction = equalsFunction || standardEqualsFunction; - } - - set(key, value) { - const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); - if (hashKey in this.data) { - const entries = this.data[hashKey]; - for (let i = 0; i < entries.length; i++) { - const entry = entries[i]; - if (this.equalsFunction(key, entry.key)) { - const oldValue = entry.value; - entry.value = value; - return oldValue; - } - } - entries.push({key:key, value:value}); - return value; - } else { - this.data[hashKey] = [{key:key, value:value}]; - return value; - } - } - - containsKey(key) { - const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); - if(hashKey in this.data) { - const entries = this.data[hashKey]; - for (let i = 0; i < entries.length; i++) { - const entry = entries[i]; - if (this.equalsFunction(key, entry.key)) - return true; - } - } - return false; - } - - get(key) { - const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); - if(hashKey in this.data) { - const entries = this.data[hashKey]; - for (let i = 0; i < entries.length; i++) { - const entry = entries[i]; - if (this.equalsFunction(key, entry.key)) - return entry.value; - } - } - return null; - } - - entries() { - return Object.keys(this.data).filter(key => key.startsWith(HashMap_HASH_KEY_PREFIX)).flatMap(key => this.data[key], this); - } - - getKeys() { - return this.entries().map(e => e.key); - } - - getValues() { - return this.entries().map(e => e.value); - } - - toString() { - const ss = this.entries().map(e => '{' + e.key + ':' + e.value + '}'); - return '[' + ss.join(", ") + ']'; - } - - get length() { - return Object.keys(this.data).filter(key => key.startsWith(HashMap_HASH_KEY_PREFIX)).map(key => 
this.data[key].length, this).reduce((accum, item) => accum + item, 0); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/PredictionContextUtils.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - -/** - * Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. - * Return {@link //EMPTY} if {@code outerContext} is empty or null. - */ -function predictionContextFromRuleContext(atn, outerContext) { - if (outerContext === undefined || outerContext === null) { - outerContext = RuleContext.EMPTY; - } - // if we are in RuleContext of start rule, s, then PredictionContext - // is EMPTY. Nobody called us. (if we are empty, return empty) - if (outerContext.parentCtx === null || outerContext === RuleContext.EMPTY) { - return PredictionContext.EMPTY; - } - // If we have a parent, convert it to a PredictionContext graph - const parent = predictionContextFromRuleContext(atn, outerContext.parentCtx); - const state = atn.states[outerContext.invokingState]; - const transition = state.transitions[0]; - return SingletonPredictionContext.create(parent, transition.followState.stateNumber); -} - - -function getCachedPredictionContext(context, contextCache, visited) { - if (context.isEmpty()) { - return context; - } - let existing = visited.get(context) || null; - if (existing !== null) { - return existing; - } - existing = contextCache.get(context); - if (existing !== null) { - visited.set(context, existing); - return existing; - } - let changed = false; - let parents = []; - for (let i = 0; i < parents.length; i++) { - const parent = getCachedPredictionContext(context.getParent(i), contextCache, visited); - if (changed || parent !== context.getParent(i)) { - if (!changed) { - parents = []; - for (let j = 0; j < context.length; j++) { - parents[j] = context.getParent(j); - } - changed = true; - } - parents[i] = parent; - } - } - if (!changed) { - contextCache.add(context); - visited.set(context, context); - return context; - } - let updated = null; - if (parents.length === 0) { - updated = PredictionContext.EMPTY; - } else if (parents.length === 1) { - updated = SingletonPredictionContext.create(parents[0], context - .getReturnState(0)); - } else { - updated = new ArrayPredictionContext(parents, context.returnStates); - } - contextCache.add(updated); - visited.set(updated, updated); - visited.set(context, updated); - - return updated; -} - -function merge(a, b, rootIsWildcard, mergeCache) { - // share same graph if both same - if (a === b) { - return a; - } - if (a instanceof SingletonPredictionContext && b instanceof SingletonPredictionContext) { - return mergeSingletons(a, b, rootIsWildcard, mergeCache); - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if (rootIsWildcard) { - if (a instanceof EmptyPredictionContext) { - return a; - } - if (b instanceof EmptyPredictionContext) { - return b; - } - } - // convert singleton so both are arrays to normalize - if (a instanceof SingletonPredictionContext) { - a = new ArrayPredictionContext([a.getParent()], [a.returnState]); - } - if (b instanceof SingletonPredictionContext) { - b = new ArrayPredictionContext([b.getParent()], [b.returnState]); - } - return mergeArrays(a, b, rootIsWildcard, mergeCache); -} - - -/** - * Merge two {@link ArrayPredictionContext} instances. - * - *
- * Different tops, different parents.
- *
- * Shared top, same parents.
- *
- * Shared top, different parents.
- *
- * Shared top, all shared parents.
- *
- * Equal tops, merge parents and reduce top to
- * {@link SingletonPredictionContext}.
- *
- * Merging two {@link SingletonPredictionContext} instances covers the cases:
- *
- * Stack tops equal, parents merge is same; return left graph.
- *
- * Same stack top, parents differ; merge parents giving array node, then
- * remainders of those graphs. A new root node is created to point to the
- * merged parents.
- *
- * Different stack tops pointing to same parent. Make array node for the
- * root where both elements in the root point to the same (original)
- * parent.
- *
- * Different stack tops pointing to different parents. Make array node for
- * the root where each element points to the corresponding original
- * parent.
- *
- * Local-context merges: these local-context merge operations are used when
- * {@code rootIsWildcard} is true.
- *
- * {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
- *
- * {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
- * {@code //EMPTY}; return left graph.
- *
- * Special case of last merge if local context.
- *
- * Full-context merges: these full-context merge operations are used when
- * {@code rootIsWildcard} is false.
- *
- * Must keep all contexts; {@link //EMPTY} in array is a special value (and
- * null parent).
- */
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/LL1Analyzer.js
-/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-class LL1Analyzer {
-    constructor(atn) {
-        this.atn = atn;
-    }
-
-    /**
-     * Compute set of tokens that can follow {@code s} in the ATN in the
-     * specified {@code ctx}.
-     *
-     * If {@code ctx} is {@code null} and the end of the rule containing
-     * {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-     * If {@code ctx} is not {@code null} and the end of the outermost rule is
-     * reached, {@link Token//EOF} is added to the result set.
- * - * @param s the ATN state - * @param stopState the ATN state to stop at. This can be a - * {@link BlockEndState} to detect epsilon paths through a closure. - * @param ctx the complete parser context, or {@code null} if the context - * should be ignored - * - * @return The set of tokens that can follow {@code s} in the ATN in the - * specified {@code ctx}. - */ - LOOK(s, stopState, ctx) { - const r = new IntervalSet(); - const seeThruPreds = true; // ignore preds; get all lookahead - ctx = ctx || null; - const lookContext = ctx!==null ? predictionContextFromRuleContext(s.atn, ctx) : null; - this._LOOK(s, stopState, lookContext, r, new HashSet(), new BitSet(), seeThruPreds, true); - return r; - } - - /** - * Compute set of tokens that can follow {@code s} in the ATN in the - * specified {@code ctx}. - * - *If {@code ctx} is {@code null} and {@code stopState} or the end of the - * rule containing {@code s} is reached, {@link Token//EPSILON} is added to - * the result set. If {@code ctx} is not {@code null} and {@code addEOF} is - * {@code true} and {@code stopState} or the end of the outermost rule is - * reached, {@link Token//EOF} is added to the result set.
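// A usage sketch (not bundle code) of the LOOK computation above; `atn`, `state` and
// `ruleCtx` are hypothetical stand-ins for objects built elsewhere in this bundle.
const analyzer = new LL1Analyzer(atn);
// tokens that can follow `state` given the outer context; pass null to ignore context
const follow = analyzer.LOOK(state, null, ruleCtx);
console.log(follow.contains(Token.EOF)); // true only if the outermost rule can end here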
- *
- * @param s the ATN state.
- * @param stopState the ATN state to stop at. This can be a
- * {@link BlockEndState} to detect epsilon paths through a closure.
- * @param ctx The outer context, or {@code null} if the outer context should
- * not be used.
- * @param look The result lookahead set.
- * @param lookBusy A set used for preventing epsilon closures in the ATN
- * from causing a stack overflow. Outside code should pass
- * {@code new CustomizedSet<ATNConfig>} for this argument.
- */
-
-;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerSkipAction.js
-/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
- * Use is of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-/**
- * Implements the {@code skip} lexer action by calling {@link Lexer//skip}.
- *
- * The {@code skip} command does not have any parameters, so this action is
- * implemented as a singleton instance exposed by {@link //INSTANCE}.
- */ -class LexerSkipAction extends LexerAction { - constructor() { - super(LexerActionType.SKIP); - } - - execute(lexer) { - lexer.skip(); - } - - toString() { - return "skip"; - } -} - -// Provides a singleton instance of this parameterless lexer action. -LexerSkipAction.INSTANCE = new LexerSkipAction(); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerChannelAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code channel} lexer action by calling - * {@link Lexer//setChannel} with the assigned channel. - * Constructs a new {@code channel} action with the specified channel value. - * @param channel The channel value to pass to {@link Lexer//setChannel} - */ -class LexerChannelAction extends LexerAction { - constructor(channel) { - super(LexerActionType.CHANNEL); - this.channel = channel; - } - - /** - *This action is implemented by calling {@link Lexer//setChannel} with the - * value provided by {@link //getChannel}.
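// A sketch (hypothetical channel value, not bundle code) of how a `channel(...)` lexer
// command is represented: the action simply stamps its channel on the lexer when run.
const toHidden = new LexerChannelAction(1);   // channel 1 is HIDDEN by convention
// the simulator later invokes toHidden.execute(lexer), i.e. lexer._channel = 1
console.log(toHidden.toString());             // "channel(1)"
console.log(toHidden.equals(new LexerChannelAction(2))); // false: different channels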
- */ - execute(lexer) { - lexer._channel = this.channel; - } - - updateHashCode(hash) { - hash.update(this.actionType, this.channel); - } - - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerChannelAction)) { - return false; - } else { - return this.channel === other.channel; - } - } - - toString() { - return "channel(" + this.channel + ")"; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerCustomAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Executes a custom lexer action by calling {@link Recognizer//action} with the - * rule and action indexes assigned to the custom action. The implementation of - * a custom action is added to the generated code for the lexer in an override - * of {@link Recognizer//action} when the grammar is compiled. - * - *This class may represent embedded actions created with the {...}
- * syntax in ANTLR 4, as well as actions created for lexer commands where the
- * command argument could not be evaluated when the grammar was compiled.
Custom actions are implemented by calling {@link Lexer//action} with the - * appropriate rule and action indexes.
- */ - execute(lexer) { - lexer.action(null, this.ruleIndex, this.actionIndex); - } - - updateHashCode(hash) { - hash.update(this.actionType, this.ruleIndex, this.actionIndex); - } - - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerCustomAction)) { - return false; - } else { - return this.ruleIndex === other.ruleIndex && this.actionIndex === other.actionIndex; - } - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerMoreAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code more} lexer action by calling {@link Lexer//more}. - * - *The {@code more} command does not have any parameters, so this action is - * implemented as a singleton instance exposed by {@link //INSTANCE}.
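// An illustration (not bundle code) of the INSTANCE singletons used by the parameterless
// actions above: every `skip` or `more` command shares one object, so identity suffices.
console.log(LexerSkipAction.INSTANCE === LexerSkipAction.INSTANCE); // true, one shared instance
console.log(String(LexerSkipAction.INSTANCE)); // "skip", via toString()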
- */
-class LexerMoreAction extends LexerAction {
-    constructor() {
-        super(LexerActionType.MORE);
-    }
-
-    /**
-     * This action is implemented by calling {@link Lexer//more}.
- */ - execute(lexer) { - lexer.more(); - } - - toString() { - return "more"; - } -} - -LexerMoreAction.INSTANCE = new LexerMoreAction(); - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerTypeAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code type} lexer action by calling {@link Lexer//setType} - * with the assigned type - */ - -class LexerTypeAction extends LexerAction { - constructor(type) { - super(LexerActionType.TYPE); - this.type = type; - } - - execute(lexer) { - lexer.type = this.type; - } - - updateHashCode(hash) { - hash.update(this.actionType, this.type); - } - - equals(other) { - if(this === other) { - return true; - } else if (! (other instanceof LexerTypeAction)) { - return false; - } else { - return this.type === other.type; - } - } - - toString() { - return "type(" + this.type + ")"; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerPushModeAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code pushMode} lexer action by calling - * {@link Lexer//pushMode} with the assigned mode - */ -class LexerPushModeAction extends LexerAction { - constructor(mode) { - super(LexerActionType.PUSH_MODE); - this.mode = mode; - } - - /** - *This action is implemented by calling {@link Lexer//pushMode} with the - * value provided by {@link //getMode}.
- */ - execute(lexer) { - lexer.pushMode(this.mode); - } - - updateHashCode(hash) { - hash.update(this.actionType, this.mode); - } - - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerPushModeAction)) { - return false; - } else { - return this.mode === other.mode; - } - } - - toString() { - return "pushMode(" + this.mode + ")"; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerPopModeAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. - * - *The {@code popMode} command does not have any parameters, so this action is - * implemented as a singleton instance exposed by {@link //INSTANCE}.
- */
-class LexerPopModeAction extends LexerAction {
-    constructor() {
-        super(LexerActionType.POP_MODE);
-    }
-
-    /**
-     * This action is implemented by calling {@link Lexer//popMode}.
- */ - execute(lexer) { - lexer.popMode(); - } - - toString() { - return "popMode"; - } -} - -LexerPopModeAction.INSTANCE = new LexerPopModeAction(); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerModeAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Implements the {@code mode} lexer action by calling {@link Lexer//mode} with - * the assigned mode - */ -class LexerModeAction extends LexerAction { - constructor(mode) { - super(LexerActionType.MODE); - this.mode = mode; - } - - /** - *This action is implemented by calling {@link Lexer//mode} with the - * value provided by {@link //getMode}.
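// An illustration (hypothetical mode numbers, not bundle code) of the mode-switching
// actions above: pushMode/popMode maintain a mode stack, while mode() replaces it.
const enterString = new LexerPushModeAction(1);      // e.g. an island-grammar STRING mode
console.log(enterString.toString());                 // "pushMode(1)"
console.log(LexerPopModeAction.INSTANCE.toString()); // "popMode"
console.log(new LexerModeAction(1).equals(enterString)); // false: different action types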
- */ - execute(lexer) { - lexer.mode(this.mode); - } - - updateHashCode(hash) { - hash.update(this.actionType, this.mode); - } - - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerModeAction)) { - return false; - } else { - return this.mode === other.mode; - } - } - - toString() { - return "mode(" + this.mode + ")"; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNDeserializer.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -const SERIALIZED_VERSION = 4; - -function initArray( length, value) { - const tmp = []; - tmp[length-1] = value; - return tmp.map(function(i) {return value;}); -} - -class ATNDeserializer { - constructor(options) { - - if ( options=== undefined || options === null ) { - options = ATNDeserializationOptions.defaultOptions; - } - this.deserializationOptions = options; - this.stateFactories = null; - this.actionFactories = null; - } - - deserialize(data) { - const legacy = this.reset(data); - this.checkVersion(legacy); - if(legacy) - this.skipUUID(); - const atn = this.readATN(); - this.readStates(atn, legacy); - this.readRules(atn, legacy); - this.readModes(atn); - const sets = []; - this.readSets(atn, sets, this.readInt.bind(this)); - if(legacy) - this.readSets(atn, sets, this.readInt32.bind(this)); - this.readEdges(atn, sets); - this.readDecisions(atn); - this.readLexerActions(atn, legacy); - this.markPrecedenceDecisions(atn); - this.verifyATN(atn); - if (this.deserializationOptions.generateRuleBypassTransitions && atn.grammarType === ATNType.PARSER ) { - this.generateRuleBypassTransitions(atn); - // re-verify after modification - this.verifyATN(atn); - } - return atn; - } - - reset(data) { - const version = data.charCodeAt ? data.charCodeAt(0) : data[0]; - if(version === SERIALIZED_VERSION - 1) { - const adjust = function (c) { - const v = c.charCodeAt(0); - return v > 1 ? v - 2 : v + 65534; - }; - const temp = data.split("").map(adjust); - // don't adjust the first value since that's the version number - temp[0] = data.charCodeAt(0); - this.data = temp; - this.pos = 0; - return true; - } else { - this.data = data - this.pos = 0; - return false; - } - } - - skipUUID() { - let count = 0; - while(count++ < 8) - this.readInt(); - } - - checkVersion(legacy) { - const version = this.readInt(); - if ( !legacy && version !== SERIALIZED_VERSION ) { - throw ("Could not deserialize ATN with version " + version + " (expected " + SERIALIZED_VERSION + ")."); - } - } - - readATN() { - const grammarType = this.readInt(); - const maxTokenType = this.readInt(); - return new ATN(grammarType, maxTokenType); - } - - readStates(atn, legacy) { - let j, pair, stateNumber; - const loopBackStateNumbers = []; - const endStateNumbers = []; - const nstates = this.readInt(); - for(let i=0; iThis method updates {@link //dipsIntoOuterContext} and - * {@link //hasSemanticContext} when necessary.
- */ - add(config, mergeCache) { - if (mergeCache === undefined) { - mergeCache = null; - } - if (this.readOnly) { - throw "This set is readonly"; - } - if (config.semanticContext !== SemanticContext.NONE) { - this.hasSemanticContext = true; - } - if (config.reachesIntoOuterContext > 0) { - this.dipsIntoOuterContext = true; - } - const existing = this.configLookup.add(config); - if (existing === config) { - this.cachedHashCode = -1; - this.configs.push(config); // track order here - return true; - } - // a previous (s,i,pi,_), merge with it and save result - const rootIsWildcard = !this.fullCtx; - const merged = merge(existing.context, config.context, rootIsWildcard, mergeCache); - /** - * no need to check for existing.context, config.context in cache - * since only way to create new graphs is "call rule" and here. We - * cache at both places - */ - existing.reachesIntoOuterContext = Math.max( existing.reachesIntoOuterContext, config.reachesIntoOuterContext); - // make sure to preserve the precedence filter suppression during the merge - if (config.precedenceFilterSuppressed) { - existing.precedenceFilterSuppressed = true; - } - existing.context = merged; // replace context; no need to alt mapping - return true; - } - - getStates() { - const states = new HashSet(); - for (let i = 0; i < this.configs.length; i++) { - states.add(this.configs[i].state); - } - return states; - } - - getPredicates() { - const preds = []; - for (let i = 0; i < this.configs.length; i++) { - const c = this.configs[i].semanticContext; - if (c !== SemanticContext.NONE) { - preds.push(c.semanticContext); - } - } - return preds; - } - - optimizeConfigs(interpreter) { - if (this.readOnly) { - throw "This set is readonly"; - } - if (this.configLookup.length === 0) { - return; - } - for (let i = 0; i < this.configs.length; i++) { - const config = this.configs[i]; - config.context = interpreter.getCachedContext(config.context); - } - } - - addAll(coll) { - for (let i = 0; i < coll.length; i++) { - this.add(coll[i]); - } - return false; - } - - equals(other) { - return this === other || - (other instanceof ATNConfigSet && - equalArrays(this.configs, other.configs) && - this.fullCtx === other.fullCtx && - this.uniqueAlt === other.uniqueAlt && - this.conflictingAlts === other.conflictingAlts && - this.hasSemanticContext === other.hasSemanticContext && - this.dipsIntoOuterContext === other.dipsIntoOuterContext); - } - - hashCode() { - const hash = new HashCode(); - hash.update(this.configs); - return hash.finish(); - } - - updateHashCode(hash) { - if (this.readOnly) { - if (this.cachedHashCode === -1) { - this.cachedHashCode = this.hashCode(); - } - hash.update(this.cachedHashCode); - } else { - hash.update(this.hashCode()); - } - } - - isEmpty() { - return this.configs.length === 0; - } - - contains(item) { - if (this.configLookup === null) { - throw "This method is not implemented for readonly sets."; - } - return this.configLookup.contains(item); - } - - containsFast(item) { - if (this.configLookup === null) { - throw "This method is not implemented for readonly sets."; - } - return this.configLookup.containsFast(item); - } - - clear() { - if (this.readOnly) { - throw "This set is readonly"; - } - this.configs = []; - this.cachedHashCode = -1; - this.configLookup = new HashSet(); - } - - setReadonly(readOnly) { - this.readOnly = readOnly; - if (readOnly) { - this.configLookup = null; // can't mod, no need for lookup cache - } - } - - toString() { - return arrayToString(this.configs) + - (this.hasSemanticContext ? 
",hasSemanticContext=" + this.hasSemanticContext : "") + - (this.uniqueAlt !== ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + - (this.conflictingAlts !== null ? ",conflictingAlts=" + this.conflictingAlts : "") + - (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : ""); - } - - get items(){ - return this.configs; - } - - get length(){ - return this.configs.length; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/DFAState.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - -/** - * A DFA state represents a set of possible ATN configurations. - * As Aho, Sethi, Ullman p. 117 says "The DFA uses its state - * to keep track of all possible states the ATN can be in after - * reading each input symbol. That is to say, after reading - * input a1a2..an, the DFA is in a state that represents the - * subset T of the states of the ATN that are reachable from the - * ATN's start state along some path labeled a1a2..an." - * In conventional NFA→DFA conversion, therefore, the subset T - * would be a bitset representing the set of states the - * ATN could be in. We need to track the alt predicted by each - * state as well, however. More importantly, we need to maintain - * a stack of states, tracking the closure operations as they - * jump from rule to rule, emulating rule invocations (method calls). - * I have to add a stack to simulate the proper lookahead sequences for - * the underlying LL grammar from which the ATN was derived. - * - *I use a set of ATNConfig objects not simple states. An ATNConfig - * is both a state (ala normal conversion) and a RuleContext describing - * the chain of rules (if any) followed to arrive at that state.
- * - *A DFA state may have multiple references to a particular state, - * but with different ATN contexts (with same or different alts) - * meaning that state was reached via a different set of rule invocations.
- */ -class DFAState { - constructor(stateNumber, configs) { - if (stateNumber === null) { - stateNumber = -1; - } - if (configs === null) { - configs = new ATNConfigSet(); - } - this.stateNumber = stateNumber; - this.configs = configs; - /** - * {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) - * {@link Token//EOF} maps to {@code edges[0]}. - */ - this.edges = null; - this.isAcceptState = false; - /** - * if accept state, what ttype do we match or alt do we predict? - * This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link//predicates} - * {@code !=null} or {@link //requiresFullContext}. - */ - this.prediction = 0; - this.lexerActionExecutor = null; - /** - * Indicates that this state was created during SLL prediction that - * discovered a conflict between the configurations in the state. Future - * {@link ParserATNSimulator//execATN} invocations immediately jumped doing - * full context prediction if this field is true. - */ - this.requiresFullContext = false; - /** - * During SLL parsing, this is a list of predicates associated with the - * ATN configurations of the DFA state. When we have predicates, - * {@link //requiresFullContext} is {@code false} since full context - * prediction evaluates predicates - * on-the-fly. If this is not null, then {@link //prediction} is - * {@link ATN//INVALID_ALT_NUMBER}. - * - *We only use these for non-{@link //requiresFullContext} but - * conflicting states. That - * means we know from the context (it's $ or we don't dip into outer - * context) that it's an ambiguity not a conflict.
- * - *This list is computed by {@link - * ParserATNSimulator//predicateDFAState}.
- */ - this.predicates = null; - return this; - } - - /** - * Get the set of all alts mentioned by all ATN configurations in this - * DFA state. - */ - getAltSet() { - const alts = new HashSet(); - if (this.configs !== null) { - for (let i = 0; i < this.configs.length; i++) { - const c = this.configs[i]; - alts.add(c.alt); - } - } - if (alts.length === 0) { - return null; - } else { - return alts; - } - } - - /** - * Two {@link DFAState} instances are equal if their ATN configuration sets - * are the same. This method is used to see if a state already exists. - * - *Because the number of alternatives and number of ATN configurations are - * finite, there is a finite number of DFA states that can be processed. - * This is necessary to show that the algorithm terminates.
- * - *Cannot test the DFA state numbers here because in - * {@link ParserATNSimulator//addDFAState} we need to know if any other state - * exists that has this exact set of ATN configurations. The - * {@link //stateNumber} is irrelevant.
- */ - equals(other) { - // compare set of ATN configurations in this set with other - return this === other || - (other instanceof DFAState && - this.configs.equals(other.configs)); - } - - toString() { - let s = "" + this.stateNumber + ":" + this.configs; - if(this.isAcceptState) { - s = s + "=>"; - if (this.predicates !== null) - s = s + this.predicates; - else - s = s + this.prediction; - } - return s; - } - - hashCode() { - const hash = new HashCode(); - hash.update(this.configs); - return hash.finish(); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNSimulator.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - -class ATNSimulator { - constructor(atn, sharedContextCache) { - /** - * The context cache maps all PredictionContext objects that are == - * to a single cached copy. This cache is shared across all contexts - * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet - * to use only cached nodes/graphs in addDFAState(). We don't want to - * fill this during closure() since there are lots of contexts that - * pop up but are not used ever again. It also greatly slows down closure(). - * - *This cache makes a huge difference in memory and a little bit in speed. - * For the Java grammar on java.*, it dropped the memory requirements - * at the end from 25M to 16M. We don't store any of the full context - * graphs in the DFA because they are limited to local context only, - * but apparently there's a lot of repetition there as well. We optimize - * the config contexts before storing the config set in the DFA states - * by literally rebuilding them with cached subgraphs only.
- * - *I tried a cache for use during closure operations, that was - * whacked after each adaptivePredict(). It cost a little bit - * more time I think and doesn't save on the overall footprint - * so it's not worth the complexity.
- */ - this.atn = atn; - this.sharedContextCache = sharedContextCache; - return this; - } - - getCachedContext(context) { - if (this.sharedContextCache ===null) { - return context; - } - const visited = new HashMap_HashMap(); - return getCachedPredictionContext(context, this.sharedContextCache, visited); - } -} - -// Must distinguish between missing edge and edge we know leads nowhere/// -ATNSimulator.ERROR = new DFAState(0x7FFFFFFF, new ATNConfigSet()); - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/OrderedATNConfigSet.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -class OrderedATNConfigSet extends ATNConfigSet { - constructor() { - super(); - this.configLookup = new HashSet(); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerATNConfig.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -class LexerATNConfig extends ATNConfig { - constructor(params, config) { - super(params, config); - - // This is the backing field for {@link //getLexerActionExecutor}. - const lexerActionExecutor = params.lexerActionExecutor || null; - this.lexerActionExecutor = lexerActionExecutor || (config!==null ? config.lexerActionExecutor : null); - this.passedThroughNonGreedyDecision = config!==null ? this.checkNonGreedyDecision(config, this.state) : false; - this.hashCodeForConfigSet = LexerATNConfig.prototype.hashCode; - this.equalsForConfigSet = LexerATNConfig.prototype.equals; - return this; - } - - updateHashCode(hash) { - hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext, this.passedThroughNonGreedyDecision, this.lexerActionExecutor); - } - - equals(other) { - return this === other || - (other instanceof LexerATNConfig && - this.passedThroughNonGreedyDecision === other.passedThroughNonGreedyDecision && - (this.lexerActionExecutor ? this.lexerActionExecutor.equals(other.lexerActionExecutor) : !other.lexerActionExecutor) && - super.equals(other)); - } - - checkNonGreedyDecision(source, target) { - return source.passedThroughNonGreedyDecision || - (target instanceof DecisionState) && target.nonGreedy; - } -} - - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerIndexedCustomAction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -/** - * This implementation of {@link LexerAction} is used for tracking input offsets - * for position-dependent actions within a {@link LexerActionExecutor}. - * - *This action is not serialized as part of the ATN, and is only required for - * position-dependent lexer actions which appear at a location other than the - * end of a rule. For more information about DFA optimizations employed for - * lexer actions, see {@link LexerActionExecutor//append} and - * {@link LexerActionExecutor//fixOffsetBeforeMatch}.
- * - * Constructs a new indexed custom action by associating a character offset - * with a {@link LexerAction}. - * - *Note: This class is only required for lexer actions for which - * {@link LexerAction//isPositionDependent} returns {@code true}.
- * - * @param offset The offset into the input {@link CharStream}, relative to - * the token start index, at which the specified lexer action should be - * executed. - * @param action The lexer action to execute at a particular offset in the - * input {@link CharStream}. - */ - - - -class LexerIndexedCustomAction extends LexerAction { - constructor(offset, action) { - super(action.actionType); - this.offset = offset; - this.action = action; - this.isPositionDependent = true; - } - - /** - *This method calls {@link //execute} on the result of {@link //getAction} - * using the provided {@code lexer}.
- */ - execute(lexer) { - // assume the input stream position was properly set by the calling code - this.action.execute(lexer); - } - - updateHashCode(hash) { - hash.update(this.actionType, this.offset, this.action); - } - - equals(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerIndexedCustomAction)) { - return false; - } else { - return this.offset === other.offset && this.action === other.action; - } - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerActionExecutor.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -class LexerActionExecutor { - /** - * Represents an executor for a sequence of lexer actions which traversed during - * the matching operation of a lexer rule (token). - * - *The executor tracks position information for position-dependent lexer actions - * efficiently, ensuring that actions appearing only at the end of the rule do - * not cause bloating of the {@link DFA} created for the lexer.
- */ - constructor(lexerActions) { - this.lexerActions = lexerActions === null ? [] : lexerActions; - /** - * Caches the result of {@link //hashCode} since the hash code is an element - * of the performance-critical {@link LexerATNConfig//hashCode} operation - */ - this.cachedHashCode = HashCode.hashStuff(lexerActions); // "".join([str(la) for la in - // lexerActions])) - return this; - } - - /** - * Creates a {@link LexerActionExecutor} which encodes the current offset - * for position-dependent lexer actions. - * - *Normally, when the executor encounters lexer actions where - * {@link LexerAction//isPositionDependent} returns {@code true}, it calls - * {@link IntStream//seek} on the input {@link CharStream} to set the input - * position to the end of the current token. This behavior provides - * for efficient DFA representation of lexer actions which appear at the end - * of a lexer rule, even when the lexer rule matches a variable number of - * characters.
- *
- * Prior to traversing a match transition in the ATN, the current offset
- * from the token start index is assigned to all position-dependent lexer
- * actions which have not already been assigned a fixed offset. By storing
- * the offsets relative to the token start index, the DFA representation of
- * lexer actions which appear in the middle of tokens remains efficient due
- * to sharing among tokens of the same length, regardless of their absolute
- * position in the input stream.
- *
- * If the current executor already has offsets assigned to all
- * position-dependent lexer actions, the method returns {@code this}.
- * - * @param offset The current offset to assign to all position-dependent - * lexer actions which do not already have offsets assigned. - * - * @return {LexerActionExecutor} A {@link LexerActionExecutor} which stores input stream offsets - * for all position-dependent lexer actions. - */ - fixOffsetBeforeMatch(offset) { - let updatedLexerActions = null; - for (let i = 0; i < this.lexerActions.length; i++) { - if (this.lexerActions[i].isPositionDependent && - !(this.lexerActions[i] instanceof LexerIndexedCustomAction)) { - if (updatedLexerActions === null) { - updatedLexerActions = this.lexerActions.concat([]); - } - updatedLexerActions[i] = new LexerIndexedCustomAction(offset, - this.lexerActions[i]); - } - } - if (updatedLexerActions === null) { - return this; - } else { - return new LexerActionExecutor(updatedLexerActions); - } - } - - /** - * Execute the actions encapsulated by this executor within the context of a - * particular {@link Lexer}. - * - *This method calls {@link IntStream//seek} to set the position of the - * {@code input} {@link CharStream} prior to calling - * {@link LexerAction//execute} on a position-dependent action. Before the - * method returns, the input position will be restored to the same position - * it was in when the method was invoked.
- * - * @param lexer The lexer instance. - * @param input The input stream which is the source for the current token. - * When this method is called, the current {@link IntStream//index} for - * {@code input} should be the start of the following token, i.e. 1 - * character past the end of the current token. - * @param startIndex The token start index. This value may be passed to - * {@link IntStream//seek} to set the {@code input} position to the beginning - * of the token. - */ - execute(lexer, input, startIndex) { - let requiresSeek = false; - const stopIndex = input.index; - try { - for (let i = 0; i < this.lexerActions.length; i++) { - let lexerAction = this.lexerActions[i]; - if (lexerAction instanceof LexerIndexedCustomAction) { - const offset = lexerAction.offset; - input.seek(startIndex + offset); - lexerAction = lexerAction.action; - requiresSeek = (startIndex + offset) !== stopIndex; - } else if (lexerAction.isPositionDependent) { - input.seek(stopIndex); - requiresSeek = false; - } - lexerAction.execute(lexer); - } - } finally { - if (requiresSeek) { - input.seek(stopIndex); - } - } - } - - hashCode() { - return this.cachedHashCode; - } - - updateHashCode(hash) { - hash.update(this.cachedHashCode); - } - - equals(other) { - if (this === other) { - return true; - } else if (!(other instanceof LexerActionExecutor)) { - return false; - } else if (this.cachedHashCode != other.cachedHashCode) { - return false; - } else if (this.lexerActions.length != other.lexerActions.length) { - return false; - } else { - const numActions = this.lexerActions.length - for (let idx = 0; idx < numActions; ++idx) { - if (!this.lexerActions[idx].equals(other.lexerActions[idx])) { - return false; - } - } - return true; - } - } - - /** - * Creates a {@link LexerActionExecutor} which executes the actions for - * the input {@code lexerActionExecutor} followed by a specified - * {@code lexerAction}. - * - * @param lexerActionExecutor The executor for actions already traversed by - * the lexer while matching a token within a particular - * {@link LexerATNConfig}. If this is {@code null}, the method behaves as - * though it were an empty executor. - * @param lexerAction The lexer action to execute after the actions - * specified in {@code lexerActionExecutor}. - * - * @return {LexerActionExecutor} A {@link LexerActionExecutor} for executing the combine actions - * of {@code lexerActionExecutor} and {@code lexerAction}. - */ - static append(lexerActionExecutor, lexerAction) { - if (lexerActionExecutor === null) { - return new LexerActionExecutor([ lexerAction ]); - } - const lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]); - return new LexerActionExecutor(lexerActions); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerATNSimulator.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - - - - - - - - - -function resetSimState(sim) { - sim.index = -1; - sim.line = 0; - sim.column = -1; - sim.dfaState = null; -} - -class SimState { - constructor() { - resetSimState(this); - } - - reset() { - resetSimState(this); - } -} - -class LexerATNSimulator extends ATNSimulator { - /** - * When we hit an accept state in either the DFA or the ATN, we - * have to notify the character stream to start buffering characters - * via {@link IntStream//mark} and record the current state. 
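// A sketch (hypothetical indexes, not bundle code) of the offset fixing described above:
// a custom action sitting mid-rule is position-dependent, so fixOffsetBeforeMatch() wraps
// it in a LexerIndexedCustomAction pinned to its offset from the token start.
const midRule = new LexerCustomAction(2, 0);      // position-dependent by construction
let executor = new LexerActionExecutor([midRule]);
executor = executor.fixOffsetBeforeMatch(3);      // wraps as LexerIndexedCustomAction(3, midRule)
console.log(executor.lexerActions[0] instanceof LexerIndexedCustomAction); // true
console.log(executor.fixOffsetBeforeMatch(5) === executor); // true: offsets already assigned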
- * The current sim state
- * includes the current index into the input, the current line,
- * and current character position in that line. Note that the Lexer is
- * tracking the starting line and character position of the token. These
- * variables track the "state" of the simulator when it hits an accept state.
- *
- * We track these variables separately for the DFA and ATN simulation
- * because the DFA simulation often has to fail over to the ATN
- * simulation. If the ATN simulation fails, we need the DFA to fall
- * back to its previously accepted state, if any. If the ATN succeeds,
- * then the ATN does the accept and the DFA simulator that invoked it
- * can simply return the predicted token type.
- */ - constructor(recog, atn, decisionToDFA, sharedContextCache) { - super(atn, sharedContextCache); - this.decisionToDFA = decisionToDFA; - this.recog = recog; - /** - * The current token's starting index into the character stream. - * Shared across DFA to ATN simulation in case the ATN fails and the - * DFA did not have a previous accept state. In this case, we use the - * ATN-generated exception object - */ - this.startIndex = -1; - // line number 1..n within the input/// - this.line = 1; - /** - * The index of the character relative to the beginning of the line - * 0..n-1 - */ - this.column = 0; - this.mode = Lexer.DEFAULT_MODE; - /** - * Used during DFA/ATN exec to record the most recent accept configuration - * info - */ - this.prevAccept = new SimState(); - } - - copyState(simulator) { - this.column = simulator.column; - this.line = simulator.line; - this.mode = simulator.mode; - this.startIndex = simulator.startIndex; - } - - match(input, mode) { - this.mode = mode; - const mark = input.mark(); - try { - this.startIndex = input.index; - this.prevAccept.reset(); - const dfa = this.decisionToDFA[mode]; - if (dfa.s0 === null) { - return this.matchATN(input); - } else { - return this.execATN(input, dfa.s0); - } - } finally { - input.release(mark); - } - } - - reset() { - this.prevAccept.reset(); - this.startIndex = -1; - this.line = 1; - this.column = 0; - this.mode = Lexer.DEFAULT_MODE; - } - - matchATN(input) { - const startState = this.atn.modeToStartState[this.mode]; - - if (LexerATNSimulator.debug) { - console.log("matchATN mode " + this.mode + " start: " + startState); - } - const old_mode = this.mode; - const s0_closure = this.computeStartState(input, startState); - const suppressEdge = s0_closure.hasSemanticContext; - s0_closure.hasSemanticContext = false; - - const next = this.addDFAState(s0_closure); - if (!suppressEdge) { - this.decisionToDFA[this.mode].s0 = next; - } - - const predict = this.execATN(input, next); - - if (LexerATNSimulator.debug) { - console.log("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()); - } - return predict; - } - - execATN(input, ds0) { - if (LexerATNSimulator.debug) { - console.log("start state closure=" + ds0.configs); - } - if (ds0.isAcceptState) { - // allow zero-length tokens - this.captureSimState(this.prevAccept, input, ds0); - } - let t = input.LA(1); - let s = ds0; // s is current/from DFA state - - for (; ;) { // while more work - if (LexerATNSimulator.debug) { - console.log("execATN loop starting closure: " + s.configs); - } - - /** - * As we move src->trg, src->trg, we keep track of the previous trg to - * avoid looking up the DFA state again, which is expensive. - * If the previous target was already part of the DFA, we might - * be able to avoid doing a reach operation upon t. If s!=null, - * it means that semantic predicates didn't prevent us from - * creating a DFA state. Once we know s!=null, we check to see if - * the DFA state has an edge already for t. If so, we can just reuse - * it's configuration set; there's no point in re-computing it. - * This is kind of like doing DFA simulation within the ATN - * simulation because DFA simulation is really just a way to avoid - * computing reach/closure sets. Technically, once we know that - * we have a previously added DFA state, we could jump over to - * the DFA simulator. But, that would mean popping back and forth - * a lot and making things more complicated algorithmically. - * This optimization makes a lot of sense for loops within DFA. 
- * A character will take us back to an existing DFA state - * that already has lots of edges out of it. e.g., .* in comments. - * print("Target for:" + str(s) + " and:" + str(t)) - */ - let target = this.getExistingTargetState(s, t); - // print("Existing:" + str(target)) - if (target === null) { - target = this.computeTargetState(input, s, t); - // print("Computed:" + str(target)) - } - if (target === ATNSimulator.ERROR) { - break; - } - // If this is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. - if (t !== Token.EOF) { - this.consume(input); - } - if (target.isAcceptState) { - this.captureSimState(this.prevAccept, input, target); - if (t === Token.EOF) { - break; - } - } - t = input.LA(1); - s = target; // flip; current DFA target becomes new src/from state - } - return this.failOrAccept(this.prevAccept, input, s.configs, t); - } - - /** - * Get an existing target state for an edge in the DFA. If the target state - * for the edge has not yet been computed or is otherwise not available, - * this method returns {@code null}. - * - * @param s The current DFA state - * @param t The next input symbol - * @return The existing target DFA state for the given input symbol - * {@code t}, or {@code null} if the target state for this edge is not - * already cached - */ - getExistingTargetState(s, t) { - if (s.edges === null || t < LexerATNSimulator.MIN_DFA_EDGE || t > LexerATNSimulator.MAX_DFA_EDGE) { - return null; - } - - let target = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE]; - if (target === undefined) { - target = null; - } - if (LexerATNSimulator.debug && target !== null) { - console.log("reuse state " + s.stateNumber + " edge to " + target.stateNumber); - } - return target; - } - - /** - * Compute a target state for an edge in the DFA, and attempt to add the - * computed state and corresponding edge to the DFA. - * - * @param input The input stream - * @param s The current DFA state - * @param t The next input symbol - * - * @return The computed target DFA state for the given input symbol - * {@code t}. If {@code t} does not lead to a valid DFA state, this method - * returns {@link //ERROR}. - */ - computeTargetState(input, s, t) { - const reach = new OrderedATNConfigSet(); - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - this.getReachableConfigSet(input, s.configs, reach, t); - - if (reach.items.length === 0) { // we got nowhere on t from s - if (!reach.hasSemanticContext) { - // we got nowhere on t, don't throw out this knowledge; it'd - // cause a failover from DFA later. 
- this.addDFAEdge(s, t, ATNSimulator.ERROR); - } - // stop when we can't match any more char - return ATNSimulator.ERROR; - } - // Add an edge from s to target DFA found/created for reach - return this.addDFAEdge(s, t, null, reach); - } - - failOrAccept(prevAccept, input, reach, t) { - if (this.prevAccept.dfaState !== null) { - const lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor; - this.accept(input, lexerActionExecutor, this.startIndex, - prevAccept.index, prevAccept.line, prevAccept.column); - return prevAccept.dfaState.prediction; - } else { - // if no accept and EOF is first char, return EOF - if (t === Token.EOF && input.index === this.startIndex) { - return Token.EOF; - } - throw new LexerNoViableAltException(this.recog, input, this.startIndex, reach); - } - } - - /** - * Given a starting configuration set, figure out all ATN configurations - * we can reach upon input {@code t}. Parameter {@code reach} is a return - * parameter. - */ - getReachableConfigSet(input, closure, reach, t) { - // this is used to skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - let skipAlt = ATN.INVALID_ALT_NUMBER; - for (let i = 0; i < closure.items.length; i++) { - const cfg = closure.items[i]; - const currentAltReachedAcceptState = (cfg.alt === skipAlt); - if (currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision) { - continue; - } - if (LexerATNSimulator.debug) { - console.log("testing %s at %s\n", this.getTokenName(t), cfg - .toString(this.recog, true)); - } - for (let j = 0; j < cfg.state.transitions.length; j++) { - const trans = cfg.state.transitions[j]; // for each transition - const target = this.getReachableTarget(trans, t); - if (target !== null) { - let lexerActionExecutor = cfg.lexerActionExecutor; - if (lexerActionExecutor !== null) { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - this.startIndex); - } - const treatEofAsEpsilon = (t === Token.EOF); - const config = new LexerATNConfig({state: target, lexerActionExecutor: lexerActionExecutor}, cfg); - if (this.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEofAsEpsilon)) { - // any remaining configs for this alt have a lower priority - // than the one that just reached an accept state. - skipAlt = cfg.alt; - } - } - } - } - } - - accept(input, lexerActionExecutor, startIndex, index, line, charPos) { - if (LexerATNSimulator.debug) { - console.log("ACTION %s\n", lexerActionExecutor); - } - // seek to after last char in token - input.seek(index); - this.line = line; - this.column = charPos; - if (lexerActionExecutor !== null && this.recog !== null) { - lexerActionExecutor.execute(this.recog, input, startIndex); - } - } - - getReachableTarget(trans, t) { - if (trans.matches(t, 0, Lexer.MAX_CHAR_VALUE)) { - return trans.target; - } else { - return null; - } - } - - computeStartState(input, p) { - const initialContext = PredictionContext.EMPTY; - const configs = new OrderedATNConfigSet(); - for (let i = 0; i < p.transitions.length; i++) { - const target = p.transitions[i].target; - const cfg = new LexerATNConfig({state: target, alt: i + 1, context: initialContext}, null); - this.closure(input, cfg, configs, false, false, false); - } - return configs; - } - - /** - * Since the alternatives within any lexer decision are ordered by - * preference, this method stops pursuing the closure as soon as an accept - * state is reached. 
After the first accept state is reached by depth-first - * search from {@code config}, all other (potentially reachable) states for - * this rule would have a lower priority. - * - * @return {Boolean} {@code true} if an accept state is reached, otherwise - * {@code false}. - */ - closure(input, config, configs, - currentAltReachedAcceptState, speculative, treatEofAsEpsilon) { - let cfg = null; - if (LexerATNSimulator.debug) { - console.log("closure(" + config.toString(this.recog, true) + ")"); - } - if (config.state instanceof RuleStopState) { - if (LexerATNSimulator.debug) { - if (this.recog !== null) { - console.log("closure at %s rule stop %s\n", this.recog.ruleNames[config.state.ruleIndex], config); - } else { - console.log("closure at rule stop %s\n", config); - } - } - if (config.context === null || config.context.hasEmptyPath()) { - if (config.context === null || config.context.isEmpty()) { - configs.add(config); - return true; - } else { - configs.add(new LexerATNConfig({state: config.state, context: PredictionContext.EMPTY}, config)); - currentAltReachedAcceptState = true; - } - } - if (config.context !== null && !config.context.isEmpty()) { - for (let i = 0; i < config.context.length; i++) { - if (config.context.getReturnState(i) !== PredictionContext.EMPTY_RETURN_STATE) { - const newContext = config.context.getParent(i); // "pop" return state - const returnState = this.atn.states[config.context.getReturnState(i)]; - cfg = new LexerATNConfig({state: returnState, context: newContext}, config); - currentAltReachedAcceptState = this.closure(input, cfg, - configs, currentAltReachedAcceptState, speculative, - treatEofAsEpsilon); - } - } - } - return currentAltReachedAcceptState; - } - // optimization - if (!config.state.epsilonOnlyTransitions) { - if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) { - configs.add(config); - } - } - for (let j = 0; j < config.state.transitions.length; j++) { - const trans = config.state.transitions[j]; - cfg = this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon); - if (cfg !== null) { - currentAltReachedAcceptState = this.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEofAsEpsilon); - } - } - return currentAltReachedAcceptState; - } - - // side-effect: can alter configs.hasSemanticContext - getEpsilonTarget(input, config, trans, - configs, speculative, treatEofAsEpsilon) { - let cfg = null; - if (trans.serializationType === Transition.RULE) { - const newContext = SingletonPredictionContext.create(config.context, trans.followState.stateNumber); - cfg = new LexerATNConfig({state: trans.target, context: newContext}, config); - } else if (trans.serializationType === Transition.PRECEDENCE) { - throw "Precedence predicates are not supported in lexers."; - } else if (trans.serializationType === Transition.PREDICATE) { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for this "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // this predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? 
; - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since this is when we - // test them, we cannot cash the DFA state target of ID. - - if (LexerATNSimulator.debug) { - console.log("EVAL rule " + trans.ruleIndex + ":" + trans.predIndex); - } - configs.hasSemanticContext = true; - if (this.evaluatePredicate(input, trans.ruleIndex, trans.predIndex, speculative)) { - cfg = new LexerATNConfig({state: trans.target}, config); - } - } else if (trans.serializationType === Transition.ACTION) { - if (config.context === null || config.context.hasEmptyPath()) { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In this case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - const lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, - this.atn.lexerActions[trans.actionIndex]); - cfg = new LexerATNConfig({state: trans.target, lexerActionExecutor: lexerActionExecutor}, config); - } else { - // ignore actions in referenced rules - cfg = new LexerATNConfig({state: trans.target}, config); - } - } else if (trans.serializationType === Transition.EPSILON) { - cfg = new LexerATNConfig({state: trans.target}, config); - } else if (trans.serializationType === Transition.ATOM || - trans.serializationType === Transition.RANGE || - trans.serializationType === Transition.SET) { - if (treatEofAsEpsilon) { - if (trans.matches(Token.EOF, 0, Lexer.MAX_CHAR_VALUE)) { - cfg = new LexerATNConfig({state: trans.target}, config); - } - } - } - return cfg; - } - - /** - * Evaluate a predicate specified in the lexer. - * - *If {@code speculative} is {@code true}, this method was called before - * {@link //consume} for the matched character. This method should call - * {@link //consume} before evaluating the predicate to ensure position - * sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine}, - * and {@link Lexer//getcolumn}, properly reflect the current - * lexer state. This method should restore {@code input} and the simulator - * to the original state before returning (i.e. undo the actions made by the - * call to {@link //consume}.
- * - * @param input The input stream. - * @param ruleIndex The rule containing the predicate. - * @param predIndex The index of the predicate within the rule. - * @param speculative {@code true} if the current index in {@code input} is - * one character before the predicate's location. - * - * @return {@code true} if the specified predicate evaluates to - * {@code true}. - */ - evaluatePredicate(input, ruleIndex, - predIndex, speculative) { - // assume true if no recognizer was provided - if (this.recog === null) { - return true; - } - if (!speculative) { - return this.recog.sempred(null, ruleIndex, predIndex); - } - const savedcolumn = this.column; - const savedLine = this.line; - const index = input.index; - const marker = input.mark(); - try { - this.consume(input); - return this.recog.sempred(null, ruleIndex, predIndex); - } finally { - this.column = savedcolumn; - this.line = savedLine; - input.seek(index); - input.release(marker); - } - } - - captureSimState(settings, input, dfaState) { - settings.index = input.index; - settings.line = this.line; - settings.column = this.column; - settings.dfaState = dfaState; - } - - addDFAEdge(from_, tk, to, cfgs) { - if (to === undefined) { - to = null; - } - if (cfgs === undefined) { - cfgs = null; - } - if (to === null && cfgs !== null) { - // leading to this call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes this edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to resynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - const suppressEdge = cfgs.hasSemanticContext; - cfgs.hasSemanticContext = false; - - to = this.addDFAState(cfgs); - - if (suppressEdge) { - return to; - } - } - // add the edge - if (tk < LexerATNSimulator.MIN_DFA_EDGE || tk > LexerATNSimulator.MAX_DFA_EDGE) { - // Only track edges within the DFA bounds - return to; - } - if (LexerATNSimulator.debug) { - console.log("EDGE " + from_ + " -> " + to + " upon " + tk); - } - if (from_.edges === null) { - // make room for tokens 1..n and -1 masquerading as index 0 - from_.edges = []; - } - from_.edges[tk - LexerATNSimulator.MIN_DFA_EDGE] = to; // connect - - return to; - } - - /** - * Add a new DFA state if there isn't one with this set of - * configurations already. This method also detects the first - * configuration containing an ATN rule stop state. Later, when - * traversing the DFA, we will know which rule to accept. 
- */ - addDFAState(configs) { - const proposed = new DFAState(null, configs); - let firstConfigWithRuleStopState = null; - for (let i = 0; i < configs.items.length; i++) { - const cfg = configs.items[i]; - if (cfg.state instanceof RuleStopState) { - firstConfigWithRuleStopState = cfg; - break; - } - } - if (firstConfigWithRuleStopState !== null) { - proposed.isAcceptState = true; - proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor; - proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; - } - const dfa = this.decisionToDFA[this.mode]; - const existing = dfa.states.get(proposed); - if (existing !== null) { - return existing; - } - const newState = proposed; - newState.stateNumber = dfa.states.length; - configs.setReadonly(true); - newState.configs = configs; - dfa.states.add(newState); - return newState; - } - - getDFA(mode) { - return this.decisionToDFA[mode]; - } - -// Get the text matched so far for the current token. - getText(input) { - // index is first lookahead char, don't include. - return input.getText(this.startIndex, input.index - 1); - } - - consume(input) { - const curChar = input.LA(1); - if (curChar === "\n".charCodeAt(0)) { - this.line += 1; - this.column = 0; - } else { - this.column += 1; - } - input.consume(); - } - - getTokenName(tt) { - if (tt === -1) { - return "EOF"; - } else { - return "'" + String.fromCharCode(tt) + "'"; - } - } -} - -LexerATNSimulator.debug = false; -LexerATNSimulator.dfa_debug = false; - -LexerATNSimulator.MIN_DFA_EDGE = 0; -LexerATNSimulator.MAX_DFA_EDGE = 127; // forces unicode to stay in ATN - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/PredPrediction.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -/** - * Map a predicate to a predicted alternative. - */ -class PredPrediction { - constructor(pred, alt) { - this.alt = alt; - this.pred = pred; - } - - toString() { - return "(" + this.pred + ", " + this.alt + ")"; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/AltDict.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -class AltDict { - - constructor() { - this.data = {}; - } - - get(key) { - return this.data["k-" + key] || null; - } - - set(key, value) { - this.data["k-" + key] = value; - } - - values() { - return Object.keys(this.data).filter(key => key.startsWith("k-")).map(key => this.data[key], this); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/PredictionMode.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - - - - - -/** - * This enumeration defines the prediction modes available in ANTLR 4 along with - * utility methods for analyzing configuration sets for conflicts and/or - * ambiguities. - */ -const PredictionMode = { - /** - * The SLL(*) prediction mode. This prediction mode ignores the current - * parser context when making predictions. This is the fastest prediction - * mode, and provides correct results for many grammars. 
This prediction - * mode is more powerful than the prediction mode provided by ANTLR 3, but - * may result in syntax errors for grammar and input combinations which are - * not SLL. - * - *- * When using this prediction mode, the parser will either return a correct - * parse tree (i.e. the same parse tree that would be returned with the - * {@link //LL} prediction mode), or it will report a syntax error. If a - * syntax error is encountered when using the {@link //SLL} prediction mode, - * it may be due to either an actual syntax error in the input or indicate - * that the particular combination of grammar and input requires the more - * powerful {@link //LL} prediction abilities to complete successfully.
- * - *- * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.
- */ - SLL: 0, - - /** - * The LL(*) prediction mode. This prediction mode allows the current parser - * context to be used for resolving SLL conflicts that occur during - * prediction. This is the fastest prediction mode that guarantees correct - * parse results for all combinations of grammars with syntactically correct - * inputs. - * - *- * When using this prediction mode, the parser will make correct decisions - * for all syntactically-correct grammar and input combinations. However, in - * cases where the grammar is truly ambiguous this prediction mode might not - * report a precise answer for exactly which alternatives are - * ambiguous.
- * - *- * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.
- */ - LL: 1, - - /** - * - * The LL(*) prediction mode with exact ambiguity detection. In addition to - * the correctness guarantees provided by the {@link //LL} prediction mode, - * this prediction mode instructs the prediction algorithm to determine the - * complete and exact set of ambiguous alternatives for every ambiguous - * decision encountered while parsing. - * - *- * This prediction mode may be used for diagnosing ambiguities during - * grammar development. Due to the performance overhead of calculating sets - * of ambiguous alternatives, this prediction mode should be avoided when - * the exact results are not necessary.
- * - *- * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.
- */ - LL_EXACT_AMBIG_DETECTION: 2, - - /** - * - * Computes the SLL prediction termination condition. - * - *- * This method computes the SLL prediction termination condition for both of - * the following cases.
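// A usage sketch, not part of this bundle: selecting one of the three modes
// just defined on a generated parser, assuming the standard antlr4 JS API.
// "MyLexer"/"MyParser" are hypothetical generated classes; the ATN simulator
// is assumed reachable through the parser's `_interp` field, which is the
// object this file assigns `predictionMode` to.
const antlr4 = require('antlr4');
const MyLexer = require('./MyLexer');   // hypothetical generated lexer
const MyParser = require('./MyParser'); // hypothetical generated parser

const input = new antlr4.InputStream('example input');
const tokens = new antlr4.CommonTokenStream(new MyLexer(input));
const parser = new MyParser(tokens);

// Fastest; may report syntax errors that full LL would resolve:
parser._interp.predictionMode = antlr4.atn.PredictionMode.SLL;
// Correct for every grammar on syntactically valid input:
parser._interp.predictionMode = antlr4.atn.PredictionMode.LL;
// Exact ambiguity reporting; intended for grammar debugging only:
parser._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;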
- * - *COMBINED SLL+LL PARSING
- * - *When LL-fallback is enabled upon SLL conflict, correct predictions are - * ensured regardless of how the termination condition is computed by this - * method. Due to the substantially higher cost of LL prediction, the - * prediction should only fall back to LL when the additional lookahead - * cannot lead to a unique SLL prediction.
- *
- * Assuming combined SLL+LL parsing, an SLL configuration set with only
- * conflicting subsets should fall back to full LL, even if the
- * configuration sets don't resolve to the same alternative (e.g.
- * {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
- * configuration, SLL could continue with the hopes that more lookahead will
- * resolve via one of those non-conflicting configurations.
- *
- * Here's the prediction termination rule then: SLL (for SLL+LL parsing)
- * stops when it sees only conflicting configuration subsets. In contrast,
- * full LL keeps going when there is uncertainty.
- * - *HEURISTIC
- * - *As a heuristic, we stop prediction when we see any conflicting subset - * unless we see a state that only has one alternative associated with it. - * The single-alt-state thing lets prediction continue upon rules like - * (otherwise, it would admit defeat too soon):
- * - *{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}
- *
- * When the ATN simulation reaches the state before {@code ';'}, it has a
- * DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
- * {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
- * processing this node because alternative two has another way to continue,
- * via {@code [6|2|[]]}.
- *
- * It also lets us continue for this rule:
- * - *{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}
- * - *After matching input A, we reach the stop state for rule A, state 1. - * State 8 is the state right before B. Clearly alternatives 1 and 2 - * conflict and no amount of further lookahead will separate the two. - * However, alternative 3 will be able to continue and so we do not stop - * working on this state. In the previous example, we're concerned with - * states associated with the conflicting alternatives. Here alt 3 is not - * associated with the conflicting configs, but since we can continue - * looking for input reasonably, don't declare the state done.
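// The single-alt-state escape hatch above, as a runnable sketch: prediction
// keeps going as long as some ATN state in the configuration set is associated
// with exactly one alternative. Plain Map/Set stand in for the runtime's
// AltDict/BitSet (compare getStateToAltMap further down in this file).
function hasStateWithOneAlt(stateToAlts) {
    for (const alts of stateToAlts.values()) {
        if (alts.size === 1) return true; // that alt can still win outright
    }
    return false; // every state conflicted: SLL stops and resolves to the min alt
}
console.log(hasStateWithOneAlt(new Map([
    [12, new Set([1, 2])], // state 12: alts 1 and 2 conflict
    [6,  new Set([2])],    // state 6: a single alt keeps prediction alive
]))); // true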
- * - *PURE SLL PARSING
- * - *To handle pure SLL parsing, all we have to do is make sure that we - * combine stack contexts for configurations that differ only by semantic - * predicate. From there, we can do the usual SLL termination heuristic.
- * - *PREDICATES IN SLL+LL PARSING
- * - *SLL decisions don't evaluate predicates until after they reach DFA stop - * states because they need to create the DFA cache that works in all - * semantic situations. In contrast, full LL evaluates predicates collected - * during start state computation so it can ignore predicates thereafter. - * This means that SLL termination detection can totally ignore semantic - * predicates.
- * - *Implementation-wise, {@link ATNConfigSet} combines stack contexts but not - * semantic predicate contexts so we might see two configurations like the - * following.
- * - *{@code (s, 1, x, {}), (s, 1, x', {p})}
- * - *Before testing these configurations against others, we have to merge - * {@code x} and {@code x'} (without modifying the existing configurations). - * For example, we test {@code (x+x')==x''} when looking for conflicts in - * the following configurations.
- * - *{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
- * - *If the configuration set has predicates (as indicated by - * {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of - * the configurations to strip out all of the predicates so that a standard - * {@link ATNConfigSet} will merge everything ignoring predicates.
- */ - hasSLLConflictTerminatingPrediction: function( mode, configs) { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to match additional input so we terminate prediction. - // - if (PredictionMode.allConfigsInRuleStopStates(configs)) { - return true; - } - // pure SLL mode parsing - if (mode === PredictionMode.SLL) { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL; costs more time - // since we'll often fail over anyway. - if (configs.hasSemanticContext) { - // dup configs, tossing out semantic predicates - const dup = new ATNConfigSet(); - for(let i=0;iThe basic idea is to split the set of configurations {@code C}, into - * conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with - * non-conflicting configurations. Two configurations conflict if they have - * identical {@link ATNConfig//state} and {@link ATNConfig//context} values - * but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} - * and {@code (s, j, ctx, _)} for {@code i!=j}.
- * - *Reduce these configuration subsets to the set of possible alternatives. - * You can compute the alternative subsets in one pass as follows:
- * - *{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in - * {@code C} holding {@code s} and {@code ctx} fixed.
- * - *Or in pseudo-code, for each configuration {@code c} in {@code C}:
- * - *- * map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not - * alt and not pred - *- * - *
The values in {@code map} are the set of {@code A_s,ctx} sets.
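// A self-contained sketch of the map construction just described
// ("map[c] U= c.alt", keyed on state and context only). Plain objects stand
// in for ATNConfig and PredictionContext; the real code uses hashed lookups.
function conflictingAltSubsets(configs) {
    const map = new Map(); // key: (state, context) pair; value: set of alts
    for (const c of configs) {
        const key = c.state + '/' + c.context;
        if (!map.has(key)) map.set(key, new Set());
        map.get(key).add(c.alt);
    }
    return [...map.values()]; // the A_{s,ctx} sets
}
// Example: two configs conflict in (s=12, ctx="[]"); one stands alone:
console.log(conflictingAltSubsets([
    { state: 12, alt: 1, context: '[]' },
    { state: 12, alt: 2, context: '[]' },
    { state: 6,  alt: 2, context: '[]' },
])); // [ Set { 1, 2 }, Set { 2 } ]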
- * - *If {@code |A_s,ctx|=1} then there is no conflict associated with - * {@code s} and {@code ctx}.
- * - *Reduce the subsets to singletons by choosing a minimum of each subset. If - * the union of these alternative subsets is a singleton, then no amount of - * more lookahead will help us. We will always pick that alternative. If, - * however, there is more than one alternative, then we are uncertain which - * alternative to predict and must continue looking for resolution. We may - * or may not discover an ambiguity in the future, even if there are no - * conflicting subsets this round.
- * - *The biggest sin is to terminate early because it means we've made a - * decision but were uncertain as to the eventual outcome. We haven't used - * enough lookahead. On the other hand, announcing a conflict too late is no - * big deal; you will still have the conflict. It's just inefficient. It - * might even look until the end of file.
- * - *No special consideration for semantic predicates is required because - * predicates are evaluated on-the-fly for full LL prediction, ensuring that - * no configuration contains a semantic context during the termination - * check.
- * - *CONFLICTING CONFIGS
- * - *Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict - * when {@code i!=j} but {@code x=x'}. Because we merge all - * {@code (s, i, _)} configurations together, that means that there are at - * most {@code n} configurations associated with state {@code s} for - * {@code n} possible alternatives in the decision. The merged stacks - * complicate the comparison of configuration contexts {@code x} and - * {@code x'}. Sam checks to see if one is a subset of the other by calling - * merge and checking to see if the merged result is either {@code x} or - * {@code x'}. If the {@code x} associated with lowest alternative {@code i} - * is the superset, then {@code i} is the only possible prediction since the - * others resolve to {@code min(i)} as well. However, if {@code x} is - * associated with {@code j>i} then at least one stack configuration for - * {@code j} is not in conflict with alternative {@code i}. The algorithm - * should keep going, looking for more lookahead due to the uncertainty.
- *
- * For simplicity, I'm doing an equality check between {@code x} and
- * {@code x'} that lets the algorithm continue to consume lookahead longer
- * than necessary. The reason I like the equality is of course the
- * simplicity, but also because that is the test you need to detect the
- * alternatives that are actually in conflict.
- * - *CONTINUE/STOP RULE
- * - *Continue if union of resolved alternative sets from non-conflicting and - * conflicting alternative subsets has more than one alternative. We are - * uncertain about which alternative to predict.
- *
- * The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
- * alternatives are still in the running for the amount of input we've
- * consumed at this point. The conflicting sets let us strip away
- * configurations that won't lead to more states because we resolve
- * conflicts to the configuration with a minimum alternate for the
- * conflicting set.
- * - *CASES
- * - *EXACT AMBIGUITY DETECTION
- * - *If all states report the same conflicting set of alternatives, then we - * know we have the exact ambiguity set.
- * - *|A_i|>1
and {@code A_i = A_j} for all {@code i}, {@code j}.
In other words, we continue examining lookahead until all {@code A_i} - * have more than one alternative and all {@code A_i} are the same. If - * {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate - * because the resolved set is {@code {1}}. To determine what the real - * ambiguity is, we have to know whether the ambiguity is between one and - * two or one and three so we keep going. We can only stop prediction when - * we need exact ambiguity detection when the sets look like - * {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
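// The two stop tests above, sketched with plain Sets standing in for BitSet.
// Regular LL prediction stops as soon as every subset resolves to the same
// minimum alternative; exact-ambiguity mode also requires all subsets equal.
function resolvedAlt(altsets) {
    const mins = altsets.map(s => Math.min(...s));
    return mins.every(m => m === mins[0]) ? mins[0] : null; // null: keep looking
}
function allSubsetsEqual(altsets) {
    const first = [...altsets[0]].sort().join(',');
    return altsets.every(s => [...s].sort().join(',') === first);
}
const A = [new Set([1, 2]), new Set([1, 3])];
console.log(resolvedAlt(A));     // 1     -> plain LL prediction stops here
console.log(allSubsetsEqual(A)); // false -> exact-ambiguity mode keeps looking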
- */ - resolvesToJustOneViableAlt: function(altsets) { - return PredictionMode.getSingleViableAlt(altsets); - }, - - /** - * Determines if every alternative subset in {@code altsets} contains more - * than one alternative. - * - * @param altsets a collection of alternative subsets - * @return {@code true} if every {@link BitSet} in {@code altsets} has - * {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} - */ - allSubsetsConflict: function(altsets) { - return ! PredictionMode.hasNonConflictingAltSet(altsets); - }, - /** - * Determines if any single alternative subset in {@code altsets} contains - * exactly one alternative. - * - * @param altsets a collection of alternative subsets - * @return {@code true} if {@code altsets} contains a {@link BitSet} with - * {@link BitSet//cardinality cardinality} 1, otherwise {@code false} - */ - hasNonConflictingAltSet: function(altsets) { - for(let i=0;i- * map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} - *- */ - getStateToAltMap: function(configs) { - const m = new AltDict(); - configs.items.map(function(c) { - let alts = m.get(c.state); - if (alts === null) { - alts = new BitSet(); - m.set(c.state, alts); - } - alts.add(c.alt); - }); - return m; - }, - - hasStateAssociatedWithOneAlt: function(configs) { - const values = PredictionMode.getStateToAltMap(configs).values(); - for(let i=0;i
- * All of that is done without using the outer context because we want to create - * a DFA that is not dependent upon the rule invocation stack when we do a - * prediction. One DFA works in all contexts. We avoid using context not - * necessarily because it's slower, although it can be, but because of the DFA - * caching problem. The closure routine only considers the rule invocation stack - * created during prediction beginning in the decision rule. For example, if - * prediction occurs without invoking another rule's ATN, there are no context - * stacks in the configurations. When lack of context leads to a conflict, we - * don't know if it's an ambiguity or a weakness in the strong LL(*) parsing - * strategy (versus full LL(*)).
- * - *- * When SLL yields a configuration set with conflict, we rewind the input and - * retry the ATN simulation, this time using full outer context without adding - * to the DFA. Configuration context stacks will be the full invocation stacks - * from the start rule. If we get a conflict using full context, then we can - * definitively say we have a true ambiguity for that input sequence. If we - * don't get a conflict, it implies that the decision is sensitive to the outer - * context. (It is not context-sensitive in the sense of context-sensitive - * grammars.)
- * - *- * The next time we reach this DFA state with an SLL conflict, through DFA - * simulation, we will again retry the ATN simulation using full context mode. - * This is slow because we can't save the results and have to "interpret" the - * ATN each time we get that input.
- * - *- * CACHING FULL CONTEXT PREDICTIONS
- * - *- * We could cache results from full context to predicted alternative easily and - * that saves a lot of time but doesn't work in presence of predicates. The set - * of visible predicates from the ATN start state changes depending on the - * context, because closure can fall off the end of a rule. I tried to cache - * tuples (stack context, semantic context, predicted alt) but it was slower - * than interpreting and much more complicated. Also required a huge amount of - * memory. The goal is not to create the world's fastest parser anyway. I'd like - * to keep this algorithm simple. By launching multiple threads, we can improve - * the speed of parsing across a large number of files.
- * - *- * There is no strict ordering between the amount of input used by SLL vs LL, - * which makes it really hard to build a cache for full context. Let's say that - * we have input A B C that leads to an SLL conflict with full context X. That - * implies that using X we might only use A B but we could also use A B C D to - * resolve conflict. Input A B C D could predict alternative 1 in one position - * in the input and A B C E could predict alternative 2 in another position in - * input. The conflicting SLL configurations could still be non-unique in the - * full context prediction, which would lead us to requiring more input than the - * original A B C. To make a prediction cache work, we have to track the exact - * input used during the previous prediction. That amounts to a cache that maps - * X to a specific DFA for that context.
- * - *- * Something should be done for left-recursive expression predictions. They are - * likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry - * with full LL thing Sam does.
- *
- * Something should be done for left-recursive expression predictions. They are
- * likely LL(1) + pred eval. It is easier to do the whole SLL pass and, on a
- * syntax error, retry with full LL, which is what Sam does.
- * - *- * We avoid doing full context retry when the outer context is empty, we did not - * dip into the outer context by falling off the end of the decision state rule, - * or when we force SLL mode.
- * - *- * As an example of the not dip into outer context case, consider as super - * constructor calls versus function calls. One grammar might look like - * this:
- *
- * As an example of the "not dip into outer context" case, consider super
- * constructor calls versus function calls. One grammar might look like
- * this:
- * Or, you might see something like
- * - *- * stat - * : superCall ';' - * | expression ';' - * | ... - * ; - *- * - *
- * In both cases I believe that no closure operations will dip into the outer - * context. In the first case ctorBody in the worst case will stop at the '}'. - * In the 2nd case it should stop at the ';'. Both cases should stay within the - * entry rule and not dip into the outer context.
- * - *- * PREDICATES
- * - *- * Predicates are always evaluated if present in either SLL or LL both. SLL and - * LL simulation deals with predicates differently. SLL collects predicates as - * it performs closure operations like ANTLR v3 did. It delays predicate - * evaluation until it reaches and accept state. This allows us to cache the SLL - * ATN simulation whereas, if we had evaluated predicates on-the-fly during - * closure, the DFA state configuration sets would be different and we couldn't - * build up a suitable DFA.
- *
- * Predicates are always evaluated if present, in both SLL and LL. SLL and
- * LL simulation deal with predicates differently. SLL collects predicates as
- * it performs closure operations, like ANTLR v3 did. It delays predicate
- * evaluation until it reaches an accept state. This allows us to cache the SLL
- * ATN simulation whereas, if we had evaluated predicates on-the-fly during
- * closure, the DFA state configuration sets would be different and we couldn't
- * build up a suitable DFA.
- * - *- * When we start in the DFA and reach an accept state that's predicated, we test - * those and return the minimum semantically viable alternative. If no - * alternatives are viable, we throw an exception.
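// The "strip and take the minimum" rule as a sketch. Predicated alternatives
// are modeled as hypothetical {alt, pred} pairs; a null pred behaves like a
// true predicate, mirroring the description above.
function minViableAlt(predictions, outerContext) {
    const viable = predictions
        .filter(p => p.pred === null || p.pred(outerContext))
        .map(p => p.alt);
    if (viable.length === 0) {
        throw new Error('no viable alternative'); // the zero-alternatives case
    }
    return Math.min(...viable);
}
// minViableAlt([{alt: 1, pred: () => false}, {alt: 2, pred: null}], null) === 2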
- * - *- * During full LL ATN simulation, closure always evaluates predicates and - * on-the-fly. This is crucial to reducing the configuration set size during - * closure. It hits a landmine when parsing with the Java grammar, for example, - * without this on-the-fly evaluation.
- *
- * During full LL ATN simulation, closure always evaluates predicates
- * on-the-fly. This is crucial to reducing the configuration set size during
- * closure. It hits a landmine when parsing with the Java grammar, for example,
- * without this on-the-fly evaluation.
- * - *- * All instances of the same parser share the same decision DFAs through a - * static field. Each instance gets its own ATN simulator but they share the - * same {@link //decisionToDFA} field. They also share a - * {@link PredictionContextCache} object that makes sure that all - * {@link PredictionContext} objects are shared among the DFA states. This makes - * a big size difference.
- * - *- * THREAD SAFETY
- * - *- * The {@link ParserATNSimulator} locks on the {@link //decisionToDFA} field when - * it adds a new DFA object to that array. {@link //addDFAEdge} - * locks on the DFA for the current decision when setting the - * {@link DFAState//edges} field. {@link //addDFAState} locks on - * the DFA for the current decision when looking up a DFA state to see if it - * already exists. We must make sure that all requests to add DFA states that - * are equivalent result in the same shared DFA object. This is because lots of - * threads will be trying to update the DFA at once. The - * {@link //addDFAState} method also locks inside the DFA lock - * but this time on the shared context cache when it rebuilds the - * configurations' {@link PredictionContext} objects using cached - * subgraphs/nodes. No other locking occurs, even during DFA simulation. This is - * safe as long as we can guarantee that all threads referencing - * {@code s.edge[t]} get the same physical target {@link DFAState}, or - * {@code null}. Once into the DFA, the DFA simulation does not reference the - * {@link DFA//states} map. It follows the {@link DFAState//edges} field to new - * targets. The DFA simulator will either find {@link DFAState//edges} to be - * {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or - * {@code dfa.edges[t]} to be non-null. The - * {@link //addDFAEdge} method could be racing to set the field - * but in either case the DFA simulator works; if {@code null}, and requests ATN - * simulation. It could also race trying to get {@code dfa.edges[t]}, but either - * way it will work because it's not doing a test and set operation.
- * - *- * Starting with SLL then failing to combined SLL/LL (Two-Stage - * Parsing)
- *
- * Starting with SLL then failing over to combined SLL/LL (Two-Stage
- * Parsing)
- * - *- * parser.{@link Parser//getInterpreter() getInterpreter()}.{@link //setPredictionMode setPredictionMode}{@code (}{@link PredictionMode//SLL}{@code )}; - * parser.{@link Parser//setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}()); - *- * - *
- * If it does not get a syntax error, then we're done. If it does get a syntax - * error, we need to retry with the combined SLL/LL strategy.
- * - *- * The reason this works is as follows. If there are no SLL conflicts, then the - * grammar is SLL (at least for that input set). If there is an SLL conflict, - * the full LL analysis must yield a set of viable alternatives which is a - * subset of the alternatives reported by SLL. If the LL set is a singleton, - * then the grammar is LL but not SLL. If the LL set is the same size as the SLL - * set, the decision is SLL. If the LL set has size > 1, then that decision - * is truly ambiguous on the current input. If the LL set is smaller, then the - * SLL conflict resolution might choose an alternative that the full LL would - * rule out as a possibility based upon better context information. If that's - * the case, then the SLL parse will definitely get an error because the full LL - * analysis says it's not viable. If SLL conflict resolution chooses an - * alternative within the LL set, them both SLL and LL would choose the same - * alternative because they both choose the minimum of multiple conflicting - * alternatives.
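// A two-stage driver along the lines just described — a sketch, not part of
// this bundle. It assumes the antlr4 JS runtime's BailErrorStrategy and
// DefaultErrorStrategy, plus a hypothetical generated MyParser whose entry
// rule is `startRule`.
function twoStageParse(tokens) {
    const parser = new MyParser(tokens);
    parser._errHandler = new antlr4.error.BailErrorStrategy();
    parser._interp.predictionMode = antlr4.atn.PredictionMode.SLL;
    try {
        return parser.startRule(); // stage 1: pure SLL, bail on first error
    } catch (e) {
        tokens.seek(0); // rewind the token stream
        parser.reset();
        parser._errHandler = new antlr4.error.DefaultErrorStrategy();
        parser._interp.predictionMode = antlr4.atn.PredictionMode.LL;
        return parser.startRule(); // stage 2: full LL; real errors surface here
    }
}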
- *
- * The reason this works is as follows. If there are no SLL conflicts, then the
- * grammar is SLL (at least for that input set). If there is an SLL conflict,
- * the full LL analysis must yield a set of viable alternatives which is a
- * subset of the alternatives reported by SLL. If the LL set is a singleton,
- * then the grammar is LL but not SLL. If the LL set is the same size as the SLL
- * set, the decision is SLL. If the LL set has size > 1, then that decision
- * is truly ambiguous on the current input. If the LL set is smaller, then the
- * SLL conflict resolution might choose an alternative that the full LL would
- * rule out as a possibility based upon better context information. If that's
- * the case, then the SLL parse will definitely get an error because the full LL
- * analysis says it's not viable. If SLL conflict resolution chooses an
- * alternative within the LL set, then both SLL and LL would choose the same
- * alternative because they both choose the minimum of multiple conflicting
- * alternatives.
- * - *- * Of course, if the input is invalid, then we will get an error for sure in - * both SLL and LL parsing. Erroneous input will therefore require 2 passes over - * the input.
- */ -class ParserATNSimulator extends ATNSimulator { - constructor(parser, atn, decisionToDFA, sharedContextCache) { - super(atn, sharedContextCache); - this.parser = parser; - this.decisionToDFA = decisionToDFA; - // SLL, LL, or LL + exact ambig detection?// - this.predictionMode = atn_PredictionMode.LL; - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - this._input = null; - this._startIndex = 0; - this._outerContext = null; - this._dfa = null; - /** - * Each prediction operation uses a cache for merge of prediction contexts. - * Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - * isn't synchronized but we're ok since two threads shouldn't reuse same - * parser/atnsim object because it can only handle one input at a time. - * This maps graphs a and b to merged result c. (a,b)→c. We can avoid - * the merge if we ever see a and b again. Note that (b,a)→c should - * also be examined during cache lookup. - */ - this.mergeCache = null; - this.debug = false; - this.debug_closure = false; - this.debug_add = false; - this.debug_list_atn_decisions = false; - this.dfa_debug = false; - this.retry_debug = false; - } - - reset() {} - - adaptivePredict(input, decision, outerContext) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("adaptivePredict decision " + decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + - input.LT(1).column); - } - this._input = input; - this._startIndex = input.index; - this._outerContext = outerContext; - - const dfa = this.decisionToDFA[decision]; - this._dfa = dfa; - const m = input.mark(); - const index = input.index; - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - try { - let s0; - if (dfa.precedenceDfa) { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()); - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0; - } - if (s0===null) { - if (outerContext===null) { - outerContext = RuleContext.EMPTY; - } - if (this.debug || this.debug_list_atn_decisions) { - console.log("predictATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - ", outerContext=" + outerContext.toString(this.parser.ruleNames)); - } - - const fullCtx = false; - let s0_closure = this.computeStartState(dfa.atnStartState, RuleContext.EMPTY, fullCtx); - - if( dfa.precedenceDfa) { - // If this is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. 
- // - dfa.s0.configs = s0_closure; // not used for prediction but useful to know start configs anyway - s0_closure = this.applyPrecedenceFilter(s0_closure); - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0); - } else { - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.s0 = s0; - } - } - const alt = this.execATN(dfa, s0, input, index, outerContext); - if (this.debug) { - console.log("DFA after predictATN: " + dfa.toString(this.parser.literalNames, this.parser.symbolicNames)); - } - return alt; - } finally { - this._dfa = null; - this.mergeCache = null; // wack cache after each prediction - input.seek(index); - input.release(m); - } - } - - /** - * Performs ATN simulation to compute a predicted alternative based - * upon the remaining input, but also updates the DFA cache to avoid - * having to traverse the ATN again for the same input sequence. - * - * There are some key conditions we're looking for after computing a new - * set of ATN configs (proposed DFA state): - * if the set is empty, there is no viable alternative for current symbol - * does the state uniquely predict an alternative? - * does the state have a conflict that would prevent us from - * putting it on the work list? - * - * We also have some key operations to do: - * add an edge from previous DFA state to potentially new DFA state, D, - * upon current symbol but only if adding to work list, which means in all - * cases except no viable alternative (and possibly non-greedy decisions?) - * collecting predicates and adding semantic context to DFA accept states - * adding rule context to context-sensitive DFA accept states - * consuming an input symbol - * reporting a conflict - * reporting an ambiguity - * reporting a context sensitivity - * reporting insufficient predicates - * - * cover these cases: - * dead end - * single alt - * single alt + preds - * conflict - * conflict + preds - * - */ - execATN(dfa, s0, input, startIndex, outerContext ) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("execATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + input.LT(1).column); - } - let alt; - let previousD = s0; - - if (this.debug) { - console.log("s0 = " + s0); - } - let t = input.LA(1); - for(;;) { // while more work - let D = this.getExistingTargetState(previousD, t); - if(D===null) { - D = this.computeTargetState(dfa, previousD, t); - } - if(D===ATNSimulator.ERROR) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision; better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- const e = this.noViableAlt(input, outerContext, previousD.configs, startIndex); - input.seek(startIndex); - alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext); - if(alt!==ATN.INVALID_ALT_NUMBER) { - return alt; - } else { - throw e; - } - } - if(D.requiresFullContext && this.predictionMode !== atn_PredictionMode.SLL) { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - let conflictingAlts = null; - if (D.predicates!==null) { - if (this.debug) { - console.log("DFA state has preds in DFA sim LL failover"); - } - const conflictIndex = input.index; - if(conflictIndex !== startIndex) { - input.seek(startIndex); - } - conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true); - if (conflictingAlts.length===1) { - if(this.debug) { - console.log("Full LL avoided"); - } - return conflictingAlts.minValue(); - } - if (conflictIndex !== startIndex) { - // restore the index so reporting the fallback to full - // context occurs with the index at the correct spot - input.seek(conflictIndex); - } - } - if (this.dfa_debug) { - console.log("ctx sensitive state " + outerContext +" in " + D); - } - const fullCtx = true; - const s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx); - this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index); - alt = this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext); - return alt; - } - if (D.isAcceptState) { - if (D.predicates===null) { - return D.prediction; - } - const stopIndex = input.index; - input.seek(startIndex); - const alts = this.evalSemanticContext(D.predicates, outerContext, true); - if (alts.length===0) { - throw this.noViableAlt(input, outerContext, D.configs, startIndex); - } else if (alts.length===1) { - return alts.minValue(); - } else { - // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. - this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs); - return alts.minValue(); - } - } - previousD = D; - - if (t !== Token.EOF) { - input.consume(); - t = input.LA(1); - } - } - } - - /** - * Get an existing target state for an edge in the DFA. If the target state - * for the edge has not yet been computed or is otherwise not available, - * this method returns {@code null}. - * - * @param previousD The current DFA state - * @param t The next input symbol - * @return The existing target DFA state for the given input symbol - * {@code t}, or {@code null} if the target state for this edge is not - * already cached - */ - getExistingTargetState(previousD, t) { - const edges = previousD.edges; - if (edges===null) { - return null; - } else { - return edges[t + 1] || null; - } - } - - /** - * Compute a target state for an edge in the DFA, and attempt to add the - * computed state and corresponding edge to the DFA. - * - * @param dfa The DFA - * @param previousD The current DFA state - * @param t The next input symbol - * - * @return The computed target DFA state for the given input symbol - * {@code t}. 
If {@code t} does not lead to a valid DFA state, this method - * returns {@link //ERROR - */ - computeTargetState(dfa, previousD, t) { - const reach = this.computeReachSet(previousD.configs, t, false); - if(reach===null) { - this.addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); - return ATNSimulator.ERROR; - } - // create new target state; we'll add to DFA after it's complete - let D = new DFAState(null, reach); - - const predictedAlt = this.getUniqueAlt(reach); - - if (this.debug) { - const altSubSets = atn_PredictionMode.getConflictingAltSubsets(reach); - console.log("SLL altSubSets=" + arrayToString(altSubSets) + - /*", previous=" + previousD.configs + */ - ", configs=" + reach + - ", predict=" + predictedAlt + - ", allSubsetsConflict=" + - atn_PredictionMode.allSubsetsConflict(altSubSets) + ", conflictingAlts=" + - this.getConflictingAlts(reach)); - } - if (predictedAlt!==ATN.INVALID_ALT_NUMBER) { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true; - D.configs.uniqueAlt = predictedAlt; - D.prediction = predictedAlt; - } else if (atn_PredictionMode.hasSLLConflictTerminatingPrediction(this.predictionMode, reach)) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.conflictingAlts = this.getConflictingAlts(reach); - D.requiresFullContext = true; - // in SLL-only mode, we will stop at this state and return the minimum alt - D.isAcceptState = true; - D.prediction = D.configs.conflictingAlts.minValue(); - } - if (D.isAcceptState && D.configs.hasSemanticContext) { - this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)); - if( D.predicates!==null) { - D.prediction = ATN.INVALID_ALT_NUMBER; - } - } - // all adds to dfa are done after we've created full D state - D = this.addDFAEdge(dfa, previousD, t, D); - return D; - } - - predicateDFAState(dfaState, decisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. - const nalts = decisionState.transitions.length; - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - const altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs); - const altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts); - if (altToPred!==null) { - dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred); - dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.prediction = altsToCollectPredsFrom.minValue(); - } - } - -// comes back with reach.uniqueAlt set to a valid alt - execATNWithFullContext(dfa, D, // how far we got before failing over - s0, - input, - startIndex, - outerContext) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("execATNWithFullContext "+s0); - } - const fullCtx = true; - let foundExactAmbig = false; - let reach; - let previous = s0; - input.seek(startIndex); - let t = input.LA(1); - let predictedAlt = -1; - for (;;) { // while more work - reach = this.computeReachSet(previous, t, fullCtx); - if (reach===null) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision; better error message. 
Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - const e = this.noViableAlt(input, outerContext, previous, startIndex); - input.seek(startIndex); - const alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext); - if(alt!==ATN.INVALID_ALT_NUMBER) { - return alt; - } else { - throw e; - } - } - const altSubSets = atn_PredictionMode.getConflictingAltSubsets(reach); - if(this.debug) { - console.log("LL altSubSets=" + altSubSets + ", predict=" + - atn_PredictionMode.getUniqueAlt(altSubSets) + ", resolvesToJustOneViableAlt=" + - atn_PredictionMode.resolvesToJustOneViableAlt(altSubSets)); - } - reach.uniqueAlt = this.getUniqueAlt(reach); - // unique prediction? - if(reach.uniqueAlt!==ATN.INVALID_ALT_NUMBER) { - predictedAlt = reach.uniqueAlt; - break; - } else if (this.predictionMode !== atn_PredictionMode.LL_EXACT_AMBIG_DETECTION) { - predictedAlt = atn_PredictionMode.resolvesToJustOneViableAlt(altSubSets); - if(predictedAlt !== ATN.INVALID_ALT_NUMBER) { - break; - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if (atn_PredictionMode.allSubsetsConflict(altSubSets) && atn_PredictionMode.allSubsetsEqual(altSubSets)) { - foundExactAmbig = true; - predictedAlt = atn_PredictionMode.getSingleViableAlt(altSubSets); - break; - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach; - if( t !== Token.EOF) { - input.consume(); - t = input.LA(1); - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if (reach.uniqueAlt !== ATN.INVALID_ALT_NUMBER ) { - this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index); - return predictedAlt; - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In this case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve this without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. 
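// [Editor's sketch] The loop above only keeps consuming past a conflict when
// exact-ambiguity detection is requested; otherwise it stops at the first
// viable resolution. Selecting that mode on a generated parser might look like
// this (MyLexer/MyParser and the prog rule are hypothetical; the antlr4 npm
// exports CharStreams, CommonTokenStream, and atn.PredictionMode are assumed):
const antlr4 = require('antlr4');
const MyLexer = require('./MyLexer');   // hypothetical generated lexer
const MyParser = require('./MyParser'); // hypothetical generated parser

const chars = antlr4.CharStreams.fromString('a a b');
const parser = new MyParser(new antlr4.CommonTokenStream(new MyLexer(chars)));
// report exact ambiguities instead of resolving to the minimum alternative:
parser._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;
parser.prog();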
- - this.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, null, reach); - - return predictedAlt; - } - - computeReachSet(closure, t, fullCtx) { - if (this.debug) { - console.log("in computeReachSet, starting closure: " + closure); - } - if( this.mergeCache===null) { - this.mergeCache = new DoubleDict(); - } - const intermediate = new ATNConfigSet(fullCtx); - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative matching the longest overall sequence is - // chosen when multiple such configurations can match the input. - - let skippedStopStates = null; - - // First figure out where we can reach on input t - for (let i=0; i- * The prediction context must be considered by this filter to address - * situations like the following. - *
- *
- *
- * grammar TA;
- * prog: statement* EOF;
- * statement: letterA | statement letterA 'b' ;
- * letterA: 'a';
- *
- *
- * - * In the above grammar, the ATN state immediately before the token - * reference {@code 'a'} in {@code letterA} is reachable from the left edge - * of both the primary and closure blocks of the left-recursive rule - * {@code statement}. The prediction context associated with each of these - * configurations distinguishes between them, and prevents the alternative - * which stepped out to {@code prog} (and then back in to {@code statement}) - * from being eliminated by the filter. - *</p>
- * - * @param configs The configuration set computed by - * {@link //computeStartState} as the start state for the DFA. - * @return The transformed configuration set representing the start state - * for a precedence DFA at a particular precedence level (determined by - * calling {@link Parser//getPrecedence}) - */ - applyPrecedenceFilter(configs) { - let config; - const statesFromAlt1 = []; - const configSet = new ATNConfigSet(configs.fullCtx); - for(let i=0; i- * In some scenarios, the algorithm described above could predict an - * alternative which will result in a {@link FailedPredicateException} in - * the parser. Specifically, this could occur if the only configuration - * capable of successfully parsing to the end of the decision rule is - * blocked by a semantic predicate. By choosing this alternative within - * {@link //adaptivePredict} instead of throwing a - * {@link NoViableAltException}, the resulting - * {@link FailedPredicateException} in the parser will identify the specific - * predicate which is preventing the parser from successfully parsing the - * decision rule, which helps developers identify and correct logic errors - * in semantic predicates. - *
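// [Editor's sketch] The payoff of the strategy documented above: when the only
// syntactically viable path is blocked by a predicate, the reported error is a
// FailedPredicateException naming the predicate rather than a bare
// NoViableAltException. A listener that surfaces it (parser as in the earlier
// sketch; the error classes are assumed exported under antlr4.error):
class PredicateReporter extends antlr4.error.ErrorListener {
  syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
    if (e instanceof antlr4.error.FailedPredicateException) {
      console.error(`line ${line}:${column} ${msg}`); // msg names the predicate
    }
  }
}
parser.addErrorListener(new PredicateReporter());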
- * - * @param configs The ATN configurations which were valid immediately before - * the {@link //ERROR} state was reached - * @param outerContext The is the \gamma_0 initial parser context from the paper - * or the parser stack at the instant before prediction commences. - * - * @return The value to return from {@link //adaptivePredict}, or - * {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not - * identified and {@link //adaptivePredict} should report an error instead - */ - getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs, outerContext) { - const cfgs = this.splitAccordingToSemanticValidity(configs, outerContext); - const semValidConfigs = cfgs[0]; - const semInvalidConfigs = cfgs[1]; - let alt = this.getAltThatFinishedDecisionEntryRule(semValidConfigs); - if (alt!==ATN.INVALID_ALT_NUMBER) { // semantically/syntactically viable path exists - return alt; - } - // Is there a syntactically valid path with a failed pred? - if (semInvalidConfigs.items.length>0) { - alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs); - if (alt!==ATN.INVALID_ALT_NUMBER) { // syntactically viable path exists - return alt; - } - } - return ATN.INVALID_ALT_NUMBER; - } - - getAltThatFinishedDecisionEntryRule(configs) { - const alts = []; - for(let i=0;iIf {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and - * does not change the DFA.
- * - * @param dfa The dfa - * @param D The DFA state to add - * @return The state stored in the DFA. This will be either the existing - * state if {@code D} is already in the DFA, or {@code D} itself if the - * state was not already present - */ - addDFAState(dfa, D) { - if (D === ATNSimulator.ERROR) { - return D; - } - const existing = dfa.states.get(D); - if(existing!==null) { - return existing; - } - D.stateNumber = dfa.states.length; - if (! D.configs.readOnly) { - D.configs.optimizeConfigs(this); - D.configs.setReadonly(true); - } - dfa.states.add(D); - if (this.debug) { - console.log("adding new DFA state: " + D); - } - return D; - } - - reportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex) { - if (this.debug || this.retry_debug) { - const interval = new Interval(startIndex, stopIndex + 1); - console.log("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); - } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs); - } - } - - reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex) { - if (this.debug || this.retry_debug) { - const interval = new Interval(startIndex, stopIndex + 1); - console.log("reportContextSensitivity decision=" + dfa.decision + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); - } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs); - } - } - - // If context sensitive parsing, we know it's ambiguity not conflict// - reportAmbiguity(dfa, D, startIndex, stopIndex, - exact, ambigAlts, configs ) { - if (this.debug || this.retry_debug) { - const interval = new Interval(startIndex, stopIndex + 1); - console.log("reportAmbiguity " + ambigAlts + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); - } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs); - } - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/index.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - -/* harmony default export */ const atn = ({ ATN: ATN, ATNDeserializer: ATNDeserializer, LexerATNSimulator: LexerATNSimulator, ParserATNSimulator: ParserATNSimulator, PredictionMode: atn_PredictionMode }); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/DFASerializer.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -/** - * A DFA walker that knows how to dump them to serialized strings. - */ -class DFASerializer { - constructor(dfa, literalNames, symbolicNames) { - this.dfa = dfa; - this.literalNames = literalNames || []; - this.symbolicNames = symbolicNames || []; - } - - toString() { - if(this.dfa.s0 === null) { - return null; - } - let buf = ""; - const states = this.dfa.sortedStates(); - for(let i=0; iThe default implementation simply calls {@link //endErrorCondition} to - * ensure that the handler is not in error recovery mode.
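// [Editor's note] The strategy object carries mutable state (errorRecoveryMode,
// lastErrorIndex, lastErrorStates), which is why reset() funnels through
// endErrorCondition(). A sketch of installing a fresh strategy; in this JS
// runtime the strategy lives in the parser's _errHandler field (parser as in
// the earlier sketch):
parser._errHandler = new antlr4.error.DefaultErrorStrategy();
parser.reset(); // Parser.reset() in turn calls this._errHandler.reset(this)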
- */ - reset(recognizer) { - this.endErrorCondition(recognizer); - } - - /** - * This method is called to enter error recovery mode when a recognition - * exception is reported. - * - * @param recognizer the parser instance - */ - beginErrorCondition(recognizer) { - this.errorRecoveryMode = true; - } - - inErrorRecoveryMode(recognizer) { - return this.errorRecoveryMode; - } - - /** - * This method is called to leave error recovery mode after recovering from - * a recognition exception. - * @param recognizer - */ - endErrorCondition(recognizer) { - this.errorRecoveryMode = false; - this.lastErrorStates = null; - this.lastErrorIndex = -1; - } - - /** - * {@inheritDoc} - *The default implementation simply calls {@link //endErrorCondition}.
- */ - reportMatch(recognizer) { - this.endErrorCondition(recognizer); - } - - /** - * {@inheritDoc} - * - *The default implementation returns immediately if the handler is already - * in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} - * and dispatches the reporting task based on the runtime type of {@code e} - * according to the following table.
- * - *The default implementation resynchronizes the parser by consuming tokens - * until we find one in the resynchronization set--loosely the set of tokens - * that can follow the current rule.
- * - */ - recover(recognizer, e) { - if (this.lastErrorIndex===recognizer.getInputStream().index && - this.lastErrorStates !== null && this.lastErrorStates.indexOf(recognizer.state)>=0) { - // uh oh, another error at same token index and previously-visited - // state in ATN; must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop; this is a failsafe. - recognizer.consume(); - } - this.lastErrorIndex = recognizer._input.index; - if (this.lastErrorStates === null) { - this.lastErrorStates = []; - } - this.lastErrorStates.push(recognizer.state); - const followSet = this.getErrorRecoverySet(recognizer) - this.consumeUntil(recognizer, followSet); - } - - /** - * The default implementation of {@link ANTLRErrorStrategy//sync} makes sure - * that the current lookahead symbol is consistent with what were expecting - * at this point in the ATN. You can call this anytime but ANTLR only - * generates code to check before subrules/loops and each iteration. - * - *Implements Jim Idle's magic sync mechanism in closures and optional - * subrules. E.g.,
- * - *- * a : sync ( stuff sync )* ; - * sync : {consume to what can follow sync} ; - *- * - * At the start of a sub rule upon error, {@link //sync} performs single - * token deletion, if possible. If it can't do that, it bails on the current - * rule and uses the default error recovery, which consumes until the - * resynchronization set of the current rule. - * - *
If the sub rule is optional ({@code (...)?}, {@code (...)*}, or a block - * with an empty alternative), then the expected set includes what follows - * the subrule.
- * - *During loop iteration, it consumes until it sees a token that can start a - * sub rule or what follows loop. Yes, that is pretty aggressive. We opt to - * stay in the loop as long as possible.
- * - *ORIGINS
- * - *Previous versions of ANTLR did a poor job of their recovery within loops. - * A single mismatch token or missing token would force the parser to bail - * out of the entire rules surrounding the loop. So, for rule
- * - *- * classDef : 'class' ID '{' member* '}' - *- * - * input with an extra token between members would force the parser to - * consume until it found the next class definition rather than the next - * member definition of the current class. - * - *
This functionality costs a little bit of effort because the parser has to - * compare the token set at the start of the loop and at each iteration. If for - * some reason speed is suffering for you, you can turn off this - * functionality by simply overriding this method as a blank { } (a concrete - * sketch follows the method below).
- * - */ - sync(recognizer) { - // If already recovering, don't try to sync - if (this.inErrorRecoveryMode(recognizer)) { - return; - } - const s = recognizer._interp.atn.states[recognizer.state]; - const la = recognizer.getTokenStream().LA(1); - // try cheaper subset first; might get lucky. seems to shave a wee bit off - const nextTokens = recognizer.atn.nextTokens(s); - if(nextTokens.contains(la)) { - this.nextTokensContext = null; - this.nextTokenState = ATNState.INVALID_STATE_NUMBER; - return; - } else if (nextTokens.contains(Token.EPSILON)) { - if(this.nextTokensContext === null) { - // It's possible the next token won't match information tracked - // by sync is restricted for performance. - this.nextTokensContext = recognizer._ctx; - this.nextTokensState = recognizer._stateNumber; - } - return; - } - switch (s.stateType) { - case ATNState.BLOCK_START: - case ATNState.STAR_BLOCK_START: - case ATNState.PLUS_BLOCK_START: - case ATNState.STAR_LOOP_ENTRY: - // report error and recover if possible - if( this.singleTokenDeletion(recognizer) !== null) { - return; - } else { - throw new InputMismatchException(recognizer); - } - case ATNState.PLUS_LOOP_BACK: - case ATNState.STAR_LOOP_BACK: - { - this.reportUnwantedToken(recognizer); - const expecting = new IntervalSet(); - expecting.addSet(recognizer.getExpectedTokens()); - const whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer)); - this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule); - } - break; - default: - // do nothing if we can't identify the exact kind of ATN state - } - } - - /** - * This is called by {@link //reportError} when the exception is a - * {@link NoViableAltException}. - * - * @see //reportError - * - * @param recognizer the parser instance - * @param e the recognition exception - */ - reportNoViableAlternative(recognizer, e) { - const tokens = recognizer.getTokenStream() - let input - if(tokens !== null) { - if (e.startToken.type===Token.EOF) { - input = "This method is called when {@link //singleTokenDeletion} identifies - * single-token deletion as a viable recovery strategy for a mismatched - * input error.
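// [Editor's sketch] "Overriding this method as a blank { }" from the comment
// above, made concrete, for when sync's token-set comparisons show up in
// profiles (DefaultErrorStrategy assumed exported under antlr4.error):
class NoSyncStrategy extends antlr4.error.DefaultErrorStrategy {
  sync(recognizer) { /* intentionally empty: disable sync-based recovery */ }
}
parser._errHandler = new NoSyncStrategy(); // parser as in the earlier sketch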
- * - *The default implementation simply returns if the handler is already in - * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to - * enter error recovery mode, followed by calling - * {@link Parser//notifyErrorListeners}.
- * - * @param recognizer the parser instance - * - */ - reportUnwantedToken(recognizer) { - if (this.inErrorRecoveryMode(recognizer)) { - return; - } - this.beginErrorCondition(recognizer); - const t = recognizer.getCurrentToken() - const tokenName = this.getTokenErrorDisplay(t) - const expecting = this.getExpectedTokens(recognizer) - const msg = "extraneous input " + tokenName + " expecting " + - expecting.toString(recognizer.literalNames, recognizer.symbolicNames) - recognizer.notifyErrorListeners(msg, t, null); - } - - /** - * This method is called to report a syntax error which requires the - * insertion of a missing token into the input stream. At the time this - * method is called, the missing token has not yet been inserted. When this - * method returns, {@code recognizer} is in error recovery mode. - * - *This method is called when {@link //singleTokenInsertion} identifies - * single-token insertion as a viable recovery strategy for a mismatched - * input error.
- * - *The default implementation simply returns if the handler is already in - * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to - * enter error recovery mode, followed by calling - * {@link Parser//notifyErrorListeners}.
- * - * @param recognizer the parser instance - */ - reportMissingToken(recognizer) { - if ( this.inErrorRecoveryMode(recognizer)) { - return; - } - this.beginErrorCondition(recognizer); - const t = recognizer.getCurrentToken() - const expecting = this.getExpectedTokens(recognizer) - const msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + - " at " + this.getTokenErrorDisplay(t) - recognizer.notifyErrorListeners(msg, t, null); - } - - /** - *The default implementation attempts to recover from the mismatched input - * by using single token insertion and deletion as described below. If the - * recovery attempt fails, this method throws an - * {@link InputMismatchException}.
- * - *EXTRA TOKEN (single token deletion)
- * - *{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the - * right token, however, then assume {@code LA(1)} is some extra spurious - * token and delete it. Then consume and return the next token (which was - * the {@code LA(2)} token) as the successful result of the match operation.
- * - *This recovery strategy is implemented by {@link - * //singleTokenDeletion}.
- * - *MISSING TOKEN (single token insertion)
- * - *If current token (at {@code LA(1)}) is consistent with what could come - * after the expected {@code LA(1)} token, then assume the token is missing - * and use the parser's {@link TokenFactory} to create it on the fly. The - * "insertion" is performed by returning the created token as the successful - * result of the match operation.
- * - *This recovery strategy is implemented by {@link - * //singleTokenInsertion}.
- * - *EXAMPLE
- * - *For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When - * the parser returns from the nested call to {@code expr}, it will have - * call chain:
- * - *- * stat → expr → atom - *- * - * and it will be trying to match the {@code ')'} at this point in the - * derivation: - * - *
- * => ID '=' '(' INT ')' ('+' atom)* ';' - * ^ - *- * - * The attempt to match {@code ')'} will fail when it sees {@code ';'} and - * call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} - * is in the set of tokens that can follow the {@code ')'} token reference - * in rule {@code atom}. It can assume that you forgot the {@code ')'}. - */ - recoverInline(recognizer) { - // SINGLE TOKEN DELETION - const matchedSymbol = this.singleTokenDeletion(recognizer) - if (matchedSymbol !== null) { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.consume(); - return matchedSymbol; - } - // SINGLE TOKEN INSERTION - if (this.singleTokenInsertion(recognizer)) { - return this.getMissingSymbol(recognizer); - } - // even that didn't work; must throw the exception - throw new InputMismatchException(recognizer); - } - - /** - * This method implements the single-token insertion inline error recovery - * strategy. It is called by {@link //recoverInline} if the single-token - * deletion strategy fails to recover from the mismatched input. If this - * method returns {@code true}, {@code recognizer} will be in error recovery - * mode. - * - *
This method determines whether or not single-token insertion is viable by - * checking if the {@code LA(1)} input symbol could be successfully matched - * if it were instead the {@code LA(2)} symbol. If this method returns - * {@code true}, the caller is responsible for creating and inserting a - * token with the correct type to produce this behavior.
- * - * @param recognizer the parser instance - * @return {@code true} if single-token insertion is a viable recovery - * strategy for the current mismatched input, otherwise {@code false} - */ - singleTokenInsertion(recognizer) { - const currentSymbolType = recognizer.getTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token; error recovery - // is free to conjure up and insert the missing token - const atn = recognizer._interp.atn - const currentState = atn.states[recognizer.state] - const next = currentState.transitions[0].target - const expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) - if (expectingAtLL2.contains(currentSymbolType) ){ - this.reportMissingToken(recognizer); - return true; - } else { - return false; - } - } - - /** - * This method implements the single-token deletion inline error recovery - * strategy. It is called by {@link //recoverInline} to attempt to recover - * from mismatched input. If this method returns null, the parser and error - * handler state will not have changed. If this method returns non-null, - * {@code recognizer} will not be in error recovery mode since the - * returned token was a successful match. - * - *If the single-token deletion is successful, this method calls - * {@link //reportUnwantedToken} to report the error, followed by - * {@link Parser//consume} to actually "delete" the extraneous token. Then, - * before returning {@link //reportMatch} is called to signal a successful - * match.
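// [Editor's note] Both inline repairs reduce to a single lookahead test:
// deletion asks whether LA(2) is in the expected set (then LA(1) is spurious),
// insertion asks whether LA(1) could match what follows the expected token.
// The deletion test in isolation (parser as in the earlier sketch):
const expecting = parser.getExpectedTokens();
const canDelete = expecting.contains(parser.getTokenStream().LA(2));
// when canDelete is true, singleTokenDeletion() reports the unwanted token,
// consumes it, and returns the now-matching current token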
- * - * @param recognizer the parser instance - * @return the successfully matched {@link Token} instance if single-token - * deletion successfully recovers from the mismatched input, otherwise - * {@code null} - */ - singleTokenDeletion(recognizer) { - const nextTokenType = recognizer.getTokenStream().LA(2) - const expecting = this.getExpectedTokens(recognizer) - if (expecting.contains(nextTokenType)) { - this.reportUnwantedToken(recognizer); - // print("recoverFromMismatchedToken deleting " \ - // + str(recognizer.getTokenStream().LT(1)) \ - // + " since " + str(recognizer.getTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.consume(); // simply delete extra token - // we want to return the token we're actually matching - const matchedSymbol = recognizer.getCurrentToken() - this.reportMatch(recognizer); // we know current token is correct - return matchedSymbol; - } else { - return null; - } - } - - /** - * Conjure up a missing token during error recovery. - * - * The recognizer attempts to recover from single missing - * symbols. But, actions might refer to that missing symbol. - * For example, x=ID {f($x);}. The action clearly assumes - * that there has been an identifier matched previously and that - * $x points at that token. If that token is missing, but - * the next token in the stream is what we want we assume that - * this token is missing and we keep going. Because we - * have to return some token to replace the missing token, - * we have to conjure one up. This method gives the user control - * over the tokens returned for missing tokens. Mostly, - * you will want to create something special for identifier - * tokens. For literals such as '{' and ',', the default - * action in the parser or tree parser works. It simply creates - * a CommonToken of the appropriate type. The text will be the token. - * If you change what tokens must be created by the lexer, - * override this method to create the appropriate tokens. - * - */ - getMissingSymbol(recognizer) { - const currentSymbol = recognizer.getCurrentToken() - const expecting = this.getExpectedTokens(recognizer) - const expectedTokenType = expecting.first() // get any element - let tokenText - if (expectedTokenType===Token.EOF) { - tokenText = "- * This error strategy is useful in the following scenarios.
- * - *- * {@code myparser.setErrorHandler(new BailErrorStrategy());}
- * - * @see Parser//setErrorHandler(ANTLRErrorStrategy) - * */ -class BailErrorStrategy extends DefaultErrorStrategy { - - constructor() { - super(); - } - - /** - * Instead of recovering from exception {@code e}, re-throw it wrapped - * in a {@link ParseCancellationException} so it is not caught by the - * rule function catches. Use {@link Exception//getCause()} to get the - * original {@link RecognitionException}. - */ - recover(recognizer, e) { - let context = recognizer._ctx - while (context !== null) { - context.exception = e; - context = context.parentCtx; - } - throw new ParseCancellationException(e); - } - - /** - * Make sure we don't attempt to recover inline; if the parser - * successfully recovers, it won't throw an exception. - */ - recoverInline(recognizer) { - this.recover(recognizer, new InputMismatchException(recognizer)); - } - -// Make sure we don't attempt to recover from problems in subrules.// - sync(recognizer) { - // pass - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/index.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - - - - - -/* harmony default export */ const error = ({ - RecognitionException: RecognitionException, NoViableAltException: NoViableAltException, LexerNoViableAltException: LexerNoViableAltException, InputMismatchException: InputMismatchException, FailedPredicateException: FailedPredicateException, - DiagnosticErrorListener: DiagnosticErrorListener, BailErrorStrategy: BailErrorStrategy, DefaultErrorStrategy: DefaultErrorStrategy, ErrorListener: ErrorListener -}); - -// EXTERNAL MODULE: fs (ignored) -var fs_ignored_ = __webpack_require__(262); -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/CharStreams.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -/** - * Utility functions to create InputStreams from various sources. - * - * All returned InputStreams support the full range of Unicode - * up to U+10FFFF (the default behavior of InputStream only supports - * code points up to U+FFFF). - */ -/* harmony default export */ const CharStreams = ({ - // Creates an InputStream from a string. - fromString: function(str) { - return new InputStream(str, true); - }, - - /** - * Asynchronously creates an InputStream from a blob given the - * encoding of the bytes in that blob (defaults to 'utf8' if - * encoding is null). - * - * Invokes onLoad(result) on success, onError(error) on - * failure. - */ - fromBlob: function(blob, encoding, onLoad, onError) { - const reader = new window.FileReader(); - reader.onload = function(e) { - const is = new InputStream(e.target.result, true); - onLoad(is); - }; - reader.onerror = onError; - reader.readAsText(blob, encoding); - }, - - /** - * Creates an InputStream from a Buffer given the - * encoding of the bytes in that buffer (defaults to 'utf8' if - * encoding is null). - */ - fromBuffer: function(buffer, encoding) { - return new InputStream(buffer.toString(encoding), true); - }, - - /** Asynchronously creates an InputStream from a file on disk given - * the encoding of the bytes in that file (defaults to 'utf8' if - * encoding is null). - * - * Invokes callback(error, result) on completion. 
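// [Editor's sketch] A classic two-stage parse built from the pieces above: try
// fast SLL prediction with BailErrorStrategy first, and re-parse with full LL
// and default recovery only on failure (MyLexer/MyParser and prog are
// hypothetical; PredictionMode.SLL/LL assumed from the atn module):
function parseTwoStage(text) {
  const make = () => new MyParser(new antlr4.CommonTokenStream(
    new MyLexer(antlr4.CharStreams.fromString(text))));
  let parser = make();
  parser._errHandler = new antlr4.error.BailErrorStrategy();
  parser._interp.predictionMode = antlr4.atn.PredictionMode.SLL;
  try {
    return parser.prog();
  } catch (e) {
    parser = make(); // stage 2: full LL prediction, default error recovery
    parser._interp.predictionMode = antlr4.atn.PredictionMode.LL;
    return parser.prog();
  }
}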
- */ - fromPath: function(path, encoding, callback) { - fs_ignored_.readFile(path, encoding, function(err, data) { - let is = null; - if (data !== null) { - is = new InputStream(data, true); - } - callback(err, is); - }); - }, - - /** - * Synchronously creates an InputStream given a path to a file - * on disk and the encoding of the bytes in that file (defaults to - * 'utf8' if encoding is null). - */ - fromPathSync: function(path, encoding) { - const data = fs_ignored_.readFileSync(path, encoding); - return new InputStream(data, true); - } -}); - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/FileStream.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -/** - * This is an InputStream that is loaded from a file all at once - * when you construct the object. - */ -class FileStream extends InputStream { - constructor(fileName, decodeToUnicodeCodePoints) { - const data = fs_ignored_.readFileSync(fileName, "utf8"); - super(data, decodeToUnicodeCodePoints); - this.fileName = fileName; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/TraceListener.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - -class TraceListener extends ParseTreeListener { - constructor(parser) { - super(); - this.parser = parser; - } - - enterEveryRule(ctx) { - console.log("enter " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); - } - - visitTerminal(node) { - console.log("consume " + node.symbol + " rule " + this.parser.ruleNames[this.parser._ctx.ruleIndex]); - } - - exitEveryRule(ctx) { - console.log("exit " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Parser.js -/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - - - - - - - -class Parser extends Recognizer { - /** - * this is all the parsing support code essentially; most of it is error - * recovery stuff. - */ - constructor(input) { - super(); - // The input stream. - this._input = null; - /** - * The error handling strategy for the parser. The default value is a new - * instance of {@link DefaultErrorStrategy}. - */ - this._errHandler = new DefaultErrorStrategy(); - this._precedenceStack = []; - this._precedenceStack.push(0); - /** - * The {@link ParserRuleContext} object for the currently executing rule. - * this is always non-null during the parsing process. - */ - this._ctx = null; - /** - * Specifies whether or not the parser should construct a parse tree during - * the parsing process. The default value is {@code true}. - */ - this.buildParseTrees = true; - /** - * When {@link //setTrace}{@code (true)} is called, a reference to the - * {@link TraceListener} is stored here so it can be easily removed in a - * later call to {@link //setTrace}{@code (false)}. The listener itself is - * implemented as a parser listener so this field is not directly used by - * other parser methods. 
- */ - this._tracer = null; - /** - * The list of {@link ParseTreeListener} listeners registered to receive - * events during the parse. - */ - this._parseListeners = null; - /** - * The number of syntax errors reported during parsing. this value is - * incremented each time {@link //notifyErrorListeners} is called. - */ - this._syntaxErrors = 0; - this.setInputStream(input); - } - - // reset the parser's state - reset() { - if (this._input !== null) { - this._input.seek(0); - } - this._errHandler.reset(this); - this._ctx = null; - this._syntaxErrors = 0; - this.setTrace(false); - this._precedenceStack = []; - this._precedenceStack.push(0); - if (this._interp !== null) { - this._interp.reset(); - } - } - - /** - * Match current input symbol against {@code ttype}. If the symbol type - * matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are - * called to complete the match process. - * - *If the symbol type does not match, - * {@link ANTLRErrorStrategy//recoverInline} is called on the current error - * strategy to attempt recovery. If {@link //getBuildParseTree} is - * {@code true} and the token index of the symbol returned by - * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to - * the parse tree by calling {@link ParserRuleContext//addErrorNode}.
- * - * @param ttype the token type to match - * @return the matched symbol - * @throws RecognitionException if the current input symbol did not match - * {@code ttype} and the error strategy could not recover from the - * mismatched symbol - */ - match(ttype) { - let t = this.getCurrentToken(); - if (t.type === ttype) { - this._errHandler.reportMatch(this); - this.consume(); - } else { - t = this._errHandler.recoverInline(this); - if (this.buildParseTrees && t.tokenIndex === -1) { - // we must have conjured up a new token during single token - // insertion - // if it's not the current symbol - this._ctx.addErrorNode(t); - } - } - return t; - } - - /** - * Match current input symbol as a wildcard. If the symbol type matches - * (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} - * and {@link //consume} are called to complete the match process. - * - *If the symbol type does not match, - * {@link ANTLRErrorStrategy//recoverInline} is called on the current error - * strategy to attempt recovery. If {@link //getBuildParseTree} is - * {@code true} and the token index of the symbol returned by - * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to - * the parse tree by calling {@link ParserRuleContext//addErrorNode}.
- * - * @return the matched symbol - * @throws RecognitionException if the current input symbol did not match - * a wildcard and the error strategy could not recover from the mismatched - * symbol - */ - matchWildcard() { - let t = this.getCurrentToken(); - if (t.type > 0) { - this._errHandler.reportMatch(this); - this.consume(); - } else { - t = this._errHandler.recoverInline(this); - if (this._buildParseTrees && t.tokenIndex === -1) { - // we must have conjured up a new token during single token - // insertion - // if it's not the current symbol - this._ctx.addErrorNode(t); - } - } - return t; - } - - getParseListeners() { - return this._parseListeners || []; - } - - /** - * Registers {@code listener} to receive events during the parsing process. - * - *To support output-preserving grammar transformations (including but not - * limited to left-recursion removal, automated left-factoring, and - * optimized code generation), calls to listener methods during the parse - * may differ substantially from calls made by - * {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In - * particular, rule entry and exit events may occur in a different order - * during the parse than after the parser. In addition, calls to certain - * rule entry methods may be omitted.
- * - *With the following specific exceptions, calls to listener events are - * deterministic, i.e. for identical input the calls to listener - * methods will be the same.
- * - *If {@code listener} is {@code null} or has not been added as a parse - * listener, this method does nothing.
- * @param listener the listener to remove - */ - removeParseListener(listener) { - if (this._parseListeners !== null) { - const idx = this._parseListeners.indexOf(listener); - if (idx >= 0) { - this._parseListeners.splice(idx, 1); - } - if (this._parseListeners.length === 0) { - this._parseListeners = null; - } - } - } - - // Remove all parse listeners. - removeParseListeners() { - this._parseListeners = null; - } - - // Notify any parse listeners of an enter rule event. - triggerEnterRuleEvent() { - if (this._parseListeners !== null) { - const ctx = this._ctx; - this._parseListeners.forEach(function (listener) { - listener.enterEveryRule(ctx); - ctx.enterRule(listener); - }); - } - } - - /** - * Notify any parse listeners of an exit rule event. - * @see //addParseListener - */ - triggerExitRuleEvent() { - if (this._parseListeners !== null) { - // reverse order walk of listeners - const ctx = this._ctx; - this._parseListeners.slice(0).reverse().forEach(function (listener) { - ctx.exitRule(listener); - listener.exitEveryRule(ctx); - }); - } - } - - getTokenFactory() { - return this._input.tokenSource._factory; - } - - // Tell our token source and error strategy about a new way to create tokens. - setTokenFactory(factory) { - this._input.tokenSource._factory = factory; - } - - /** - * The ATN with bypass alternatives is expensive to create so we create it - * lazily. - * - * @throws UnsupportedOperationException if the current parser does not - * implement the {@link //getSerializedATN()} method. - */ - getATNWithBypassAlts() { - const serializedAtn = this.getSerializedATN(); - if (serializedAtn === null) { - throw "The current parser does not support an ATN with bypass alternatives."; - } - let result = this.bypassAltsAtnCache[serializedAtn]; - if (result === null) { - const deserializationOptions = new ATNDeserializationOptions(); - deserializationOptions.generateRuleBypassTransitions = true; - result = new ATNDeserializer(deserializationOptions) - .deserialize(serializedAtn); - this.bypassAltsAtnCache[serializedAtn] = result; - } - return result; - } - - getInputStream() { - return this.getTokenStream(); - } - - setInputStream(input) { - this.setTokenStream(input); - } - - getTokenStream() { - return this._input; - } - - // Set the token stream and reset the parser. - setTokenStream(input) { - this._input = null; - this.reset(); - this._input = input; - } - - /** - * Match needs to return the current input symbol, which gets put - * into the label for the associated token ref; e.g., x=ID. - */ - getCurrentToken() { - return this._input.LT(1); - } - - notifyErrorListeners(msg, offendingToken, err) { - offendingToken = offendingToken || null; - err = err || null; - if (offendingToken === null) { - offendingToken = this.getCurrentToken(); - } - this._syntaxErrors += 1; - const line = offendingToken.line; - const column = offendingToken.column; - const listener = this.getErrorListenerDispatch(); - listener.syntaxError(this, offendingToken, line, column, msg, err); - } - - /** - * Consume and return the {@linkplain //getCurrentToken current symbol}. - * - *E.g., given the following input with {@code A} being the current - * lookahead symbol, this function moves the cursor to {@code B} and returns - * {@code A}.
- * - *- * A B - * ^ - *- * - * If the parser is not in error recovery mode, the consumed symbol is added - * to the parse tree using {@link ParserRuleContext//addChild(Token)}, and - * {@link ParseTreeListener//visitTerminal} is called on any parse listeners. - * If the parser is in error recovery mode, the consumed symbol is - * added to the parse tree using - * {@link ParserRuleContext//addErrorNode(Token)}, and - * {@link ParseTreeListener//visitErrorNode} is called on any parse - * listeners. - */ - consume() { - const o = this.getCurrentToken(); - if (o.type !== Token.EOF) { - this.getInputStream().consume(); - } - const hasListener = this._parseListeners !== null && this._parseListeners.length > 0; - if (this.buildParseTrees || hasListener) { - let node; - if (this._errHandler.inErrorRecoveryMode(this)) { - node = this._ctx.addErrorNode(o); - } else { - node = this._ctx.addTokenNode(o); - } - node.invokingState = this.state; - if (hasListener) { - this._parseListeners.forEach(function (listener) { - if (node instanceof ErrorNode || (node.isErrorNode !== undefined && node.isErrorNode())) { - listener.visitErrorNode(node); - } else if (node instanceof TerminalNode) { - listener.visitTerminal(node); - } - }); - } - } - return o; - } - - addContextToParseTree() { - // add current context to parent if we have a parent - if (this._ctx.parentCtx !== null) { - this._ctx.parentCtx.addChild(this._ctx); - } - } - - /** - * Always called by generated parsers upon entry to a rule. Access field - * {@link //_ctx} get the current context. - */ - enterRule(localctx, state, ruleIndex) { - this.state = state; - this._ctx = localctx; - this._ctx.start = this._input.LT(1); - if (this.buildParseTrees) { - this.addContextToParseTree(); - } - this.triggerEnterRuleEvent(); - } - - exitRule() { - this._ctx.stop = this._input.LT(-1); - // trigger event on _ctx, before it reverts to parent - this.triggerExitRuleEvent(); - this.state = this._ctx.invokingState; - this._ctx = this._ctx.parentCtx; - } - - enterOuterAlt(localctx, altNum) { - localctx.setAltNumber(altNum); - // if we have new localctx, make sure we replace existing ctx - // that is previous child of parse tree - if (this.buildParseTrees && this._ctx !== localctx) { - if (this._ctx.parentCtx !== null) { - this._ctx.parentCtx.removeLastChild(); - this._ctx.parentCtx.addChild(localctx); - } - } - this._ctx = localctx; - } - - /** - * Get the precedence level for the top-most precedence rule. - * - * @return The precedence level for the top-most precedence rule, or -1 if - * the parser context is not nested within a precedence rule. - */ - getPrecedence() { - if (this._precedenceStack.length === 0) { - return -1; - } else { - return this._precedenceStack[this._precedenceStack.length - 1]; - } - } - - enterRecursionRule(localctx, state, ruleIndex, precedence) { - this.state = state; - this._precedenceStack.push(precedence); - this._ctx = localctx; - this._ctx.start = this._input.LT(1); - this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules - } - - // Like {@link //enterRule} but for recursive rules. 
- pushNewRecursionContext(localctx, state, ruleIndex) { - const previous = this._ctx; - previous.parentCtx = localctx; - previous.invokingState = state; - previous.stop = this._input.LT(-1); - - this._ctx = localctx; - this._ctx.start = previous.start; - if (this.buildParseTrees) { - this._ctx.addChild(previous); - } - this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules - } - - unrollRecursionContexts(parentCtx) { - this._precedenceStack.pop(); - this._ctx.stop = this._input.LT(-1); - const retCtx = this._ctx; // save current ctx (return value) - // unroll so _ctx is as it was before call to recursive method - const parseListeners = this.getParseListeners(); - if (parseListeners !== null && parseListeners.length > 0) { - while (this._ctx !== parentCtx) { - this.triggerExitRuleEvent(); - this._ctx = this._ctx.parentCtx; - } - } else { - this._ctx = parentCtx; - } - // hook into tree - retCtx.parentCtx = parentCtx; - if (this.buildParseTrees && parentCtx !== null) { - // add return ctx into invoking rule's tree - parentCtx.addChild(retCtx); - } - } - - getInvokingContext(ruleIndex) { - let ctx = this._ctx; - while (ctx !== null) { - if (ctx.ruleIndex === ruleIndex) { - return ctx; - } - ctx = ctx.parentCtx; - } - return null; - } - - precpred(localctx, precedence) { - return precedence >= this._precedenceStack[this._precedenceStack.length - 1]; - } - - inContext(context) { - // TODO: useful in parser? - return false; - } - - /** - * Checks whether or not {@code symbol} can follow the current state in the - * ATN. The behavior of this method is equivalent to the following, but is - * implemented such that the complete context-sensitive follow set does not - * need to be explicitly constructed. - * - *
- * return getExpectedTokens().contains(symbol); - *- * - * @param symbol the symbol type to check - * @return {@code true} if {@code symbol} can follow the current state in - * the ATN, otherwise {@code false}. - */ - isExpectedToken(symbol) { - const atn = this._interp.atn; - let ctx = this._ctx; - const s = atn.states[this.state]; - let following = atn.nextTokens(s); - if (following.contains(symbol)) { - return true; - } - if (!following.contains(Token.EPSILON)) { - return false; - } - while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { - const invokingState = atn.states[ctx.invokingState]; - const rt = invokingState.transitions[0]; - following = atn.nextTokens(rt.followState); - if (following.contains(symbol)) { - return true; - } - ctx = ctx.parentCtx; - } - if (following.contains(Token.EPSILON) && symbol === Token.EOF) { - return true; - } else { - return false; - } - } - - /** - * Computes the set of input symbols which could follow the current parser - * state and context, as given by {@link //getState} and {@link //getContext}, - * respectively. - * - * @see ATN//getExpectedTokens(int, RuleContext) - */ - getExpectedTokens() { - return this._interp.atn.getExpectedTokens(this.state, this._ctx); - } - - getExpectedTokensWithinCurrentRule() { - const atn = this._interp.atn; - const s = atn.states[this.state]; - return atn.nextTokens(s); - } - - // Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. - getRuleIndex(ruleName) { - const ruleIndex = this.getRuleIndexMap()[ruleName]; - if (ruleIndex !== null) { - return ruleIndex; - } else { - return -1; - } - } - - /** - * Return List<String> of the rule names in your parser instance - * leading up to a call to the current rule. You could override if - * you want more details such as the file/line info of where - * in the ATN a rule is invoked. - * - * this is very useful for error messages. - */ - getRuleInvocationStack(p) { - p = p || null; - if (p === null) { - p = this._ctx; - } - const stack = []; - while (p !== null) { - // compute what follows who invoked us - const ruleIndex = p.ruleIndex; - if (ruleIndex < 0) { - stack.push("n/a"); - } else { - stack.push(this.ruleNames[ruleIndex]); - } - p = p.parentCtx; - } - return stack; - } - - // For debugging and other purposes. - getDFAStrings() { - return this._interp.decisionToDFA.toString(); - } - - // For debugging and other purposes. - dumpDFA() { - let seenOne = false; - for (let i = 0; i < this._interp.decisionToDFA.length; i++) { - const dfa = this._interp.decisionToDFA[i]; - if (dfa.states.length > 0) { - if (seenOne) { - console.log(); - } - this.printer.println("Decision " + dfa.decision + ":"); - this.printer.print(dfa.toString(this.literalNames, this.symbolicNames)); - seenOne = true; - } - } - } - - /* - " printer = function() {\r\n" + - " this.println = function(s) { document.getElementById('output') += s + '\\n'; }\r\n" + - " this.print = function(s) { document.getElementById('output') += s; }\r\n" + - " };\r\n" + - */ - getSourceName() { - return this._input.sourceName; - } - - /** - * During a parse is sometimes useful to listen in on the rule entry and exit - * events as well as token matches. this is for quick and dirty debugging. 
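// [Editor's note] setTrace below is the quick-and-dirty option the comment
// describes (parser as in the earlier sketch):
parser.setTrace(true);  // installs a TraceListener printing enter/exit/consume
parser.prog();          // hypothetical entry rule
parser.setTrace(false); // removes the TraceListener again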
- */ - setTrace(trace) { - if (!trace) { - this.removeParseListener(this._tracer); - this._tracer = null; - } else { - if (this._tracer !== null) { - this.removeParseListener(this._tracer); - } - this._tracer = new TraceListener(this); - this.addParseListener(this._tracer); - } - } -} - -/** - * this field maps from the serialized ATN string to the deserialized {@link - * ATN} with - * bypass alternatives. - * - * @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() - */ -Parser.bypassAltsAtnCache = {}; - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/PredictionContextCache.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - -/** - * Used to cache {@link PredictionContext} objects. Its used for the shared - * context cash associated with contexts in DFA states. This cache - * can be used for both lexers and parsers. - */ -class PredictionContextCache { - - constructor() { - this.cache = new HashMap_HashMap(); - } - - /** - * Add a context to the cache and return it. If the context already exists, - * return that one instead and do not add a new context to the cache. - * Protect shared cache from unsafe thread access. - */ - add(ctx) { - if (ctx === PredictionContext.EMPTY) { - return PredictionContext.EMPTY; - } - const existing = this.cache.get(ctx) || null; - if (existing !== null) { - return existing; - } - this.cache.set(ctx, ctx); - return ctx; - } - - get(ctx) { - return this.cache.get(ctx) || null; - } - - get length(){ - return this.cache.length; - } -} - -;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/TerminalNodeImpl.js -/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. - * Use is of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - - - - -class TerminalNodeImpl extends TerminalNode { - constructor(symbol) { - super(); - this.parentCtx = null; - this.symbol = symbol; - } - - getChild(i) { - return null; - } - - getSymbol() { - return this.symbol; - } - - getParent() { - return this.parentCtx; - } - - getPayload() { - return this.symbol; - } - - getSourceInterval() { - if (this.symbol === null) { - return Interval.INVALID_INTERVAL; - } - const tokenIndex = this.symbol.tokenIndex; - return new Interval(tokenIndex, tokenIndex); - } - - getChildCount() { - return 0; - } - - accept(visitor) { - return visitor.visitTerminal(this); - } - - getText() { - return this.symbol.text; - } - - toString() { - if (this.symbol.type === Token.EOF) { - return "
+ * This implementation prints messages to {@link System//err} containing the + * values of {@code line}, {@code charPositionInLine}, and {@code msg} using + * the following format.
+ * + *+ * line line:charPositionInLine msg + *+ * + */ +class ConsoleErrorListener extends ErrorListener { + constructor() { + super(); + } + + syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + console.error("line " + line + ":" + column + " " + msg); + } +} + + +/** + * Provides a default instance of {@link ConsoleErrorListener}. + */ +ConsoleErrorListener.INSTANCE = new ConsoleErrorListener(); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/ProxyErrorListener.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class ProxyErrorListener extends ErrorListener { + constructor(delegates) { + super(); + if (delegates===null) { + throw "delegates"; + } + this.delegates = delegates; + return this; + } + + syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + this.delegates.map(d => d.syntaxError(recognizer, offendingSymbol, line, column, msg, e)); + } + + reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { + this.delegates.map(d => d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)); + } + + reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { + this.delegates.map(d => d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)); + } + + reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) { + this.delegates.map(d => d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Recognizer.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + +class Recognizer { + constructor() { + this._listeners = [ ConsoleErrorListener.INSTANCE ]; + this._interp = null; + this._stateNumber = -1; + } + + checkVersion(toolVersion) { + const runtimeVersion = "4.10.1"; + if (runtimeVersion!==toolVersion) { + console.log("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion); + } + } + + addErrorListener(listener) { + this._listeners.push(listener); + } + + removeErrorListeners() { + this._listeners = []; + } + + getLiteralNames() { + return Object.getPrototypeOf(this).constructor.literalNames || []; + } + + getSymbolicNames() { + return Object.getPrototypeOf(this).constructor.symbolicNames || []; + } + + getTokenNames() { + if(!this.tokenNames) { + const literalNames = this.getLiteralNames(); + const symbolicNames = this.getSymbolicNames(); + const length = literalNames.length > symbolicNames.length ? literalNames.length : symbolicNames.length; + this.tokenNames = []; + for(let i=0; i
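// [Editor's sketch] Recognizer instances start with ConsoleErrorListener.INSTANCE
// installed (see the Recognizer constructor above); swapping it for a
// collecting listener:
class CollectingErrorListener extends antlr4.error.ErrorListener {
  constructor() { super(); this.errors = []; }
  syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
    this.errors.push(`line ${line}:${column} ${msg}`);
  }
}
parser.removeErrorListeners(); // drop the console default
parser.addErrorListener(new CollectingErrorListener());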
+ * If {@code oldToken} is also a {@link CommonToken} instance, the newly + * constructed token will share a reference to the {@link //text} field and + * the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will + * be assigned the result of calling {@link //getText}, and {@link //source} + * will be constructed from the result of {@link Token//getTokenSource} and + * {@link Token//getInputStream}.
+ * + * @param oldToken The token to copy. + */ + clone() { + const t = new CommonToken(this.source, this.type, this.channel, this.start, this.stop); + t.tokenIndex = this.tokenIndex; + t.line = this.line; + t.column = this.column; + t.text = this.text; + return t; + } + + toString() { + let txt = this.text; + if (txt !== null) { + txt = txt.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t"); + } else { + txt = "+ * The default value is {@code false} to avoid the performance and memory + * overhead of copying text for every token unless explicitly requested.
+ */ + this.copyText = copyText===undefined ? false : copyText; + } + + create(source, type, text, channel, start, stop, line, column) { + const t = new CommonToken(source, type, channel, start, stop); + t.line = line; + t.column = column; + if (text !==null) { + t.text = text; + } else if (this.copyText && source[1] !==null) { + t.text = source[1].getText(start,stop); + } + return t; + } + + createThin(type, text) { + const t = new CommonToken(null, type); + t.text = text; + return t; + } +} + +/** + * The default {@link CommonTokenFactory} instance. + * + *+ * This token factory does not explicitly copy token text when constructing + * tokens.
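// [Editor's sketch] Opting in to eager text copying, e.g. when the underlying
// input buffer is mutated or released after lexing (CommonTokenFactory as
// defined here; setTokenFactory is the Parser method shown earlier):
const factory = new CommonTokenFactory(true); // copyText = true
parser.setTokenFactory(factory); // stored on the token source's _factory field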
+ */ +CommonTokenFactory.DEFAULT = new CommonTokenFactory(); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/RecognitionException.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/** + * The root of the ANTLR exception hierarchy. In general, ANTLR tracks just + * 3 kinds of errors: prediction errors, failed predicate errors, and + * mismatched input errors. In each case, the parser knows where it is + * in the input, where it is in the ATN, the rule invocation stack, + * and what kind of problem occurred. + */ + +class RecognitionException extends Error { + constructor(params) { + super(params.message); + if (Error.captureStackTrace) + Error.captureStackTrace(this, RecognitionException); + this.message = params.message; + this.recognizer = params.recognizer; + this.input = params.input; + this.ctx = params.ctx; + /** + * The current {@link Token} when an error occurred. Since not all streams + * support accessing symbols by index, we have to track the {@link Token} + * instance itself + */ + this.offendingToken = null; + /** + * Get the ATN state number the parser was in at the time the error + * occurred. For {@link NoViableAltException} and + * {@link LexerNoViableAltException} exceptions, this is the + * {@link DecisionState} number. For others, it is the state whose outgoing + * edge we couldn't match. + */ + this.offendingState = -1; + if (this.recognizer!==null) { + this.offendingState = this.recognizer.state; + } + } + + /** + * Gets the set of input symbols which could potentially follow the + * previously matched symbol at the time this exception was thrown. + * + *If the set of expected tokens is not known and could not be computed, + * this method returns {@code null}.
+ * + * @return The set of token types that could potentially follow the current + * state in the ATN, or {@code null} if the information is not available. + */ + getExpectedTokens() { + if (this.recognizer!==null) { + return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx); + } else { + return null; + } + } + + //If the state number is not known, this method returns -1.
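+
+    /* Editorial sketch -- not part of the runtime: when an error listener
+     * receives a non-null RecognitionException `e`, the fields above can be
+     * used for diagnostics or recovery, e.g.:
+     *
+     *     if (e !== null && e.recognizer !== null) {
+     *         const expected = e.getExpectedTokens(); // IntervalSet or null
+     *         console.log("offending state " + e.offendingState + ", expected " + expected);
+     *     }
+     */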
+ toString() { + return this.message; + } +} + + + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/Interval.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +/* stop is not included! */ +class Interval { + + constructor(start, stop) { + this.start = start; + this.stop = stop; + } + + clone() { + return new Interval(this.start, this.stop); + } + + contains(item) { + return item >= this.start && item < this.stop; + } + + toString() { + if(this.start===this.stop-1) { + return this.start.toString(); + } else { + return this.start.toString() + ".." + (this.stop-1).toString(); + } + } + + get length(){ + return this.stop - this.start; + } +} + +Interval.INVALID_INTERVAL = new Interval(-1, -2); + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/LexerNoViableAltException.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +class LexerNoViableAltException extends RecognitionException { + constructor(lexer, input, startIndex, deadEndConfigs) { + super({message: "", recognizer: lexer, input: input, ctx: null}); + this.startIndex = startIndex; + this.deadEndConfigs = deadEndConfigs; + } + + toString() { + let symbol = ""; + if (this.startIndex >= 0 && this.startIndex < this.input.size) { + symbol = this.input.getText(new Interval(this.startIndex,this.startIndex)); + } + return "LexerNoViableAltException" + symbol; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Lexer.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + +/** + * A lexer is recognizer that draws input symbols from a character stream. + * lexer grammars result in a subclass of this object. A Lexer object + * uses simplified match() and error recovery mechanisms in the interest of speed. + */ +class Lexer extends Recognizer { + constructor(input) { + super(); + this._input = input; + this._factory = CommonTokenFactory.DEFAULT; + this._tokenFactorySourcePair = [ this, input ]; + + this._interp = null; // child classes must populate this + + /** + * The goal of all lexer rules/methods is to create a token object. + * this is an instance variable as multiple rules may collaborate to + * create a single token. nextToken will return this object after + * matching lexer rule(s). If you subclass to allow multiple token + * emissions, then set this to the last token to be matched or + * something nonnull so that the auto token emit mechanism will not + * emit another token. + */ + this._token = null; + + /** + * What character index in the stream did the current token start at? + * Needed, for example, to get the text for current token. Set at + * the start of nextToken. + */ + this._tokenStartCharIndex = -1; + + // The line on which the first character of the token resides/// + this._tokenStartLine = -1; + + // The character position of first character within the line/// + this._tokenStartColumn = -1; + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF ; then you see DONE EOF. 
+ this._hitEOF = false; + + // The channel number for the current token/// + this._channel = Token.DEFAULT_CHANNEL; + + // The token type for the current token/// + this._type = Token.INVALID_TYPE; + + this._modeStack = []; + this._mode = Lexer.DEFAULT_MODE; + + /** + * You can set the text for the current token to override what is in + * the input char buffer. Use setText() or can set this instance var. + */ + this._text = null; + } + + reset() { + // wack Lexer state variables + if (this._input !== null) { + this._input.seek(0); // rewind the input + } + this._token = null; + this._type = Token.INVALID_TYPE; + this._channel = Token.DEFAULT_CHANNEL; + this._tokenStartCharIndex = -1; + this._tokenStartColumn = -1; + this._tokenStartLine = -1; + this._text = null; + + this._hitEOF = false; + this._mode = Lexer.DEFAULT_MODE; + this._modeStack = []; + + this._interp.reset(); + } + +// Return a token from this source; i.e., match a token on the char stream. + nextToken() { + if (this._input === null) { + throw "nextToken requires a non-null input stream."; + } + + /** + * Mark start location in char stream so unbuffered streams are + * guaranteed at least have text of current token + */ + const tokenStartMarker = this._input.mark(); + try { + for (;;) { + if (this._hitEOF) { + this.emitEOF(); + return this._token; + } + this._token = null; + this._channel = Token.DEFAULT_CHANNEL; + this._tokenStartCharIndex = this._input.index; + this._tokenStartColumn = this._interp.column; + this._tokenStartLine = this._interp.line; + this._text = null; + let continueOuter = false; + for (;;) { + this._type = Token.INVALID_TYPE; + let ttype = Lexer.SKIP; + try { + ttype = this._interp.match(this._input, this._mode); + } catch (e) { + if(e instanceof RecognitionException) { + this.notifyListeners(e); // report error + this.recover(e); + } else { + console.log(e.stack); + throw e; + } + } + if (this._input.LA(1) === Token.EOF) { + this._hitEOF = true; + } + if (this._type === Token.INVALID_TYPE) { + this._type = ttype; + } + if (this._type === Lexer.SKIP) { + continueOuter = true; + break; + } + if (this._type !== Lexer.MORE) { + break; + } + } + if (continueOuter) { + continue; + } + if (this._token === null) { + this.emit(); + } + return this._token; + } + } finally { + // make sure we release marker after match or + // unbuffered char stream will keep buffering + this._input.release(tokenStartMarker); + } + } + + /** + * Instruct the lexer to skip creating a token for current lexer rule + * and look for another token. nextToken() knows to keep looking when + * a lexer rule finishes with token set to SKIP_TOKEN. Recall that + * if token==null at end of any token rule, it creates one for you + * and emits it. + */ + skip() { + this._type = Lexer.SKIP; + } + + more() { + this._type = Lexer.MORE; + } + + mode(m) { + this._mode = m; + } + + pushMode(m) { + if (this._interp.debug) { + console.log("pushMode " + m); + } + this._modeStack.push(this._mode); + this.mode(m); + } + + popMode() { + if (this._modeStack.length === 0) { + throw "Empty Stack"; + } + if (this._interp.debug) { + console.log("popMode back to " + this._modeStack.slice(0, -1)); + } + this.mode(this._modeStack.pop()); + return this._mode; + } + + /** + * By default does not support multiple emits per nextToken invocation + * for efficiency reasons. Subclass and override this method, nextToken, + * and getToken (to push tokens into a list and pull from that list + * rather than a single variable as this implementation does). 
+ */
+    emitToken(token) {
+        this._token = token;
+    }
+
+    /**
+     * The standard method called to automatically emit a token at the
+     * outermost lexical rule. The token object should point into the
+     * char buffer start..stop. If there is a text override in 'text',
+     * use that to set the token's text. Override this method to emit
+     * custom Token objects or provide a new factory.
+     */
+    emit() {
+        const t = this._factory.create(this._tokenFactorySourcePair, this._type,
+            this._text, this._channel, this._tokenStartCharIndex,
+            this.getCharIndex() - 1, this._tokenStartLine,
+            this._tokenStartColumn);
+        this.emitToken(t);
+        return t;
+    }
+
+    emitEOF() {
+        const cpos = this.column;
+        const lpos = this.line;
+        const eof = this._factory.create(this._tokenFactorySourcePair, Token.EOF,
+            null, Token.DEFAULT_CHANNEL, this._input.index,
+            this._input.index - 1, lpos, cpos);
+        this.emitToken(eof);
+        return eof;
+    }
+
+    // What is the index of the current character of lookahead?
+    getCharIndex() {
+        return this._input.index;
+    }
+
+    /**
+     * Return a list of all Token objects in input char stream.
+     * Forces load of all tokens. Does not include EOF token.
+     */
+    getAllTokens() {
+        const tokens = [];
+        let t = this.nextToken();
+        while (t.type !== Token.EOF) {
+            tokens.push(t);
+            t = this.nextToken();
+        }
+        return tokens;
+    }
+
+    notifyListeners(e) {
+        const start = this._tokenStartCharIndex;
+        const stop = this._input.index;
+        const text = this._input.getText(start, stop);
+        const msg = "token recognition error at: '" + this.getErrorDisplay(text) + "'";
+        const listener = this.getErrorListenerDispatch();
+        listener.syntaxError(this, null, this._tokenStartLine,
+            this._tokenStartColumn, msg, e);
+    }
+
+    getErrorDisplay(s) {
+        const d = [];
+        for (let i = 0; i < s.length; i++) {
+            d.push(s[i]);
+        }
+        return d.join('');
+    }
+
+    getErrorDisplayForChar(c) {
+        if (c.charCodeAt(0) === Token.EOF) {
+            return "<EOF>";
+        } else if (c === '\n') {
+            return "\\n";
+        } else if (c === '\t') {
+            return "\\t";
+        } else if (c === '\r') {
+            return "\\r";
+        } else {
+            return c;
+        }
+    }
+
+    getCharErrorDisplay(c) {
+        return "'" + this.getErrorDisplayForChar(c) + "'";
+    }
+
+    /**
+     * Lexers can normally match any char in its vocabulary after matching
+     * a token, so do the easy thing and just kill a character and hope
+     * it all works out. You can instead use the rule invocation stack
+     * to do sophisticated error recovery if you are in a fragment rule.
+     */
+    recover(re) {
+        if (this._input.LA(1) !== Token.EOF) {
+            if (re instanceof LexerNoViableAltException) {
+                // skip a char and try again
+                this._interp.consume(this._input);
+            } else {
+                this._input.consume();
+            }
+        }
+    }
+}
+
+Lexer.DEFAULT_MODE = 0;
+Lexer.MORE = -2;
+Lexer.SKIP = -3;
+
+Lexer.DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
+Lexer.HIDDEN = Token.HIDDEN_CHANNEL;
+Lexer.MIN_CHAR_VALUE = 0x0000;
+Lexer.MAX_CHAR_VALUE = 0x10FFFF;
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/BufferedTokenStream.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/**
+ * This implementation of {@link TokenStream} loads tokens from a
+ * {@link TokenSource} on-demand, and places the tokens in a buffer to provide
+ * access to any previous token by index.
+ *
+ * <p>This token stream ignores the value of {@link Token//getChannel}. If your
+ * parser requires the token stream filter tokens to only those on a particular
+ * channel, such as {@link Token//DEFAULT_CHANNEL} or
+ * {@link Token//HIDDEN_CHANNEL}, use a filtering token stream such a
+ * {@link CommonTokenStream}.</p>
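+ *
+ * <p>A usage sketch (editorial addition, not upstream javadoc; `MyGeneratedLexer`
+ * and `inputStream` are hypothetical):</p>
+ *
+ * <pre>
+ * const lexer = new MyGeneratedLexer(inputStream);
+ * const stream = new CommonTokenStream(lexer);
+ * stream.fill();                        // pull every token up to EOF
+ * for (const t of stream.tokens) {
+ *     console.log(t.tokenIndex, t.text);
+ * }
+ * </pre>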
+ */ +class BufferedTokenStream extends TokenStream { + constructor(tokenSource) { + + super(); + // The {@link TokenSource} from which tokens for this stream are fetched. + this.tokenSource = tokenSource; + /** + * A collection of all tokens fetched from the token source. The list is + * considered a complete view of the input once {@link //fetchedEOF} is set + * to {@code true}. + */ + this.tokens = []; + + /** + * The index into {@link //tokens} of the current token (next token to + * {@link //consume}). {@link //tokens}{@code [}{@link //p}{@code ]} should + * be + * {@link //LT LT(1)}. + * + *This field is set to -1 when the stream is first constructed or when + * {@link //setTokenSource} is called, indicating that the first token has + * not yet been fetched from the token source. For additional information, + * see the documentation of {@link IntStream} for a description of + * Initializing Methods.
+ */
+        this.index = -1;
+
+        /**
+         * Indicates whether the {@link Token//EOF} token has been fetched from
+         * {@link //tokenSource} and added to {@link //tokens}. This field improves
+         * performance for the following cases:
+         *
+         * <ul>
+         * <li>{@link //consume}: The lookahead check in {@link //consume} to prevent
+         * consuming the EOF symbol is optimized by checking the values of
+         * {@link //fetchedEOF} and {@link //p} instead of calling {@link //LA}.</li>
+         * <li>{@link //fetch}: The check to prevent adding multiple EOF symbols into
+         * {@link //tokens} is trivial with this field.</li>
+         * </ul>
+         */
+        this.fetchedEOF = false;
+    }
+
+    mark() {
+        return 0;
+    }
+
+    release(marker) {
+        // no resources to release
+    }
+
+    reset() {
+        this.seek(0);
+    }
+
+    seek(index) {
+        this.lazyInit();
+        this.index = this.adjustSeekIndex(index);
+    }
+
+    get(index) {
+        this.lazyInit();
+        return this.tokens[index];
+    }
+
+    consume() {
+        let skipEofCheck = false;
+        if (this.index >= 0) {
+            if (this.fetchedEOF) {
+                // the last token in tokens is EOF. skip check if p indexes any
+                // fetched token except the last.
+                skipEofCheck = this.index < this.tokens.length - 1;
+            } else {
+                // no EOF token in tokens. skip check if p indexes a fetched token.
+                skipEofCheck = this.index < this.tokens.length;
+            }
+        } else {
+            // not yet initialized
+            skipEofCheck = false;
+        }
+        if (!skipEofCheck && this.LA(1) === Token.EOF) {
+            throw "cannot consume EOF";
+        }
+        if (this.sync(this.index + 1)) {
+            this.index = this.adjustSeekIndex(this.index + 1);
+        }
+    }
+
+    /**
+     * Make sure index {@code i} in tokens has a token.
+     *
+     * @return {Boolean} {@code true} if a token is located at index {@code i},
+     * otherwise {@code false}.
+     */
+    sync(i) {
+        const n = i - this.tokens.length + 1; // how many more elements we need?
+        if (n > 0) {
+            const fetched = this.fetch(n);
+            return fetched >= n;
+        }
+        return true;
+    }
+
+    /**
+     * Add {@code n} elements to the buffer.
+     *
+     * @return {Number} The actual number of elements added to the buffer.
+     */
+    fetch(n) {
+        if (this.fetchedEOF) {
+            return 0;
+        }
+        for (let i = 0; i < n; i++) {
+            const t = this.tokenSource.nextToken();
+            t.tokenIndex = this.tokens.length;
+            this.tokens.push(t);
+            if (t.type === Token.EOF) {
+                this.fetchedEOF = true;
+                return i + 1;
+            }
+        }
+        return n;
+    }
+
+    LA(i) {
+        return this.LT(i).type;
+    }
+
+    LB(k) {
+        if (this.index - k < 0) {
+            return null;
+        }
+        return this.tokens[this.index - k];
+    }
+
+    LT(k) {
+        this.lazyInit();
+        if (k === 0) {
+            return null;
+        }
+        if (k < 0) {
+            return this.LB(-k);
+        }
+        const i = this.index + k - 1;
+        this.sync(i);
+        if (i >= this.tokens.length) {
+            // EOF must be last token
+            return this.tokens[this.tokens.length - 1];
+        }
+        return this.tokens[i];
+    }
+
+    /**
+     * Allowed derived classes to modify the behavior of operations which change
+     * the current stream position by adjusting the target token index of a seek
+     * operation. The default implementation simply returns {@code i}. If an
+     * exception is thrown in this method, the current stream index should not be
+     * changed.
+     *
+     * <p>For example, {@link CommonTokenStream} overrides this method to ensure
+     * that the seek target is always an on-channel token.</p>
+ * + * @param {Number} i The target token index. + * @return {Number} The adjusted target token index. + */ + adjustSeekIndex(i) { + return i; + } + + lazyInit() { + if (this.index === -1) { + this.setup(); + } + } + + setup() { + this.sync(0); + this.index = this.adjustSeekIndex(0); + } + +// Reset this token stream by setting its token source./// + setTokenSource(tokenSource) { + this.tokenSource = tokenSource; + this.tokens = []; + this.index = -1; + this.fetchedEOF = false; + } + + /** + * Given a starting index, return the index of the next token on channel. + * Return i if tokens[i] is on channel. Return -1 if there are no tokens + * on channel between i and EOF. + */ + nextTokenOnChannel(i, channel) { + this.sync(i); + if (i >= this.tokens.length) { + return -1; + } + let token = this.tokens[i]; + while (token.channel !== this.channel) { + if (token.type === Token.EOF) { + return -1; + } + i += 1; + this.sync(i); + token = this.tokens[i]; + } + return i; + } + + /** + * Given a starting index, return the index of the previous token on channel. + * Return i if tokens[i] is on channel. Return -1 if there are no tokens + * on channel between i and 0. + */ + previousTokenOnChannel(i, channel) { + while (i >= 0 && this.tokens[i].channel !== channel) { + i -= 1; + } + return i; + } + + /** + * Collect all tokens on specified channel to the right of + * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or + * EOF. If channel is -1, find any non default channel token. + */ + getHiddenTokensToRight(tokenIndex, + channel) { + if (channel === undefined) { + channel = -1; + } + this.lazyInit(); + if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { + throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1; + } + const nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL); + const from_ = tokenIndex + 1; + // if none onchannel to right, nextOnChannel=-1 so set to = last token + const to = nextOnChannel === -1 ? this.tokens.length - 1 : nextOnChannel; + return this.filterForChannel(from_, to, channel); + } + + /** + * Collect all tokens on specified channel to the left of + * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. + * If channel is -1, find any non default channel token. + */ + getHiddenTokensToLeft(tokenIndex, + channel) { + if (channel === undefined) { + channel = -1; + } + this.lazyInit(); + if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { + throw "" + tokenIndex + " not in 0.." 
+ this.tokens.length - 1; + } + const prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL); + if (prevOnChannel === tokenIndex - 1) { + return null; + } + // if none on channel to left, prevOnChannel=-1 then from=0 + const from_ = prevOnChannel + 1; + const to = tokenIndex - 1; + return this.filterForChannel(from_, to, channel); + } + + filterForChannel(left, right, channel) { + const hidden = []; + for (let i = left; i < right + 1; i++) { + const t = this.tokens[i]; + if (channel === -1) { + if (t.channel !== Lexer.DEFAULT_TOKEN_CHANNEL) { + hidden.push(t); + } + } else if (t.channel === channel) { + hidden.push(t); + } + } + if (hidden.length === 0) { + return null; + } + return hidden; + } + + getSourceName() { + return this.tokenSource.getSourceName(); + } + +// Get the text of all tokens in this buffer./// + getText(interval) { + this.lazyInit(); + this.fill(); + if (interval === undefined || interval === null) { + interval = new Interval(0, this.tokens.length - 1); + } + let start = interval.start; + if (start instanceof Token) { + start = start.tokenIndex; + } + let stop = interval.stop; + if (stop instanceof Token) { + stop = stop.tokenIndex; + } + if (start === null || stop === null || start < 0 || stop < 0) { + return ""; + } + if (stop >= this.tokens.length) { + stop = this.tokens.length - 1; + } + let s = ""; + for (let i = start; i < stop + 1; i++) { + const t = this.tokens[i]; + if (t.type === Token.EOF) { + break; + } + s = s + t.text; + } + return s; + } + +// Get all tokens from lexer until EOF/// + fill() { + this.lazyInit(); + while (this.fetch(1000) === 1000) { + continue; + } + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/CommonTokenStream.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + +/** + * This class extends {@link BufferedTokenStream} with functionality to filter + * token streams to tokens on a particular channel (tokens where + * {@link Token//getChannel} returns a particular value). + * + *+ * This token stream provides access to all tokens by index or when calling + * methods like {@link //getText}. The channel filtering is only used for code + * accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and + * {@link //LB}.
+ * + *+ * By default, tokens are placed on the default channel + * ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the + * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to + * call {@link Lexer//setChannel}. + *
+ * + *+ * Note: lexer rules which use the {@code ->skip} lexer command or call + * {@link Lexer//skip} do not produce tokens at all, so input text matched by + * such a rule will not be available as part of the token stream, regardless of + * channel.
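+ *
+ * <p>A filtering sketch (editorial addition; `lexer` is a hypothetical generated
+ * lexer whose comment and whitespace rules use {@code ->channel(HIDDEN)}): the
+ * buffer still holds every token, but the lookahead methods skip off-channel
+ * ones:</p>
+ *
+ * <pre>
+ * const stream = new CommonTokenStream(lexer);   // Token.DEFAULT_CHANNEL
+ * stream.fill();
+ * stream.LT(1);                        // first on-channel token
+ * stream.getNumberOfOnChannelTokens(); // excludes hidden tokens
+ * </pre>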
+ */ +class CommonTokenStream extends BufferedTokenStream { + constructor(lexer, channel) { + super(lexer); + this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel; + } + + adjustSeekIndex(i) { + return this.nextTokenOnChannel(i, this.channel); + } + + LB(k) { + if (k===0 || this.index-k<0) { + return null; + } + let i = this.index; + let n = 1; + // find k good tokens looking backwards + while (n <= k) { + // skip off-channel tokens + i = this.previousTokenOnChannel(i - 1, this.channel); + n += 1; + } + if (i < 0) { + return null; + } + return this.tokens[i]; + } + + LT(k) { + this.lazyInit(); + if (k === 0) { + return null; + } + if (k < 0) { + return this.LB(-k); + } + let i = this.index; + let n = 1; // we know tokens[pos] is a good one + // find k good tokens + while (n < k) { + // skip off-channel tokens, but make sure to not look past EOF + if (this.sync(i + 1)) { + i = this.nextTokenOnChannel(i + 1, this.channel); + } + n += 1; + } + return this.tokens[i]; + } + + // Count EOF just once. + getNumberOfOnChannelTokens() { + let n = 0; + this.fill(); + for (let i =0; i< this.tokens.length;i++) { + const t = this.tokens[i]; + if( t.channel===this.channel) { + n += 1; + } + if( t.type===Token.EOF) { + break; + } + } + return n; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/stringHashCode.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +String.prototype.seed = String.prototype.seed || Math.round(Math.random() * Math.pow(2, 32)); + +String.prototype.hashCode = function () { + const key = this.toString(); + let h1b, k1; + + const remainder = key.length & 3; // key.length % 4 + const bytes = key.length - remainder; + let h1 = String.prototype.seed; + const c1 = 0xcc9e2d51; + const c2 = 0x1b873593; + let i = 0; + + while (i < bytes) { + k1 = + ((key.charCodeAt(i) & 0xff)) | + ((key.charCodeAt(++i) & 0xff) << 8) | + ((key.charCodeAt(++i) & 0xff) << 16) | + ((key.charCodeAt(++i) & 0xff) << 24); + ++i; + + k1 = ((((k1 & 0xffff) * c1) + ((((k1 >>> 16) * c1) & 0xffff) << 16))) & 0xffffffff; + k1 = (k1 << 15) | (k1 >>> 17); + k1 = ((((k1 & 0xffff) * c2) + ((((k1 >>> 16) * c2) & 0xffff) << 16))) & 0xffffffff; + + h1 ^= k1; + h1 = (h1 << 13) | (h1 >>> 19); + h1b = ((((h1 & 0xffff) * 5) + ((((h1 >>> 16) * 5) & 0xffff) << 16))) & 0xffffffff; + h1 = (((h1b & 0xffff) + 0x6b64) + ((((h1b >>> 16) + 0xe654) & 0xffff) << 16)); + } + + k1 = 0; + + switch (remainder) { + case 3: + k1 ^= (key.charCodeAt(i + 2) & 0xff) << 16; + // no-break + case 2: + k1 ^= (key.charCodeAt(i + 1) & 0xff) << 8; + // no-break + case 1: + k1 ^= (key.charCodeAt(i) & 0xff); + k1 = (((k1 & 0xffff) * c1) + ((((k1 >>> 16) * c1) & 0xffff) << 16)) & 0xffffffff; + k1 = (k1 << 15) | (k1 >>> 17); + k1 = (((k1 & 0xffff) * c2) + ((((k1 >>> 16) * c2) & 0xffff) << 16)) & 0xffffffff; + h1 ^= k1; + } + + h1 ^= key.length; + + h1 ^= h1 >>> 16; + h1 = (((h1 & 0xffff) * 0x85ebca6b) + ((((h1 >>> 16) * 0x85ebca6b) & 0xffff) << 16)) & 0xffffffff; + h1 ^= h1 >>> 13; + h1 = ((((h1 & 0xffff) * 0xc2b2ae35) + ((((h1 >>> 16) * 0xc2b2ae35) & 0xffff) << 16))) & 0xffffffff; + h1 ^= h1 >>> 16; + + return h1 >>> 0; +}; + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/equalArrays.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. 
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+function equalArrays(a, b) {
+    if (!Array.isArray(a) || !Array.isArray(b))
+        return false;
+    if (a === b)
+        return true;
+    if (a.length !== b.length)
+        return false;
+    for (let i = 0; i < a.length; i++) {
+        if (a[i] === b[i])
+            continue;
+        if (!a[i].equals || !a[i].equals(b[i]))
+            return false;
+    }
+    return true;
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/HashCode.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+class HashCode {
+
+    constructor() {
+        this.count = 0;
+        this.hash = 0;
+    }
+
+    update() {
+        for(let i=0;i<arguments.length;i++) {
+            const value = arguments[i];
+            if (value == null)
+                continue;
+            if(Array.isArray(value))
+                this.update.apply(this, value);
+            else {
+                let k = 0;
+                switch (typeof(value)) {
+                    case 'undefined':
+                    case 'function':
+                        continue;
+                    case 'number':
+                    case 'boolean':
+                        k = value;
+                        break;
+                    case 'string':
+                        k = value.hashCode();
+                        break;
+                    default:
+                        if(value.updateHashCode)
+                            value.updateHashCode(this);
+                        else
+                            console.log("No updateHashCode for " + value.toString());
+                        continue;
+                }
+                k = k * 0xCC9E2D51;
+                k = (k << 15) | (k >>> (32 - 15));
+                k = k * 0x1B873593;
+                this.count = this.count + 1;
+                let hash = this.hash ^ k;
+                hash = (hash << 13) | (hash >>> (32 - 13));
+                hash = hash * 5 + 0xE6546B64;
+                this.hash = hash;
+            }
+        }
+    }
+
+    finish() {
+        let hash = this.hash ^ (this.count * 4);
+        hash = hash ^ (hash >>> 16);
+        hash = hash * 0x85EBCA6B;
+        hash = hash ^ (hash >>> 13);
+        hash = hash * 0xC2B2AE35;
+        hash = hash ^ (hash >>> 16);
+        return hash;
+    }
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/SemanticContext.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/**
+ * A tree structure used to record the semantic context in which
+ * an ATN configuration is valid. It's either a single predicate,
+ * a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+ *
+ * <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+ * {@link SemanticContext} within the scope of this outer class.</p>
+ */ +class SemanticContext { + + hashCode() { + const hash = new HashCode(); + this.updateHashCode(hash); + return hash.finish(); + } + + /** + * For context independent predicates, we evaluate them without a local + * context (i.e., null context). That way, we can evaluate them without + * having to create proper rule-specific context during prediction (as + * opposed to the parser, which creates them naturally). In a practical + * sense, this avoids a cast exception from RuleContext to myruleContext. + * + *For context dependent predicates, we must pass in a local context so that + * references such as $arg evaluate properly as _localctx.arg. We only + * capture context dependent predicates in the context in which we begin + * prediction, so we passed in the outer context here in case of context + * dependent predicate evaluation.
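+ *
+ * <p>(Editorial note: the predicates in question are grammar sempreds such as
+ * {@code stat : {this.allowsLet}? 'let' ID | ID ;}; the generated parser calls
+ * back into them through this method during prediction.)</p>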
+ */
+    evaluate(parser, outerContext) {}
+
+    /**
+     * Evaluate the precedence predicates for the context and reduce the result.
+     *
+     * @param parser The parser instance.
+     * @param outerContext The current parser context object.
+     * @return The simplified semantic context after precedence predicates are
+     * evaluated, which will be one of the following values.
+     * <ul>
+     * <li>{@link //NONE}: if the predicate simplifies to {@code true} after
+     * precedence predicates are evaluated.</li>
+     * <li>{@code null}: if the predicate simplifies to {@code false} after
+     * precedence predicates are evaluated.</li>
+     * <li>{@code this}: if the semantic context is not changed as a result of
+     * precedence predicate evaluation.</li>
+     * <li>A non-{@code null} {@link SemanticContext}: the new simplified
+     * semantic context after precedence predicates are evaluated.</li>
+     * </ul>
+ */
+    evaluate(parser, outerContext) {
+        for (let i = 0; i < this.opnds.length; i++) {
+            if (this.opnds[i].evaluate(parser, outerContext)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    evalPrecedence(parser, outerContext) {
+        let differs = false;
+        const operands = [];
+        for (let i = 0; i < this.opnds.length; i++) {
+            const context = this.opnds[i];
+            const evaluated = context.evalPrecedence(parser, outerContext);
+            differs |= (evaluated !== context);
+            if (evaluated === SemanticContext.NONE) {
+                // The OR context is true if any element is true
+                return SemanticContext.NONE;
+            } else if (evaluated !== null) {
+                // Reduce the result by skipping false elements
+                operands.push(evaluated);
+            }
+        }
+        if (!differs) {
+            return this;
+        }
+        if (operands.length === 0) {
+            // all elements were false, so the OR context is false
+            return null;
+        }
+        let result = null;
+        operands.map(function(o) {
+            result = result === null ? o : SemanticContext.orContext(result, o);
+        });
+        return result;
+    }
+
+    toString() {
+        const s = this.opnds.map(o => o.toString());
+        return (s.length > 3 ? s.slice(3) : s).join("||");
+    }
+}
+
+function filterPrecedencePredicates(set) {
+    const result = [];
+    set.values().map( function(context) {
+        if (context instanceof SemanticContext.PrecedencePredicate) {
+            result.push(context);
+        }
+    });
+    return result;
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNConfig.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+function checkParams(params, isCfg) {
+    if(params===null) {
+        const result = { state:null, alt:null, context:null, semanticContext:null };
+        if(isCfg) {
+            result.reachesIntoOuterContext = 0;
+        }
+        return result;
+    } else {
+        const props = {};
+        props.state = params.state || null;
+        props.alt = (params.alt === undefined) ? null : params.alt;
+        props.context = params.context || null;
+        props.semanticContext = params.semanticContext || null;
+        if(isCfg) {
+            props.reachesIntoOuterContext = params.reachesIntoOuterContext || 0;
+            props.precedenceFilterSuppressed = params.precedenceFilterSuppressed || false;
+        }
+        return props;
+    }
+}
+
+class ATNConfig {
+    /**
+     * @param {Object} params A tuple: (ATN state, predicted alt, syntactic, semantic context).
+     * The syntactic context is a graph-structured stack node whose
+     * path(s) to the root is the rule invocation(s)
+     * chain used to arrive at the state. The semantic context is
+     * the tree of semantic predicates encountered before reaching
+     * an ATN state
+     */
+    constructor(params, config) {
+        this.checkContext(params, config);
+        params = checkParams(params);
+        config = checkParams(config, true);
+        // The ATN state associated with this configuration
+        this.state = params.state!==null ? params.state : config.state;
+        // What alt (or lexer rule) is predicted by this configuration
+        this.alt = params.alt!==null ? params.alt : config.alt;
+        /**
+         * The stack of invoking states leading to the rule/states associated
+         * with this config. We track only those contexts pushed during
+         * execution of the ATN simulator
+         */
+        this.context = params.context!==null ? params.context : config.context;
+        this.semanticContext = params.semanticContext!==null ? params.semanticContext :
+            (config.semanticContext!==null ?
config.semanticContext : SemanticContext.NONE); + // TODO: make it a boolean then + /** + * We cannot execute predicates dependent upon local context unless + * we know for sure we are in the correct context. Because there is + * no way to do this efficiently, we simply cannot evaluate + * dependent predicates unless we are in the rule that initially + * invokes the ATN simulator. + * closure() tracks the depth of how far we dip into the + * outer context: depth > 0. Note that it may not be totally + * accurate depth since I don't ever decrement + */ + this.reachesIntoOuterContext = config.reachesIntoOuterContext; + this.precedenceFilterSuppressed = config.precedenceFilterSuppressed; + } + + checkContext(params, config) { + if((params.context===null || params.context===undefined) && + (config===null || config.context===null || config.context===undefined)) { + this.context = null; + } + } + + hashCode() { + const hash = new HashCode(); + this.updateHashCode(hash); + return hash.finish(); + } + + updateHashCode(hash) { + hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext); + } + + /** + * An ATN configuration is equal to another if both have + * the same state, they predict the same alternative, and + * syntactic/semantic contexts are the same + */ + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof ATNConfig)) { + return false; + } else { + return this.state.stateNumber===other.state.stateNumber && + this.alt===other.alt && + (this.context===null ? other.context===null : this.context.equals(other.context)) && + this.semanticContext.equals(other.semanticContext) && + this.precedenceFilterSuppressed===other.precedenceFilterSuppressed; + } + } + + hashCodeForConfigSet() { + const hash = new HashCode(); + hash.update(this.state.stateNumber, this.alt, this.semanticContext); + return hash.finish(); + } + + equalsForConfigSet(other) { + if (this === other) { + return true; + } else if (! (other instanceof ATNConfig)) { + return false; + } else { + return this.state.stateNumber===other.state.stateNumber && + this.alt===other.alt && + this.semanticContext.equals(other.semanticContext); + } + } + + toString() { + return "(" + this.state + "," + this.alt + + (this.context!==null ? ",[" + this.context.toString() + "]" : "") + + (this.semanticContext !== SemanticContext.NONE ? + ("," + this.semanticContext.toString()) + : "") + + (this.reachesIntoOuterContext>0 ? + (",up=" + this.reachesIntoOuterContext) + : "") + ")"; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/IntervalSet.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */
+
+class IntervalSet {
+    constructor() {
+        this.intervals = null;
+        this.readOnly = false;
+    }
+
+    first(v) {
+        if (this.intervals === null || this.intervals.length===0) {
+            return Token.INVALID_TYPE;
+        } else {
+            return this.intervals[0].start;
+        }
+    }
+
+    addOne(v) {
+        this.addInterval(new Interval(v, v + 1));
+    }
+
+    addRange(l, h) {
+        this.addInterval(new Interval(l, h + 1));
+    }
+
+    addInterval(toAdd) {
+        if (this.intervals === null) {
+            this.intervals = [];
+            this.intervals.push(toAdd.clone());
+        } else {
+            // find insert pos
+            for (let pos = 0; pos < this.intervals.length; pos++) {
+                const existing = this.intervals[pos];
+                // distinct range -> insert
+                if (toAdd.stop < existing.start) {
+                    this.intervals.splice(pos, 0, toAdd);
+                    return;
+                }
+                // contiguous range -> adjust
+                else if (toAdd.stop === existing.start) {
+                    this.intervals[pos] = new Interval(toAdd.start, existing.stop);
+                    return;
+                }
+                // overlapping range -> adjust and reduce
+                else if (toAdd.start <= existing.stop) {
+                    this.intervals[pos] = new Interval(Math.min(existing.start, toAdd.start), Math.max(existing.stop, toAdd.stop));
+                    this.reduce(pos);
+                    return;
+                }
+            }
+            // greater than any existing
+            this.intervals.push(toAdd.clone());
+        }
+    }
+
+    addSet(other) {
+        if (other.intervals !== null) {
+            other.intervals.forEach( toAdd => this.addInterval(toAdd), this);
+        }
+        return this;
+    }
+
+    reduce(pos) {
+        // only need to reduce if pos is not the last
+        if (pos < this.intervals.length - 1) {
+            const current = this.intervals[pos];
+            const next = this.intervals[pos + 1];
+            // if next contained in current
+            if (current.stop >= next.stop) {
+                this.intervals.splice(pos + 1, 1);
+                this.reduce(pos);
+            } else if (current.stop >= next.start) {
+                this.intervals[pos] = new Interval(current.start, next.stop);
+                this.intervals.splice(pos + 1, 1);
+            }
+        }
+    }
+
+    complement(start, stop) {
+        const result = new IntervalSet();
+        result.addInterval(new Interval(start, stop + 1));
+        if(this.intervals !== null)
+            this.intervals.forEach(toRemove => result.removeRange(toRemove));
+        return result;
+    }
+
+    contains(item) {
+        if (this.intervals === null) {
+            return false;
+        } else {
+            for (let k = 0; k < this.intervals.length; k++) {
+                if(this.intervals[k].contains(item)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+
+    removeRange(toRemove) {
+        if(toRemove.start===toRemove.stop-1) {
+            this.removeOne(toRemove.start);
+        } else if (this.intervals !== null) {
+            let pos = 0;
+            for(let n=0; n<this.intervals.length; n++) {
+                const existing = this.intervals[pos];
+                // intervals are ordered
+                if (toRemove.stop <= existing.start) {
+                    return;
+                }
+                // check for including range, split it
+                else if (toRemove.start > existing.start && toRemove.stop < existing.stop) {
+                    this.intervals[pos] = new Interval(existing.start, toRemove.start);
+                    const x = new Interval(toRemove.stop, existing.stop);
+                    this.intervals.splice(pos + 1, 0, x);
+                    return;
+                }
+                // check for included range, remove it
+                else if (toRemove.start <= existing.start && toRemove.stop >= existing.stop) {
+                    this.intervals.splice(pos, 1);
+                    pos = pos - 1; // need another pass
+                }
+                // check for lower boundary
+                else if (toRemove.start < existing.stop) {
+                    this.intervals[pos] = new Interval(existing.start, toRemove.start);
+                }
+                // check for upper boundary
+                else if (toRemove.stop < existing.stop) {
+                    this.intervals[pos] = new Interval(toRemove.stop, existing.stop);
+                }
+                pos += 1;
+            }
+        }
+    }
+
+    removeOne(value) {
+        if (this.intervals !== null) {
+            for (let pos = 0; pos < this.intervals.length; pos++) {
+                const existing = this.intervals[pos];
+                // intervals are ordered
+                if (value < existing.start) {
+                    return;
+                }
+                // check for single value range
+                else if (value === existing.start && value === existing.stop - 1) {
+                    this.intervals.splice(pos, 1);
+                    return;
+                }
+                // check for lower boundary
+                else if (value === existing.start) {
+                    this.intervals[pos] = new Interval(existing.start + 1, existing.stop);
+                    return;
+                }
+                // check for upper boundary
+                else if (value === existing.stop - 1) {
+                    this.intervals[pos] = new Interval(existing.start, existing.stop - 1);
+                    return;
+                }
+                // split existing range
+                else if (value < existing.stop - 1) {
+                    const replace = new Interval(existing.start, value);
+                    existing.start = value + 1;
+                    this.intervals.splice(pos, 0, replace);
+                    return;
+                }
+            }
+        }
+    }
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/Transition.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/**
+ * An ATN transition between any two ATN states. Subclasses define
+ * atom, set, epsilon, action, predicate, rule transitions.
+ *
+ * <p>This is a one way link. It emanates from a state (usually via a list of
+ * transitions) and has a target state.</p>
+ * + *Since we never have to change the ATN transitions once we construct it, + * we can fix these transitions as specific classes. The DFA transitions + * on the other hand need to update the labels as it adds transitions to + * the states. We'll use the term Edge for the DFA to distinguish them from + * ATN transitions.
+ */ +class Transition { + constructor(target) { + // The target of this transition. + if (target===undefined || target===null) { + throw "target cannot be null."; + } + this.target = target; + // Are we epsilon, action, sempred? + this.isEpsilon = false; + this.label = null; + } +} + +// constants for serialization + +Transition.EPSILON = 1; +Transition.RANGE = 2; +Transition.RULE = 3; +// e.g., {isType(input.LT(1))}? +Transition.PREDICATE = 4; +Transition.ATOM = 5; +Transition.ACTION = 6; +// ~(A|B) or ~atom, wildcard, which convert to next 2 +Transition.SET = 7; +Transition.NOT_SET = 8; +Transition.WILDCARD = 9; +Transition.PRECEDENCE = 10; + +Transition.serializationNames = [ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE" + ]; + +Transition.serializationTypes = { + EpsilonTransition: Transition.EPSILON, + RangeTransition: Transition.RANGE, + RuleTransition: Transition.RULE, + PredicateTransition: Transition.PREDICATE, + AtomTransition: Transition.ATOM, + ActionTransition: Transition.ACTION, + SetTransition: Transition.SET, + NotSetTransition: Transition.NOT_SET, + WildcardTransition: Transition.WILDCARD, + PrecedencePredicateTransition: Transition.PRECEDENCE + }; + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/RuleTransition.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class RuleTransition extends Transition { + constructor(ruleStart, ruleIndex, precedence, followState) { + super(ruleStart); + // ptr to the rule definition object for this rule ref + this.ruleIndex = ruleIndex; + this.precedence = precedence; + // what node to begin computations following ref to rule + this.followState = followState; + this.serializationType = Transition.RULE; + this.isEpsilon = true; + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/SetTransition.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +// A transition containing a set of values. + + + + +class SetTransition extends Transition { + constructor(target, set) { + super(target); + this.serializationType = Transition.SET; + if (set !==undefined && set !==null) { + this.label = set; + } else { + this.label = new IntervalSet(); + this.label.addOne(Token.INVALID_TYPE); + } + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return this.label.contains(symbol); + } + + toString() { + return this.label.toString(); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/NotSetTransition.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + + + +class NotSetTransition extends SetTransition { + constructor(target, set) { + super(target, set); + this.serializationType = Transition.NOT_SET; + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && + !super.matches(symbol, minVocabSymbol, maxVocabSymbol); + } + + toString() { + return '~' + super.toString(); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/transition/WildcardTransition.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class WildcardTransition extends Transition { + constructor(target) { + super(target); + this.serializationType = Transition.WILDCARD; + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; + } + + toString() { + return "."; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/AbstractPredicateTransition.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class AbstractPredicateTransition extends Transition { + constructor(target) { + super(target); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/Tree.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/** + * The basic notion of a tree has a parent, a payload, and a list of children. + * It is the most abstract interface for all the trees used by ANTLR. + */ +class Tree {} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/SyntaxTree.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class SyntaxTree extends Tree { +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/ParseTree.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class ParseTree extends SyntaxTree { +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/RuleNode.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class RuleNode extends ParseTree { + + getRuleContext(){ + throw new Error("missing interface implementation") + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/TerminalNode.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class TerminalNode extends ParseTree { +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/ErrorNode.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. 
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+class ErrorNode extends TerminalNode {
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/utils/escapeWhitespace.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+function escapeWhitespace(s, escapeSpaces) {
+    s = s.replace(/\t/g, "\\t")
+        .replace(/\n/g, "\\n")
+        .replace(/\r/g, "\\r");
+    if (escapeSpaces) {
+        s = s.replace(/ /g, "\u00B7");
+    }
+    return s;
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/Trees.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+const Trees = {
+    /**
+     * Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+     * node payloads to get the text for the nodes. Detect
+     * parse trees and extract data appropriately.
+     */
+    toStringTree: function(tree, ruleNames, recog) {
+        ruleNames = ruleNames || null;
+        recog = recog || null;
+        if(recog!==null) {
+            ruleNames = recog.ruleNames;
+        }
+        let s = Trees.getNodeText(tree, ruleNames);
+        s = escapeWhitespace(s, false);
+        const c = tree.getChildCount();
+        if(c===0) {
+            return s;
+        }
+        let res = "(" + s + ' ';
+        if(c>0) {
+            s = Trees.toStringTree(tree.getChild(0), ruleNames);
+            res = res.concat(s);
+        }
+        for(let i=1; i<c; i++) {
+            s = Trees.toStringTree(tree.getChild(i), ruleNames);
+            res = res.concat(' ' + s);
+        }
+        res = res.concat(")");
+        return res;
+    },
+};
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/PredictionContext.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+class PredictionContext {
+
+    constructor(cachedHashCode) {
+        /**
+         * Stores the computed hash code of this {@link PredictionContext}. The hash
+         * code is computed in parts to match the following reference algorithm.
+         *
+         * <pre>
+         * private int referenceHashCode() {
+         *     int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link //INITIAL_HASH});
+         *
+         *     for (int i = 0; i < {@link //size()}; i++) {
+         *         hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent getParent}(i));
+         *     }
+         *
+         *     for (int i = 0; i < {@link //size()}; i++) {
+         *         hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getReturnState getReturnState}(i));
+         *     }
+         *
+         *     hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2 * {@link //size()});
+         *     return hash;
+         * }
+         * </pre>
+         */
+        this.cachedHashCode = cachedHashCode;
+    }
+
+    /**
+     * This means only the {@link //EMPTY} context is in set.
+     */
+    isEmpty() {
+        return this === PredictionContext.EMPTY;
+    }
+
+    hasEmptyPath() {
+        return this.getReturnState(this.length - 1) === PredictionContext.EMPTY_RETURN_STATE;
+    }
+
+    hashCode() {
+        return this.cachedHashCode;
+    }
+
+    updateHashCode(hash) {
+        hash.update(this.cachedHashCode);
+    }
+}
+
+/**
+ * Represents {@code $} in local context prediction, which means wildcard.
+ * {@code //+x = //}.
+ */
+PredictionContext.EMPTY = null;
+
+/**
+ * Represents {@code $} in an array in full context mode, when {@code $}
+ * doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+ * {@code $} = {@link //EMPTY_RETURN_STATE}.
+ */
+PredictionContext.EMPTY_RETURN_STATE = 0x7FFFFFFF;
+
+PredictionContext.globalNodeCount = 1;
+PredictionContext.id = PredictionContext.globalNodeCount;
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/ArrayPredictionContext.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */ + + + + +class ArrayPredictionContext extends PredictionContext { + + constructor(parents, returnStates) { + /** + * Parent can be null only if full ctx mode and we make an array + * from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + * null parent and + * returnState == {@link //EMPTY_RETURN_STATE}. + */ + const h = new HashCode(); + h.update(parents, returnStates); + const hashCode = h.finish(); + super(hashCode); + this.parents = parents; + this.returnStates = returnStates; + return this; + } + + isEmpty() { + // since EMPTY_RETURN_STATE can only appear in the last position, we + // don't need to verify that size==1 + return this.returnStates[0] === PredictionContext.EMPTY_RETURN_STATE; + } + + getParent(index) { + return this.parents[index]; + } + + getReturnState(index) { + return this.returnStates[index]; + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof ArrayPredictionContext)) { + return false; + } else if (this.hashCode() !== other.hashCode()) { + return false; // can't be same if hash is different + } else { + return equalArrays(this.returnStates, other.returnStates) && + equalArrays(this.parents, other.parents); + } + } + + toString() { + if (this.isEmpty()) { + return "[]"; + } else { + let s = "["; + for (let i = 0; i < this.returnStates.length; i++) { + if (i > 0) { + s = s + ", "; + } + if (this.returnStates[i] === PredictionContext.EMPTY_RETURN_STATE) { + s = s + "$"; + continue; + } + s = s + this.returnStates[i]; + if (this.parents[i] !== null) { + s = s + " " + this.parents[i]; + } else { + s = s + "null"; + } + } + return s + "]"; + } + } + + get length(){ + return this.returnStates.length; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/SingletonPredictionContext.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +class SingletonPredictionContext extends PredictionContext { + + constructor(parent, returnState) { + let hashCode = 0; + const hash = new HashCode(); + if(parent !== null) { + hash.update(parent, returnState); + } else { + hash.update(1); + } + hashCode = hash.finish(); + super(hashCode); + this.parentCtx = parent; + this.returnState = returnState; + } + + getParent(index) { + return this.parentCtx; + } + + getReturnState(index) { + return this.returnState; + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof SingletonPredictionContext)) { + return false; + } else if (this.hashCode() !== other.hashCode()) { + return false; // can't be same if hash is different + } else { + if(this.returnState !== other.returnState) + return false; + else if(this.parentCtx==null) + return other.parentCtx==null + else + return this.parentCtx.equals(other.parentCtx); + } + } + + toString() { + const up = this.parentCtx === null ? 
"" : this.parentCtx.toString(); + if (up.length === 0) { + if (this.returnState === PredictionContext.EMPTY_RETURN_STATE) { + return "$"; + } else { + return "" + this.returnState; + } + } else { + return "" + this.returnState + " " + up; + } + } + + get length(){ + return 1; + } + + static create(parent, returnState) { + if (returnState === PredictionContext.EMPTY_RETURN_STATE && parent === null) { + // someone can pass in the bits of an array ctx that mean $ + return PredictionContext.EMPTY; + } else { + return new SingletonPredictionContext(parent, returnState); + } + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/EmptyPredictionContext.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +class EmptyPredictionContext extends SingletonPredictionContext { + + constructor() { + super(null, PredictionContext.EMPTY_RETURN_STATE); + } + + isEmpty() { + return true; + } + + getParent(index) { + return null; + } + + getReturnState(index) { + return this.returnState; + } + + equals(other) { + return this === other; + } + + toString() { + return "$"; + } +} + + +PredictionContext.EMPTY = new EmptyPredictionContext(); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/HashMap.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +const HashMap_HASH_KEY_PREFIX = "h-"; + +class HashMap_HashMap { + + constructor(hashFunction, equalsFunction) { + this.data = {}; + this.hashFunction = hashFunction || standardHashCodeFunction; + this.equalsFunction = equalsFunction || standardEqualsFunction; + } + + set(key, value) { + const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); + if (hashKey in this.data) { + const entries = this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) { + const oldValue = entry.value; + entry.value = value; + return oldValue; + } + } + entries.push({key:key, value:value}); + return value; + } else { + this.data[hashKey] = [{key:key, value:value}]; + return value; + } + } + + containsKey(key) { + const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); + if(hashKey in this.data) { + const entries = this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) + return true; + } + } + return false; + } + + get(key) { + const hashKey = HashMap_HASH_KEY_PREFIX + this.hashFunction(key); + if(hashKey in this.data) { + const entries = this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) + return entry.value; + } + } + return null; + } + + entries() { + return Object.keys(this.data).filter(key => key.startsWith(HashMap_HASH_KEY_PREFIX)).flatMap(key => this.data[key], this); + } + + getKeys() { + return this.entries().map(e => e.key); + } + + getValues() { + return this.entries().map(e => e.value); + } + + toString() { + const ss = this.entries().map(e => '{' + e.key + ':' + e.value + '}'); + return '[' + ss.join(", ") + ']'; + } + + get length() { + return Object.keys(this.data).filter(key => key.startsWith(HashMap_HASH_KEY_PREFIX)).map(key => 
this.data[key].length, this).reduce((accum, item) => accum + item, 0); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/context/PredictionContextUtils.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + +/** + * Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. + * Return {@link //EMPTY} if {@code outerContext} is empty or null. + */ +function predictionContextFromRuleContext(atn, outerContext) { + if (outerContext === undefined || outerContext === null) { + outerContext = RuleContext.EMPTY; + } + // if we are in RuleContext of start rule, s, then PredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if (outerContext.parentCtx === null || outerContext === RuleContext.EMPTY) { + return PredictionContext.EMPTY; + } + // If we have a parent, convert it to a PredictionContext graph + const parent = predictionContextFromRuleContext(atn, outerContext.parentCtx); + const state = atn.states[outerContext.invokingState]; + const transition = state.transitions[0]; + return SingletonPredictionContext.create(parent, transition.followState.stateNumber); +} + + +function getCachedPredictionContext(context, contextCache, visited) { + if (context.isEmpty()) { + return context; + } + let existing = visited.get(context) || null; + if (existing !== null) { + return existing; + } + existing = contextCache.get(context); + if (existing !== null) { + visited.set(context, existing); + return existing; + } + let changed = false; + let parents = []; + for (let i = 0; i < parents.length; i++) { + const parent = getCachedPredictionContext(context.getParent(i), contextCache, visited); + if (changed || parent !== context.getParent(i)) { + if (!changed) { + parents = []; + for (let j = 0; j < context.length; j++) { + parents[j] = context.getParent(j); + } + changed = true; + } + parents[i] = parent; + } + } + if (!changed) { + contextCache.add(context); + visited.set(context, context); + return context; + } + let updated = null; + if (parents.length === 0) { + updated = PredictionContext.EMPTY; + } else if (parents.length === 1) { + updated = SingletonPredictionContext.create(parents[0], context + .getReturnState(0)); + } else { + updated = new ArrayPredictionContext(parents, context.returnStates); + } + contextCache.add(updated); + visited.set(updated, updated); + visited.set(context, updated); + + return updated; +} + +function merge(a, b, rootIsWildcard, mergeCache) { + // share same graph if both same + if (a === b) { + return a; + } + if (a instanceof SingletonPredictionContext && b instanceof SingletonPredictionContext) { + return mergeSingletons(a, b, rootIsWildcard, mergeCache); + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if (rootIsWildcard) { + if (a instanceof EmptyPredictionContext) { + return a; + } + if (b instanceof EmptyPredictionContext) { + return b; + } + } + // convert singleton so both are arrays to normalize + if (a instanceof SingletonPredictionContext) { + a = new ArrayPredictionContext([a.getParent()], [a.returnState]); + } + if (b instanceof SingletonPredictionContext) { + b = new ArrayPredictionContext([b.getParent()], [b.returnState]); + } + return mergeArrays(a, b, rootIsWildcard, mergeCache); +} + + +/** + * Merge two {@link ArrayPredictionContext} instances. + * + *
+ * <p>The original javadoc illustrates each merge case with an embedded SVG
+ * diagram; only the captions survive here:</p>
+ *
+ * <p>Different tops, different parents.</p>
+ *
+ * <p>Shared top, same parents.</p>
+ *
+ * <p>Shared top, different parents.</p>
+ *
+ * <p>Shared top, all shared parents.</p>
+ *
+ * <p>Equal tops, merge parents and reduce top to
+ * {@link SingletonPredictionContext}.</p>
+ */
+
+/**
+ * Merge two {@link SingletonPredictionContext} instances.
+ *
+ * <p>Stack tops equal, parents merge is same; return left graph.</p>
+ *
+ * <p>Same stack top, parents differ; merge parents giving array node, then
+ * remainders of those graphs. A new root node is created to point to the
+ * merged parents.</p>
+ *
+ * <p>Different stack tops pointing to same parent. Make array node for the
+ * root where both element in the root point to the same (original)
+ * parent.</p>
+ *
+ * <p>Different stack tops pointing to different parents. Make array node for
+ * the root where each element points to the corresponding original
+ * parent.</p>
+ */
+
+/**
+ * Handle case where at least one of {@code a} or {@code b} is
+ * {@link //EMPTY}.
+ *
+ * <p>These local-context merge operations are used when {@code rootIsWildcard}
+ * is true.</p>
+ *
+ * <p>{@link //EMPTY} is superset of any graph; return {@link //EMPTY}.</p>
+ *
+ * <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+ * {@code //EMPTY}; return left graph.</p>
+ *
+ * <p>Special case of last merge if local context.</p>
+ *
+ * <p>These full-context merge operations are used when {@code rootIsWildcard}
+ * is false.</p>
+ *
+ * <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
+ * null parent).</p>
+ */
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/LL1Analyzer.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+class LL1Analyzer {
+    constructor(atn) {
+        this.atn = atn;
+    }
+
+ /**
+ * Compute set of tokens that can follow {@code s} in the ATN in the
+ * specified {@code ctx}.
+ *
+ * <p>If {@code ctx} is {@code null} and the end of the rule containing
+ * {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+ * If {@code ctx} is not {@code null} and the end of the outermost rule is
+ * reached, {@link Token//EOF} is added to the result set.</p>
+ * + * @param s the ATN state + * @param stopState the ATN state to stop at. This can be a + * {@link BlockEndState} to detect epsilon paths through a closure. + * @param ctx the complete parser context, or {@code null} if the context + * should be ignored + * + * @return The set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + */ + LOOK(s, stopState, ctx) { + const r = new IntervalSet(); + const seeThruPreds = true; // ignore preds; get all lookahead + ctx = ctx || null; + const lookContext = ctx!==null ? predictionContextFromRuleContext(s.atn, ctx) : null; + this._LOOK(s, stopState, lookContext, r, new HashSet(), new BitSet(), seeThruPreds, true); + return r; + } + + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *If {@code ctx} is {@code null} and {@code stopState} or the end of the + * rule containing {@code s} is reached, {@link Token//EPSILON} is added to + * the result set. If {@code ctx} is not {@code null} and {@code addEOF} is + * {@code true} and {@code stopState} or the end of the outermost rule is + * reached, {@link Token//EOF} is added to the result set.
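+ *
+ * <p>(Editorial note: this analysis is what ultimately backs expected-token
+ * queries such as {@link RecognitionException//getExpectedTokens}; it walks
+ * epsilon edges through the ATN and collects viable token types into an
+ * {@link IntervalSet}.)</p>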
+ *
+ * @param s the ATN state.
+ * @param stopState the ATN state to stop at. This can be a
+ * {@link BlockEndState} to detect epsilon paths through a closure.
+ * @param ctx The outer context, or {@code null} if the outer context should
+ * not be used.
+ * @param look The result lookahead set.
+ * @param lookBusy A set used for preventing epsilon closures in the ATN
+ * from causing a stack overflow. Outside code should pass
+ * {@code new CustomizedSet<ATNConfig>} for this argument.
+ * @param calledRuleStack A set used for preventing left recursion in the
+ * ATN from causing a stack overflow. Outside code should pass
+ * {@code new BitSet()} for this argument.
+ * @param seeThruPreds {@code true} to treat semantic predicates as
+ * implicitly {@code true} and "see through them", otherwise {@code false}
+ * to treat semantic predicates as opaque and add {@link //HIT_PRED} to the
+ * result if one is encountered.
+ * @param addEOF Add {@link Token//EOF} to the result if the end of the
+ * outermost context is reached. This parameter has no effect if {@code ctx}
+ * is {@code null}.
+ */
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerActionType.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+const LexerActionType = {
+    CHANNEL: 0,     // The type of a {@link LexerChannelAction} action.
+    CUSTOM: 1,      // The type of a {@link LexerCustomAction} action.
+    MODE: 2,        // The type of a {@link LexerModeAction} action.
+    MORE: 3,        // The type of a {@link LexerMoreAction} action.
+    POP_MODE: 4,    // The type of a {@link LexerPopModeAction} action.
+    PUSH_MODE: 5,   // The type of a {@link LexerPushModeAction} action.
+    SKIP: 6,        // The type of a {@link LexerSkipAction} action.
+    TYPE: 7         // The type of a {@link LexerTypeAction} action.
+};
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerAction.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+class LexerAction {
+    constructor(action) {
+        this.actionType = action;
+        this.isPositionDependent = false;
+    }
+
+    hashCode() {
+        const hash = new HashCode();
+        this.updateHashCode(hash);
+        return hash.finish();
+    }
+
+    updateHashCode(hash) {
+        hash.update(this.actionType);
+    }
+
+    equals(other) {
+        return this === other;
+    }
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerSkipAction.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use is of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/**
+ * Implements the {@code skip} lexer action by calling {@link Lexer//skip}.
+ *
+ * <p>The {@code skip} command does not have any parameters, so this action is
+ * implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
+ */ +class LexerSkipAction extends LexerAction { + constructor() { + super(LexerActionType.SKIP); + } + + execute(lexer) { + lexer.skip(); + } + + toString() { + return "skip"; + } +} + +// Provides a singleton instance of this parameterless lexer action. +LexerSkipAction.INSTANCE = new LexerSkipAction(); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerChannelAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code channel} lexer action by calling + * {@link Lexer//setChannel} with the assigned channel. + * Constructs a new {@code channel} action with the specified channel value. + * @param channel The channel value to pass to {@link Lexer//setChannel} + */ +class LexerChannelAction extends LexerAction { + constructor(channel) { + super(LexerActionType.CHANNEL); + this.channel = channel; + } + + /** + *This action is implemented by calling {@link Lexer//setChannel} with the + * value provided by {@link //getChannel}.
+ */ + execute(lexer) { + lexer._channel = this.channel; + } + + updateHashCode(hash) { + hash.update(this.actionType, this.channel); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerChannelAction)) { + return false; + } else { + return this.channel === other.channel; + } + } + + toString() { + return "channel(" + this.channel + ")"; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerCustomAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Executes a custom lexer action by calling {@link Recognizer//action} with the + * rule and action indexes assigned to the custom action. The implementation of + * a custom action is added to the generated code for the lexer in an override + * of {@link Recognizer//action} when the grammar is compiled. + * + *This class may represent embedded actions created with the {...}
+ * syntax in ANTLR 4, as well as actions created for lexer commands where the
+ * command argument could not be evaluated when the grammar was compiled.
+ */
+class LexerCustomAction extends LexerAction {
+    /**
+     * Constructs a custom lexer action with the specified rule and action
+     * indexes.
+     *
+     * @param ruleIndex The rule index to use for calls to
+     * {@link Recognizer//action}.
+     * @param actionIndex The action index to use for calls to
+     * {@link Recognizer//action}.
+     */
+    constructor(ruleIndex, actionIndex) {
+        super(LexerActionType.CUSTOM);
+        this.ruleIndex = ruleIndex;
+        this.actionIndex = actionIndex;
+        this.isPositionDependent = true;
+    }
+
+    /**
+     * Custom actions are implemented by calling {@link Lexer//action} with the
+     * appropriate rule and action indexes.
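+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] An embedded action
+// such as (hypothetical grammar):
+//
+//     ID : [a-zA-Z]+ { this.idCount++; } ;
+//
+// is compiled into the generated lexer's action() override; at runtime the
+// stored ruleIndex/actionIndex pair selects which embedded snippet that
+// override runs.
+
+    /**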
+ */ + execute(lexer) { + lexer.action(null, this.ruleIndex, this.actionIndex); + } + + updateHashCode(hash) { + hash.update(this.actionType, this.ruleIndex, this.actionIndex); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerCustomAction)) { + return false; + } else { + return this.ruleIndex === other.ruleIndex && this.actionIndex === other.actionIndex; + } + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerMoreAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code more} lexer action by calling {@link Lexer//more}. + * + *The {@code more} command does not have any parameters, so this action is + * implemented as a singleton instance exposed by {@link //INSTANCE}.
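+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] The more command
+// keeps the text matched so far and continues matching with the next rule
+// instead of emitting a token, e.g.:
+//
+//     LQUOTE : '"' -> more, mode(STR) ;
+
+/**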
+ */
+class LexerMoreAction extends LexerAction {
+    constructor() {
+        super(LexerActionType.MORE);
+    }
+
+    /**
+     * This action is implemented by calling {@link Lexer//more}.
+ */ + execute(lexer) { + lexer.more(); + } + + toString() { + return "more"; + } +} + +LexerMoreAction.INSTANCE = new LexerMoreAction(); + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerTypeAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code type} lexer action by calling {@link Lexer//setType} + * with the assigned type + */ + +class LexerTypeAction extends LexerAction { + constructor(type) { + super(LexerActionType.TYPE); + this.type = type; + } + + execute(lexer) { + lexer.type = this.type; + } + + updateHashCode(hash) { + hash.update(this.actionType, this.type); + } + + equals(other) { + if(this === other) { + return true; + } else if (! (other instanceof LexerTypeAction)) { + return false; + } else { + return this.type === other.type; + } + } + + toString() { + return "type(" + this.type + ")"; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerPushModeAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code pushMode} lexer action by calling + * {@link Lexer//pushMode} with the assigned mode + */ +class LexerPushModeAction extends LexerAction { + constructor(mode) { + super(LexerActionType.PUSH_MODE); + this.mode = mode; + } + + /** + *This action is implemented by calling {@link Lexer//pushMode} with the + * value provided by {@link //getMode}.
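+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] pushMode and popMode
+// maintain a stack of lexer modes, e.g. for island grammars (hypothetical):
+//
+//     OPEN  : '<' -> pushMode(INSIDE) ;
+//     mode INSIDE;
+//     CLOSE : '>' -> popMode ;
+
+    /**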
+ */ + execute(lexer) { + lexer.pushMode(this.mode); + } + + updateHashCode(hash) { + hash.update(this.actionType, this.mode); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerPushModeAction)) { + return false; + } else { + return this.mode === other.mode; + } + } + + toString() { + return "pushMode(" + this.mode + ")"; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerPopModeAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. + * + *The {@code popMode} command does not have any parameters, so this action is + * implemented as a singleton instance exposed by {@link //INSTANCE}.
+ */ +class LexerPopModeAction extends LexerAction { + constructor() { + super(LexerActionType.POP_MODE); + } + + /** + *This action is implemented by calling {@link Lexer//popMode}.
+ */ + execute(lexer) { + lexer.popMode(); + } + + toString() { + return "popMode"; + } +} + +LexerPopModeAction.INSTANCE = new LexerPopModeAction(); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerModeAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +/** + * Implements the {@code mode} lexer action by calling {@link Lexer//mode} with + * the assigned mode + */ +class LexerModeAction extends LexerAction { + constructor(mode) { + super(LexerActionType.MODE); + this.mode = mode; + } + + /** + *This action is implemented by calling {@link Lexer//mode} with the + * value provided by {@link //getMode}.
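+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] Unlike pushMode, the
+// mode command switches the current mode in place without growing the mode
+// stack, e.g.:
+//
+//     STR_START : '"' -> more, mode(STR) ;
+
+    /**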
+ */ + execute(lexer) { + lexer.mode(this.mode); + } + + updateHashCode(hash) { + hash.update(this.actionType, this.mode); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerModeAction)) { + return false; + } else { + return this.mode === other.mode; + } + } + + toString() { + return "mode(" + this.mode + ")"; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNDeserializer.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +const SERIALIZED_VERSION = 4; + +function initArray( length, value) { + const tmp = []; + tmp[length-1] = value; + return tmp.map(function(i) {return value;}); +} + +class ATNDeserializer { + constructor(options) { + + if ( options=== undefined || options === null ) { + options = ATNDeserializationOptions.defaultOptions; + } + this.deserializationOptions = options; + this.stateFactories = null; + this.actionFactories = null; + } + + deserialize(data) { + const legacy = this.reset(data); + this.checkVersion(legacy); + if(legacy) + this.skipUUID(); + const atn = this.readATN(); + this.readStates(atn, legacy); + this.readRules(atn, legacy); + this.readModes(atn); + const sets = []; + this.readSets(atn, sets, this.readInt.bind(this)); + if(legacy) + this.readSets(atn, sets, this.readInt32.bind(this)); + this.readEdges(atn, sets); + this.readDecisions(atn); + this.readLexerActions(atn, legacy); + this.markPrecedenceDecisions(atn); + this.verifyATN(atn); + if (this.deserializationOptions.generateRuleBypassTransitions && atn.grammarType === ATNType.PARSER ) { + this.generateRuleBypassTransitions(atn); + // re-verify after modification + this.verifyATN(atn); + } + return atn; + } + + reset(data) { + const version = data.charCodeAt ? data.charCodeAt(0) : data[0]; + if(version === SERIALIZED_VERSION - 1) { + const adjust = function (c) { + const v = c.charCodeAt(0); + return v > 1 ? v - 2 : v + 65534; + }; + const temp = data.split("").map(adjust); + // don't adjust the first value since that's the version number + temp[0] = data.charCodeAt(0); + this.data = temp; + this.pos = 0; + return true; + } else { + this.data = data + this.pos = 0; + return false; + } + } + + skipUUID() { + let count = 0; + while(count++ < 8) + this.readInt(); + } + + checkVersion(legacy) { + const version = this.readInt(); + if ( !legacy && version !== SERIALIZED_VERSION ) { + throw ("Could not deserialize ATN with version " + version + " (expected " + SERIALIZED_VERSION + ")."); + } + } + + readATN() { + const grammarType = this.readInt(); + const maxTokenType = this.readInt(); + return new ATN(grammarType, maxTokenType); + } + + readStates(atn, legacy) { + let j, pair, stateNumber; + const loopBackStateNumbers = []; + const endStateNumbers = []; + const nstates = this.readInt(); + for(let i=0; iThis method updates {@link //dipsIntoOuterContext} and + * {@link //hasSemanticContext} when necessary.
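+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] Conceptually, adding
+// (s, i, ctx1) and then (s, i, ctx2) to this set leaves a single entry whose
+// context is merge(ctx1, ctx2), rather than two entries; this context
+// merging is what keeps closure sets compact.
+
+    /**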
+ */ + add(config, mergeCache) { + if (mergeCache === undefined) { + mergeCache = null; + } + if (this.readOnly) { + throw "This set is readonly"; + } + if (config.semanticContext !== SemanticContext.NONE) { + this.hasSemanticContext = true; + } + if (config.reachesIntoOuterContext > 0) { + this.dipsIntoOuterContext = true; + } + const existing = this.configLookup.add(config); + if (existing === config) { + this.cachedHashCode = -1; + this.configs.push(config); // track order here + return true; + } + // a previous (s,i,pi,_), merge with it and save result + const rootIsWildcard = !this.fullCtx; + const merged = merge(existing.context, config.context, rootIsWildcard, mergeCache); + /** + * no need to check for existing.context, config.context in cache + * since only way to create new graphs is "call rule" and here. We + * cache at both places + */ + existing.reachesIntoOuterContext = Math.max( existing.reachesIntoOuterContext, config.reachesIntoOuterContext); + // make sure to preserve the precedence filter suppression during the merge + if (config.precedenceFilterSuppressed) { + existing.precedenceFilterSuppressed = true; + } + existing.context = merged; // replace context; no need to alt mapping + return true; + } + + getStates() { + const states = new HashSet(); + for (let i = 0; i < this.configs.length; i++) { + states.add(this.configs[i].state); + } + return states; + } + + getPredicates() { + const preds = []; + for (let i = 0; i < this.configs.length; i++) { + const c = this.configs[i].semanticContext; + if (c !== SemanticContext.NONE) { + preds.push(c.semanticContext); + } + } + return preds; + } + + optimizeConfigs(interpreter) { + if (this.readOnly) { + throw "This set is readonly"; + } + if (this.configLookup.length === 0) { + return; + } + for (let i = 0; i < this.configs.length; i++) { + const config = this.configs[i]; + config.context = interpreter.getCachedContext(config.context); + } + } + + addAll(coll) { + for (let i = 0; i < coll.length; i++) { + this.add(coll[i]); + } + return false; + } + + equals(other) { + return this === other || + (other instanceof ATNConfigSet && + equalArrays(this.configs, other.configs) && + this.fullCtx === other.fullCtx && + this.uniqueAlt === other.uniqueAlt && + this.conflictingAlts === other.conflictingAlts && + this.hasSemanticContext === other.hasSemanticContext && + this.dipsIntoOuterContext === other.dipsIntoOuterContext); + } + + hashCode() { + const hash = new HashCode(); + hash.update(this.configs); + return hash.finish(); + } + + updateHashCode(hash) { + if (this.readOnly) { + if (this.cachedHashCode === -1) { + this.cachedHashCode = this.hashCode(); + } + hash.update(this.cachedHashCode); + } else { + hash.update(this.hashCode()); + } + } + + isEmpty() { + return this.configs.length === 0; + } + + contains(item) { + if (this.configLookup === null) { + throw "This method is not implemented for readonly sets."; + } + return this.configLookup.contains(item); + } + + containsFast(item) { + if (this.configLookup === null) { + throw "This method is not implemented for readonly sets."; + } + return this.configLookup.containsFast(item); + } + + clear() { + if (this.readOnly) { + throw "This set is readonly"; + } + this.configs = []; + this.cachedHashCode = -1; + this.configLookup = new HashSet(); + } + + setReadonly(readOnly) { + this.readOnly = readOnly; + if (readOnly) { + this.configLookup = null; // can't mod, no need for lookup cache + } + } + + toString() { + return arrayToString(this.configs) + + (this.hasSemanticContext ? 
",hasSemanticContext=" + this.hasSemanticContext : "") + + (this.uniqueAlt !== ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + + (this.conflictingAlts !== null ? ",conflictingAlts=" + this.conflictingAlts : "") + + (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : ""); + } + + get items(){ + return this.configs; + } + + get length(){ + return this.configs.length; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/DFAState.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + +/** + * A DFA state represents a set of possible ATN configurations. + * As Aho, Sethi, Ullman p. 117 says "The DFA uses its state + * to keep track of all possible states the ATN can be in after + * reading each input symbol. That is to say, after reading + * input a1a2..an, the DFA is in a state that represents the + * subset T of the states of the ATN that are reachable from the + * ATN's start state along some path labeled a1a2..an." + * In conventional NFA→DFA conversion, therefore, the subset T + * would be a bitset representing the set of states the + * ATN could be in. We need to track the alt predicted by each + * state as well, however. More importantly, we need to maintain + * a stack of states, tracking the closure operations as they + * jump from rule to rule, emulating rule invocations (method calls). + * I have to add a stack to simulate the proper lookahead sequences for + * the underlying LL grammar from which the ATN was derived. + * + *I use a set of ATNConfig objects not simple states. An ATNConfig + * is both a state (ala normal conversion) and a RuleContext describing + * the chain of rules (if any) followed to arrive at that state.
+ * + *A DFA state may have multiple references to a particular state, + * but with different ATN contexts (with same or different alts) + * meaning that state was reached via a different set of rule invocations.
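+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] A DFA state is
+// identified purely by its ATN configuration set: two DFAState instances
+// with equal config sets are the same state regardless of the stateNumber
+// each carries (see equals() in the class below), which is what lets the
+// simulators re-use previously computed states.
+
+/**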
+ */ +class DFAState { + constructor(stateNumber, configs) { + if (stateNumber === null) { + stateNumber = -1; + } + if (configs === null) { + configs = new ATNConfigSet(); + } + this.stateNumber = stateNumber; + this.configs = configs; + /** + * {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) + * {@link Token//EOF} maps to {@code edges[0]}. + */ + this.edges = null; + this.isAcceptState = false; + /** + * if accept state, what ttype do we match or alt do we predict? + * This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link//predicates} + * {@code !=null} or {@link //requiresFullContext}. + */ + this.prediction = 0; + this.lexerActionExecutor = null; + /** + * Indicates that this state was created during SLL prediction that + * discovered a conflict between the configurations in the state. Future + * {@link ParserATNSimulator//execATN} invocations immediately jumped doing + * full context prediction if this field is true. + */ + this.requiresFullContext = false; + /** + * During SLL parsing, this is a list of predicates associated with the + * ATN configurations of the DFA state. When we have predicates, + * {@link //requiresFullContext} is {@code false} since full context + * prediction evaluates predicates + * on-the-fly. If this is not null, then {@link //prediction} is + * {@link ATN//INVALID_ALT_NUMBER}. + * + *We only use these for non-{@link //requiresFullContext} but + * conflicting states. That + * means we know from the context (it's $ or we don't dip into outer + * context) that it's an ambiguity not a conflict.
+ * + *This list is computed by {@link + * ParserATNSimulator//predicateDFAState}.
+ */ + this.predicates = null; + return this; + } + + /** + * Get the set of all alts mentioned by all ATN configurations in this + * DFA state. + */ + getAltSet() { + const alts = new HashSet(); + if (this.configs !== null) { + for (let i = 0; i < this.configs.length; i++) { + const c = this.configs[i]; + alts.add(c.alt); + } + } + if (alts.length === 0) { + return null; + } else { + return alts; + } + } + + /** + * Two {@link DFAState} instances are equal if their ATN configuration sets + * are the same. This method is used to see if a state already exists. + * + *Because the number of alternatives and number of ATN configurations are + * finite, there is a finite number of DFA states that can be processed. + * This is necessary to show that the algorithm terminates.
+ * + *Cannot test the DFA state numbers here because in + * {@link ParserATNSimulator//addDFAState} we need to know if any other state + * exists that has this exact set of ATN configurations. The + * {@link //stateNumber} is irrelevant.
+ */ + equals(other) { + // compare set of ATN configurations in this set with other + return this === other || + (other instanceof DFAState && + this.configs.equals(other.configs)); + } + + toString() { + let s = "" + this.stateNumber + ":" + this.configs; + if(this.isAcceptState) { + s = s + "=>"; + if (this.predicates !== null) + s = s + this.predicates; + else + s = s + this.prediction; + } + return s; + } + + hashCode() { + const hash = new HashCode(); + hash.update(this.configs); + return hash.finish(); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ATNSimulator.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + +class ATNSimulator { + constructor(atn, sharedContextCache) { + /** + * The context cache maps all PredictionContext objects that are == + * to a single cached copy. This cache is shared across all contexts + * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + * to use only cached nodes/graphs in addDFAState(). We don't want to + * fill this during closure() since there are lots of contexts that + * pop up but are not used ever again. It also greatly slows down closure(). + * + *This cache makes a huge difference in memory and a little bit in speed. + * For the Java grammar on java.*, it dropped the memory requirements + * at the end from 25M to 16M. We don't store any of the full context + * graphs in the DFA because they are limited to local context only, + * but apparently there's a lot of repetition there as well. We optimize + * the config contexts before storing the config set in the DFA states + * by literally rebuilding them with cached subgraphs only.
+ * + *I tried a cache for use during closure operations, that was + * whacked after each adaptivePredict(). It cost a little bit + * more time I think and doesn't save on the overall footprint + * so it's not worth the complexity.
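+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] getCachedContext()
+// below collapses value-equal PredictionContext graphs onto one shared
+// instance, so two configs that recorded the same call stack independently
+// end up pointing at a single cached subgraph.
+
+    /**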
+ */ + this.atn = atn; + this.sharedContextCache = sharedContextCache; + return this; + } + + getCachedContext(context) { + if (this.sharedContextCache ===null) { + return context; + } + const visited = new HashMap_HashMap(); + return getCachedPredictionContext(context, this.sharedContextCache, visited); + } +} + +// Must distinguish between missing edge and edge we know leads nowhere/// +ATNSimulator.ERROR = new DFAState(0x7FFFFFFF, new ATNConfigSet()); + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/OrderedATNConfigSet.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +class OrderedATNConfigSet extends ATNConfigSet { + constructor() { + super(); + this.configLookup = new HashSet(); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerATNConfig.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + +class LexerATNConfig extends ATNConfig { + constructor(params, config) { + super(params, config); + + // This is the backing field for {@link //getLexerActionExecutor}. + const lexerActionExecutor = params.lexerActionExecutor || null; + this.lexerActionExecutor = lexerActionExecutor || (config!==null ? config.lexerActionExecutor : null); + this.passedThroughNonGreedyDecision = config!==null ? this.checkNonGreedyDecision(config, this.state) : false; + this.hashCodeForConfigSet = LexerATNConfig.prototype.hashCode; + this.equalsForConfigSet = LexerATNConfig.prototype.equals; + return this; + } + + updateHashCode(hash) { + hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext, this.passedThroughNonGreedyDecision, this.lexerActionExecutor); + } + + equals(other) { + return this === other || + (other instanceof LexerATNConfig && + this.passedThroughNonGreedyDecision === other.passedThroughNonGreedyDecision && + (this.lexerActionExecutor ? this.lexerActionExecutor.equals(other.lexerActionExecutor) : !other.lexerActionExecutor) && + super.equals(other)); + } + + checkNonGreedyDecision(source, target) { + return source.passedThroughNonGreedyDecision || + (target instanceof DecisionState) && target.nonGreedy; + } +} + + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/action/LexerIndexedCustomAction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +/** + * This implementation of {@link LexerAction} is used for tracking input offsets + * for position-dependent actions within a {@link LexerActionExecutor}. + * + *This action is not serialized as part of the ATN, and is only required for + * position-dependent lexer actions which appear at a location other than the + * end of a rule. For more information about DFA optimizations employed for + * lexer actions, see {@link LexerActionExecutor//append} and + * {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+ * + * Constructs a new indexed custom action by associating a character offset + * with a {@link LexerAction}. + * + *Note: This class is only required for lexer actions for which + * {@link LexerAction//isPositionDependent} returns {@code true}.
+ * + * @param offset The offset into the input {@link CharStream}, relative to + * the token start index, at which the specified lexer action should be + * executed. + * @param action The lexer action to execute at a particular offset in the + * input {@link CharStream}. + */ + + + +class LexerIndexedCustomAction extends LexerAction { + constructor(offset, action) { + super(action.actionType); + this.offset = offset; + this.action = action; + this.isPositionDependent = true; + } + + /** + *This method calls {@link //execute} on the result of {@link //getAction} + * using the provided {@code lexer}.
+ */ + execute(lexer) { + // assume the input stream position was properly set by the calling code + this.action.execute(lexer); + } + + updateHashCode(hash) { + hash.update(this.actionType, this.offset, this.action); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerIndexedCustomAction)) { + return false; + } else { + return this.offset === other.offset && this.action === other.action; + } + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerActionExecutor.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + +class LexerActionExecutor { + /** + * Represents an executor for a sequence of lexer actions which traversed during + * the matching operation of a lexer rule (token). + * + *The executor tracks position information for position-dependent lexer actions + * efficiently, ensuring that actions appearing only at the end of the rule do + * not cause bloating of the {@link DFA} created for the lexer.
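+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] Executors are grown
+// one action at a time while the closure walks action transitions, roughly:
+//
+//     executor = LexerActionExecutor.append(config.lexerActionExecutor,
+//                                           atn.lexerActions[actionIndex]);
+//
+// append() (defined further below) tolerates a null executor, so the first
+// action simply starts a fresh one.
+
+    /**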
+ */ + constructor(lexerActions) { + this.lexerActions = lexerActions === null ? [] : lexerActions; + /** + * Caches the result of {@link //hashCode} since the hash code is an element + * of the performance-critical {@link LexerATNConfig//hashCode} operation + */ + this.cachedHashCode = HashCode.hashStuff(lexerActions); // "".join([str(la) for la in + // lexerActions])) + return this; + } + + /** + * Creates a {@link LexerActionExecutor} which encodes the current offset + * for position-dependent lexer actions. + * + *Normally, when the executor encounters lexer actions where + * {@link LexerAction//isPositionDependent} returns {@code true}, it calls + * {@link IntStream//seek} on the input {@link CharStream} to set the input + * position to the end of the current token. This behavior provides + * for efficient DFA representation of lexer actions which appear at the end + * of a lexer rule, even when the lexer rule matches a variable number of + * characters.
+ * + *Prior to traversing a match transition in the ATN, the current offset + * from the token start index is assigned to all position-dependent lexer + * actions which have not already been assigned a fixed offset. By storing + * the offsets relative to the token start index, the DFA representation of + * lexer actions which appear in the middle of tokens remains efficient due + * to sharing among tokens of the same length, regardless of their absolute + * position in the input stream.
+ * + *If the current executor already has offsets assigned to all + * position-dependent lexer actions, the method returns {@code this}.
+ * + * @param offset The current offset to assign to all position-dependent + * lexer actions which do not already have offsets assigned. + * + * @return {LexerActionExecutor} A {@link LexerActionExecutor} which stores input stream offsets + * for all position-dependent lexer actions. + */ + fixOffsetBeforeMatch(offset) { + let updatedLexerActions = null; + for (let i = 0; i < this.lexerActions.length; i++) { + if (this.lexerActions[i].isPositionDependent && + !(this.lexerActions[i] instanceof LexerIndexedCustomAction)) { + if (updatedLexerActions === null) { + updatedLexerActions = this.lexerActions.concat([]); + } + updatedLexerActions[i] = new LexerIndexedCustomAction(offset, + this.lexerActions[i]); + } + } + if (updatedLexerActions === null) { + return this; + } else { + return new LexerActionExecutor(updatedLexerActions); + } + } + + /** + * Execute the actions encapsulated by this executor within the context of a + * particular {@link Lexer}. + * + *This method calls {@link IntStream//seek} to set the position of the + * {@code input} {@link CharStream} prior to calling + * {@link LexerAction//execute} on a position-dependent action. Before the + * method returns, the input position will be restored to the same position + * it was in when the method was invoked.
+ * + * @param lexer The lexer instance. + * @param input The input stream which is the source for the current token. + * When this method is called, the current {@link IntStream//index} for + * {@code input} should be the start of the following token, i.e. 1 + * character past the end of the current token. + * @param startIndex The token start index. This value may be passed to + * {@link IntStream//seek} to set the {@code input} position to the beginning + * of the token. + */ + execute(lexer, input, startIndex) { + let requiresSeek = false; + const stopIndex = input.index; + try { + for (let i = 0; i < this.lexerActions.length; i++) { + let lexerAction = this.lexerActions[i]; + if (lexerAction instanceof LexerIndexedCustomAction) { + const offset = lexerAction.offset; + input.seek(startIndex + offset); + lexerAction = lexerAction.action; + requiresSeek = (startIndex + offset) !== stopIndex; + } else if (lexerAction.isPositionDependent) { + input.seek(stopIndex); + requiresSeek = false; + } + lexerAction.execute(lexer); + } + } finally { + if (requiresSeek) { + input.seek(stopIndex); + } + } + } + + hashCode() { + return this.cachedHashCode; + } + + updateHashCode(hash) { + hash.update(this.cachedHashCode); + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof LexerActionExecutor)) { + return false; + } else if (this.cachedHashCode != other.cachedHashCode) { + return false; + } else if (this.lexerActions.length != other.lexerActions.length) { + return false; + } else { + const numActions = this.lexerActions.length + for (let idx = 0; idx < numActions; ++idx) { + if (!this.lexerActions[idx].equals(other.lexerActions[idx])) { + return false; + } + } + return true; + } + } + + /** + * Creates a {@link LexerActionExecutor} which executes the actions for + * the input {@code lexerActionExecutor} followed by a specified + * {@code lexerAction}. + * + * @param lexerActionExecutor The executor for actions already traversed by + * the lexer while matching a token within a particular + * {@link LexerATNConfig}. If this is {@code null}, the method behaves as + * though it were an empty executor. + * @param lexerAction The lexer action to execute after the actions + * specified in {@code lexerActionExecutor}. + * + * @return {LexerActionExecutor} A {@link LexerActionExecutor} for executing the combine actions + * of {@code lexerActionExecutor} and {@code lexerAction}. + */ + static append(lexerActionExecutor, lexerAction) { + if (lexerActionExecutor === null) { + return new LexerActionExecutor([ lexerAction ]); + } + const lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]); + return new LexerActionExecutor(lexerActions); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/LexerATNSimulator.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + + + + + + + + + +function resetSimState(sim) { + sim.index = -1; + sim.line = 0; + sim.column = -1; + sim.dfaState = null; +} + +class SimState { + constructor() { + resetSimState(this); + } + + reset() { + resetSimState(this); + } +} + +class LexerATNSimulator extends ATNSimulator { + /** + * When we hit an accept state in either the DFA or the ATN, we + * have to notify the character stream to start buffering characters + * via {@link IntStream//mark} and record the current state. 
The current sim state + * includes the current index into the input, the current line, + * and current character position in that line. Note that the Lexer is + * tracking the starting line and characterization of the token. These + * variables track the "state" of the simulator when it hits an accept state. + * + *We track these variables separately for the DFA and ATN simulation + * because the DFA simulation often has to fail over to the ATN + * simulation. If the ATN simulation fails, we need the DFA to fall + * back to its previously accepted state, if any. If the ATN succeeds, + * then the ATN does the accept and the DFA simulator that invoked it + * can simply return the predicted token type.
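+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] match() below tries
+// the cached DFA first and only falls back to full ATN simulation when no
+// start state has been computed yet:
+//
+//     const dfa = this.decisionToDFA[mode];
+//     return dfa.s0 === null ? this.matchATN(input) : this.execATN(input, dfa.s0);
+//
+// Every token matched via the ATN grows the DFA, so steady-state lexing is
+// mostly DFA edge lookups.
+
+    /**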
+ */ + constructor(recog, atn, decisionToDFA, sharedContextCache) { + super(atn, sharedContextCache); + this.decisionToDFA = decisionToDFA; + this.recog = recog; + /** + * The current token's starting index into the character stream. + * Shared across DFA to ATN simulation in case the ATN fails and the + * DFA did not have a previous accept state. In this case, we use the + * ATN-generated exception object + */ + this.startIndex = -1; + // line number 1..n within the input/// + this.line = 1; + /** + * The index of the character relative to the beginning of the line + * 0..n-1 + */ + this.column = 0; + this.mode = Lexer.DEFAULT_MODE; + /** + * Used during DFA/ATN exec to record the most recent accept configuration + * info + */ + this.prevAccept = new SimState(); + } + + copyState(simulator) { + this.column = simulator.column; + this.line = simulator.line; + this.mode = simulator.mode; + this.startIndex = simulator.startIndex; + } + + match(input, mode) { + this.mode = mode; + const mark = input.mark(); + try { + this.startIndex = input.index; + this.prevAccept.reset(); + const dfa = this.decisionToDFA[mode]; + if (dfa.s0 === null) { + return this.matchATN(input); + } else { + return this.execATN(input, dfa.s0); + } + } finally { + input.release(mark); + } + } + + reset() { + this.prevAccept.reset(); + this.startIndex = -1; + this.line = 1; + this.column = 0; + this.mode = Lexer.DEFAULT_MODE; + } + + matchATN(input) { + const startState = this.atn.modeToStartState[this.mode]; + + if (LexerATNSimulator.debug) { + console.log("matchATN mode " + this.mode + " start: " + startState); + } + const old_mode = this.mode; + const s0_closure = this.computeStartState(input, startState); + const suppressEdge = s0_closure.hasSemanticContext; + s0_closure.hasSemanticContext = false; + + const next = this.addDFAState(s0_closure); + if (!suppressEdge) { + this.decisionToDFA[this.mode].s0 = next; + } + + const predict = this.execATN(input, next); + + if (LexerATNSimulator.debug) { + console.log("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()); + } + return predict; + } + + execATN(input, ds0) { + if (LexerATNSimulator.debug) { + console.log("start state closure=" + ds0.configs); + } + if (ds0.isAcceptState) { + // allow zero-length tokens + this.captureSimState(this.prevAccept, input, ds0); + } + let t = input.LA(1); + let s = ds0; // s is current/from DFA state + + for (; ;) { // while more work + if (LexerATNSimulator.debug) { + console.log("execATN loop starting closure: " + s.configs); + } + + /** + * As we move src->trg, src->trg, we keep track of the previous trg to + * avoid looking up the DFA state again, which is expensive. + * If the previous target was already part of the DFA, we might + * be able to avoid doing a reach operation upon t. If s!=null, + * it means that semantic predicates didn't prevent us from + * creating a DFA state. Once we know s!=null, we check to see if + * the DFA state has an edge already for t. If so, we can just reuse + * it's configuration set; there's no point in re-computing it. + * This is kind of like doing DFA simulation within the ATN + * simulation because DFA simulation is really just a way to avoid + * computing reach/closure sets. Technically, once we know that + * we have a previously added DFA state, we could jump over to + * the DFA simulator. But, that would mean popping back and forth + * a lot and making things more complicated algorithmically. + * This optimization makes a lot of sense for loops within DFA. 
+ * A character will take us back to an existing DFA state + * that already has lots of edges out of it. e.g., .* in comments. + * print("Target for:" + str(s) + " and:" + str(t)) + */ + let target = this.getExistingTargetState(s, t); + // print("Existing:" + str(target)) + if (target === null) { + target = this.computeTargetState(input, s, t); + // print("Computed:" + str(target)) + } + if (target === ATNSimulator.ERROR) { + break; + } + // If this is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if (t !== Token.EOF) { + this.consume(input); + } + if (target.isAcceptState) { + this.captureSimState(this.prevAccept, input, target); + if (t === Token.EOF) { + break; + } + } + t = input.LA(1); + s = target; // flip; current DFA target becomes new src/from state + } + return this.failOrAccept(this.prevAccept, input, s.configs, t); + } + + /** + * Get an existing target state for an edge in the DFA. If the target state + * for the edge has not yet been computed or is otherwise not available, + * this method returns {@code null}. + * + * @param s The current DFA state + * @param t The next input symbol + * @return The existing target DFA state for the given input symbol + * {@code t}, or {@code null} if the target state for this edge is not + * already cached + */ + getExistingTargetState(s, t) { + if (s.edges === null || t < LexerATNSimulator.MIN_DFA_EDGE || t > LexerATNSimulator.MAX_DFA_EDGE) { + return null; + } + + let target = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE]; + if (target === undefined) { + target = null; + } + if (LexerATNSimulator.debug && target !== null) { + console.log("reuse state " + s.stateNumber + " edge to " + target.stateNumber); + } + return target; + } + + /** + * Compute a target state for an edge in the DFA, and attempt to add the + * computed state and corresponding edge to the DFA. + * + * @param input The input stream + * @param s The current DFA state + * @param t The next input symbol + * + * @return The computed target DFA state for the given input symbol + * {@code t}. If {@code t} does not lead to a valid DFA state, this method + * returns {@link //ERROR}. + */ + computeTargetState(input, s, t) { + const reach = new OrderedATNConfigSet(); + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + this.getReachableConfigSet(input, s.configs, reach, t); + + if (reach.items.length === 0) { // we got nowhere on t from s + if (!reach.hasSemanticContext) { + // we got nowhere on t, don't throw out this knowledge; it'd + // cause a failover from DFA later. 
+ this.addDFAEdge(s, t, ATNSimulator.ERROR); + } + // stop when we can't match any more char + return ATNSimulator.ERROR; + } + // Add an edge from s to target DFA found/created for reach + return this.addDFAEdge(s, t, null, reach); + } + + failOrAccept(prevAccept, input, reach, t) { + if (this.prevAccept.dfaState !== null) { + const lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor; + this.accept(input, lexerActionExecutor, this.startIndex, + prevAccept.index, prevAccept.line, prevAccept.column); + return prevAccept.dfaState.prediction; + } else { + // if no accept and EOF is first char, return EOF + if (t === Token.EOF && input.index === this.startIndex) { + return Token.EOF; + } + throw new LexerNoViableAltException(this.recog, input, this.startIndex, reach); + } + } + + /** + * Given a starting configuration set, figure out all ATN configurations + * we can reach upon input {@code t}. Parameter {@code reach} is a return + * parameter. + */ + getReachableConfigSet(input, closure, reach, t) { + // this is used to skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + let skipAlt = ATN.INVALID_ALT_NUMBER; + for (let i = 0; i < closure.items.length; i++) { + const cfg = closure.items[i]; + const currentAltReachedAcceptState = (cfg.alt === skipAlt); + if (currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision) { + continue; + } + if (LexerATNSimulator.debug) { + console.log("testing %s at %s\n", this.getTokenName(t), cfg + .toString(this.recog, true)); + } + for (let j = 0; j < cfg.state.transitions.length; j++) { + const trans = cfg.state.transitions[j]; // for each transition + const target = this.getReachableTarget(trans, t); + if (target !== null) { + let lexerActionExecutor = cfg.lexerActionExecutor; + if (lexerActionExecutor !== null) { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - this.startIndex); + } + const treatEofAsEpsilon = (t === Token.EOF); + const config = new LexerATNConfig({state: target, lexerActionExecutor: lexerActionExecutor}, cfg); + if (this.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEofAsEpsilon)) { + // any remaining configs for this alt have a lower priority + // than the one that just reached an accept state. + skipAlt = cfg.alt; + } + } + } + } + } + + accept(input, lexerActionExecutor, startIndex, index, line, charPos) { + if (LexerATNSimulator.debug) { + console.log("ACTION %s\n", lexerActionExecutor); + } + // seek to after last char in token + input.seek(index); + this.line = line; + this.column = charPos; + if (lexerActionExecutor !== null && this.recog !== null) { + lexerActionExecutor.execute(this.recog, input, startIndex); + } + } + + getReachableTarget(trans, t) { + if (trans.matches(t, 0, Lexer.MAX_CHAR_VALUE)) { + return trans.target; + } else { + return null; + } + } + + computeStartState(input, p) { + const initialContext = PredictionContext.EMPTY; + const configs = new OrderedATNConfigSet(); + for (let i = 0; i < p.transitions.length; i++) { + const target = p.transitions[i].target; + const cfg = new LexerATNConfig({state: target, alt: i + 1, context: initialContext}, null); + this.closure(input, cfg, configs, false, false, false); + } + return configs; + } + + /** + * Since the alternatives within any lexer decision are ordered by + * preference, this method stops pursuing the closure as soon as an accept + * state is reached. 
After the first accept state is reached by depth-first + * search from {@code config}, all other (potentially reachable) states for + * this rule would have a lower priority. + * + * @return {Boolean} {@code true} if an accept state is reached, otherwise + * {@code false}. + */ + closure(input, config, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon) { + let cfg = null; + if (LexerATNSimulator.debug) { + console.log("closure(" + config.toString(this.recog, true) + ")"); + } + if (config.state instanceof RuleStopState) { + if (LexerATNSimulator.debug) { + if (this.recog !== null) { + console.log("closure at %s rule stop %s\n", this.recog.ruleNames[config.state.ruleIndex], config); + } else { + console.log("closure at rule stop %s\n", config); + } + } + if (config.context === null || config.context.hasEmptyPath()) { + if (config.context === null || config.context.isEmpty()) { + configs.add(config); + return true; + } else { + configs.add(new LexerATNConfig({state: config.state, context: PredictionContext.EMPTY}, config)); + currentAltReachedAcceptState = true; + } + } + if (config.context !== null && !config.context.isEmpty()) { + for (let i = 0; i < config.context.length; i++) { + if (config.context.getReturnState(i) !== PredictionContext.EMPTY_RETURN_STATE) { + const newContext = config.context.getParent(i); // "pop" return state + const returnState = this.atn.states[config.context.getReturnState(i)]; + cfg = new LexerATNConfig({state: returnState, context: newContext}, config); + currentAltReachedAcceptState = this.closure(input, cfg, + configs, currentAltReachedAcceptState, speculative, + treatEofAsEpsilon); + } + } + } + return currentAltReachedAcceptState; + } + // optimization + if (!config.state.epsilonOnlyTransitions) { + if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) { + configs.add(config); + } + } + for (let j = 0; j < config.state.transitions.length; j++) { + const trans = config.state.transitions[j]; + cfg = this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon); + if (cfg !== null) { + currentAltReachedAcceptState = this.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon); + } + } + return currentAltReachedAcceptState; + } + + // side-effect: can alter configs.hasSemanticContext + getEpsilonTarget(input, config, trans, + configs, speculative, treatEofAsEpsilon) { + let cfg = null; + if (trans.serializationType === Transition.RULE) { + const newContext = SingletonPredictionContext.create(config.context, trans.followState.stateNumber); + cfg = new LexerATNConfig({state: trans.target, context: newContext}, config); + } else if (trans.serializationType === Transition.PRECEDENCE) { + throw "Precedence predicates are not supported in lexers."; + } else if (trans.serializationType === Transition.PREDICATE) { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for this "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // this predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? 
; + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since this is when we + // test them, we cannot cash the DFA state target of ID. + + if (LexerATNSimulator.debug) { + console.log("EVAL rule " + trans.ruleIndex + ":" + trans.predIndex); + } + configs.hasSemanticContext = true; + if (this.evaluatePredicate(input, trans.ruleIndex, trans.predIndex, speculative)) { + cfg = new LexerATNConfig({state: trans.target}, config); + } + } else if (trans.serializationType === Transition.ACTION) { + if (config.context === null || config.context.hasEmptyPath()) { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + const lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, + this.atn.lexerActions[trans.actionIndex]); + cfg = new LexerATNConfig({state: trans.target, lexerActionExecutor: lexerActionExecutor}, config); + } else { + // ignore actions in referenced rules + cfg = new LexerATNConfig({state: trans.target}, config); + } + } else if (trans.serializationType === Transition.EPSILON) { + cfg = new LexerATNConfig({state: trans.target}, config); + } else if (trans.serializationType === Transition.ATOM || + trans.serializationType === Transition.RANGE || + trans.serializationType === Transition.SET) { + if (treatEofAsEpsilon) { + if (trans.matches(Token.EOF, 0, Lexer.MAX_CHAR_VALUE)) { + cfg = new LexerATNConfig({state: trans.target}, config); + } + } + } + return cfg; + } + + /** + * Evaluate a predicate specified in the lexer. + * + *If {@code speculative} is {@code true}, this method was called before + * {@link //consume} for the matched character. This method should call + * {@link //consume} before evaluating the predicate to ensure position + * sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine}, + * and {@link Lexer//getcolumn}, properly reflect the current + * lexer state. This method should restore {@code input} and the simulator + * to the original state before returning (i.e. undo the actions made by the + * call to {@link //consume}.
+ * + * @param input The input stream. + * @param ruleIndex The rule containing the predicate. + * @param predIndex The index of the predicate within the rule. + * @param speculative {@code true} if the current index in {@code input} is + * one character before the predicate's location. + * + * @return {@code true} if the specified predicate evaluates to + * {@code true}. + */ + evaluatePredicate(input, ruleIndex, + predIndex, speculative) { + // assume true if no recognizer was provided + if (this.recog === null) { + return true; + } + if (!speculative) { + return this.recog.sempred(null, ruleIndex, predIndex); + } + const savedcolumn = this.column; + const savedLine = this.line; + const index = input.index; + const marker = input.mark(); + try { + this.consume(input); + return this.recog.sempred(null, ruleIndex, predIndex); + } finally { + this.column = savedcolumn; + this.line = savedLine; + input.seek(index); + input.release(marker); + } + } + + captureSimState(settings, input, dfaState) { + settings.index = input.index; + settings.line = this.line; + settings.column = this.column; + settings.dfaState = dfaState; + } + + addDFAEdge(from_, tk, to, cfgs) { + if (to === undefined) { + to = null; + } + if (cfgs === undefined) { + cfgs = null; + } + if (to === null && cfgs !== null) { + // leading to this call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes this edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to resynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // / + const suppressEdge = cfgs.hasSemanticContext; + cfgs.hasSemanticContext = false; + + to = this.addDFAState(cfgs); + + if (suppressEdge) { + return to; + } + } + // add the edge + if (tk < LexerATNSimulator.MIN_DFA_EDGE || tk > LexerATNSimulator.MAX_DFA_EDGE) { + // Only track edges within the DFA bounds + return to; + } + if (LexerATNSimulator.debug) { + console.log("EDGE " + from_ + " -> " + to + " upon " + tk); + } + if (from_.edges === null) { + // make room for tokens 1..n and -1 masquerading as index 0 + from_.edges = []; + } + from_.edges[tk - LexerATNSimulator.MIN_DFA_EDGE] = to; // connect + + return to; + } + + /** + * Add a new DFA state if there isn't one with this set of + * configurations already. This method also detects the first + * configuration containing an ATN rule stop state. Later, when + * traversing the DFA, we will know which rule to accept. 
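+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] addDFAState() below
+// interns states by configuration set: a proposed state is looked up in
+// dfa.states and re-used when an equal one exists; otherwise it is numbered,
+// its config set frozen via setReadonly(true), and added to the DFA.
+
+    /**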
+ */ + addDFAState(configs) { + const proposed = new DFAState(null, configs); + let firstConfigWithRuleStopState = null; + for (let i = 0; i < configs.items.length; i++) { + const cfg = configs.items[i]; + if (cfg.state instanceof RuleStopState) { + firstConfigWithRuleStopState = cfg; + break; + } + } + if (firstConfigWithRuleStopState !== null) { + proposed.isAcceptState = true; + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor; + proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; + } + const dfa = this.decisionToDFA[this.mode]; + const existing = dfa.states.get(proposed); + if (existing !== null) { + return existing; + } + const newState = proposed; + newState.stateNumber = dfa.states.length; + configs.setReadonly(true); + newState.configs = configs; + dfa.states.add(newState); + return newState; + } + + getDFA(mode) { + return this.decisionToDFA[mode]; + } + +// Get the text matched so far for the current token. + getText(input) { + // index is first lookahead char, don't include. + return input.getText(this.startIndex, input.index - 1); + } + + consume(input) { + const curChar = input.LA(1); + if (curChar === "\n".charCodeAt(0)) { + this.line += 1; + this.column = 0; + } else { + this.column += 1; + } + input.consume(); + } + + getTokenName(tt) { + if (tt === -1) { + return "EOF"; + } else { + return "'" + String.fromCharCode(tt) + "'"; + } + } +} + +LexerATNSimulator.debug = false; +LexerATNSimulator.dfa_debug = false; + +LexerATNSimulator.MIN_DFA_EDGE = 0; +LexerATNSimulator.MAX_DFA_EDGE = 127; // forces unicode to stay in ATN + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/PredPrediction.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +/** + * Map a predicate to a predicted alternative. + */ +class PredPrediction { + constructor(pred, alt) { + this.alt = alt; + this.pred = pred; + } + + toString() { + return "(" + this.pred + ", " + this.alt + ")"; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/misc/AltDict.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +class AltDict { + + constructor() { + this.data = {}; + } + + get(key) { + return this.data["k-" + key] || null; + } + + set(key, value) { + this.data["k-" + key] = value; + } + + values() { + return Object.keys(this.data).filter(key => key.startsWith("k-")).map(key => this.data[key], this); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/PredictionMode.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + + + + + +/** + * This enumeration defines the prediction modes available in ANTLR 4 along with + * utility methods for analyzing configuration sets for conflicts and/or + * ambiguities. + */ +const PredictionMode = { + /** + * The SLL(*) prediction mode. This prediction mode ignores the current + * parser context when making predictions. This is the fastest prediction + * mode, and provides correct results for many grammars. 
+ * This prediction
+ * mode is more powerful than the prediction mode provided by ANTLR 3, but
+ * may result in syntax errors for grammar and input combinations which are
+ * not SLL.
+ *
+ * When using this prediction mode, the parser will either return a correct
+ * parse tree (i.e. the same parse tree that would be returned with the
+ * {@link //LL} prediction mode), or it will report a syntax error. If a
+ * syntax error is encountered when using the {@link //SLL} prediction mode,
+ * it may be due to either an actual syntax error in the input or indicate
+ * that the particular combination of grammar and input requires the more
+ * powerful {@link //LL} prediction abilities to complete successfully.
+ *
+ * This prediction mode does not provide any guarantees for prediction
+ * behavior for syntactically-incorrect inputs.
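+ */
+
+// [Illustrative sketch; not part of the ANTLR runtime] A common two-stage
+// strategy (names here are hypothetical, and the parser is assumed to expose
+// its ATN simulator as parser._interp) is to try SLL first and retry with
+// full LL only on a syntax error:
+//
+//     parser._interp.predictionMode = PredictionMode.SLL;
+//     try {
+//         tree = parser.startRule();
+//     } catch (e) {
+//         tokens.seek(0);
+//         parser.reset();
+//         parser._interp.predictionMode = PredictionMode.LL;
+//         tree = parser.startRule();
+//     }
+
+ /**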
+ */
+ SLL: 0,
+
+ /**
+  * The LL(*) prediction mode. This prediction mode allows the current parser
+  * context to be used for resolving SLL conflicts that occur during
+  * prediction. This is the fastest prediction mode that guarantees correct
+  * parse results for all combinations of grammars with syntactically correct
+  * inputs.
+  *
+  * When using this prediction mode, the parser will make correct decisions
+  * for all syntactically-correct grammar and input combinations. However, in
+  * cases where the grammar is truly ambiguous this prediction mode might not
+  * report a precise answer for exactly which alternatives are
+  * ambiguous.
+  *
+  * This prediction mode does not provide any guarantees for prediction
+  * behavior for syntactically-incorrect inputs.
+ */
+ LL: 1,
+
+ /**
+  * The LL(*) prediction mode with exact ambiguity detection. In addition to
+  * the correctness guarantees provided by the {@link //LL} prediction mode,
+  * this prediction mode instructs the prediction algorithm to determine the
+  * complete and exact set of ambiguous alternatives for every ambiguous
+  * decision encountered while parsing.
+  *
+  * This prediction mode may be used for diagnosing ambiguities during
+  * grammar development. Due to the performance overhead of calculating sets
+  * of ambiguous alternatives, this prediction mode should be avoided when
+  * the exact results are not necessary.
+  *
+  * This prediction mode does not provide any guarantees for prediction
+  * behavior for syntactically-incorrect inputs.
+ * + *COMBINED SLL+LL PARSING
+ * + *When LL-fallback is enabled upon SLL conflict, correct predictions are + * ensured regardless of how the termination condition is computed by this + * method. Due to the substantially higher cost of LL prediction, the + * prediction should only fall back to LL when the additional lookahead + * cannot lead to a unique SLL prediction.
+ * + *Assuming combined SLL+LL parsing, an SLL configuration set with only + * conflicting subsets should fall back to full LL, even if the + * configuration sets don't resolve to the same alternative (e.g. + * {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting + * configuration, SLL could continue with the hopes that more lookahead will + * resolve via one of those non-conflicting configurations.
+ * + *Here's the prediction termination rule them: SLL (for SLL+LL parsing) + * stops when it sees only conflicting configuration subsets. In contrast, + * full LL keeps going when there is uncertainty.
+ * + *HEURISTIC
+ * + *As a heuristic, we stop prediction when we see any conflicting subset + * unless we see a state that only has one alternative associated with it. + * The single-alt-state thing lets prediction continue upon rules like + * (otherwise, it would admit defeat too soon):
+ * + *{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}
+ * + *When the ATN simulation reaches the state before {@code ';'}, it has a + * DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally + * {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop + * processing this node because alternative two has another way to continue, + * via {@code [6|2|[]]}.
+ * + *It also lets us continue for this rule:
+ * + *{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}
+ * + *After matching input A, we reach the stop state for rule A, state 1. + * State 8 is the state right before B. Clearly alternatives 1 and 2 + * conflict and no amount of further lookahead will separate the two. + * However, alternative 3 will be able to continue and so we do not stop + * working on this state. In the previous example, we're concerned with + * states associated with the conflicting alternatives. Here alt 3 is not + * associated with the conflicting configs, but since we can continue + * looking for input reasonably, don't declare the state done.
+ * + *PURE SLL PARSING
+ * + *To handle pure SLL parsing, all we have to do is make sure that we + * combine stack contexts for configurations that differ only by semantic + * predicate. From there, we can do the usual SLL termination heuristic.
+ * + *PREDICATES IN SLL+LL PARSING
+ * + *SLL decisions don't evaluate predicates until after they reach DFA stop + * states because they need to create the DFA cache that works in all + * semantic situations. In contrast, full LL evaluates predicates collected + * during start state computation so it can ignore predicates thereafter. + * This means that SLL termination detection can totally ignore semantic + * predicates.
+ * + *Implementation-wise, {@link ATNConfigSet} combines stack contexts but not + * semantic predicate contexts so we might see two configurations like the + * following.
+ * + *{@code (s, 1, x, {}), (s, 1, x', {p})}
+ * + *Before testing these configurations against others, we have to merge + * {@code x} and {@code x'} (without modifying the existing configurations). + * For example, we test {@code (x+x')==x''} when looking for conflicts in + * the following configurations.
+ * + *{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+ * + *If the configuration set has predicates (as indicated by + * {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of + * the configurations to strip out all of the predicates so that a standard + * {@link ATNConfigSet} will merge everything ignoring predicates.
+ */
+ hasSLLConflictTerminatingPrediction: function( mode, configs) {
+     // Configs in rule stop states indicate reaching the end of the decision
+     // rule (local context) or end of start rule (full context). If all
+     // configs meet this condition, then none of the configurations is able
+     // to match additional input so we terminate prediction.
+     if (PredictionMode.allConfigsInRuleStopStates(configs)) {
+         return true;
+     }
+     // pure SLL mode parsing
+     if (mode === PredictionMode.SLL) {
+         // Don't bother with combining configs from different semantic
+         // contexts if we can fail over to full LL; costs more time
+         // since we'll often fail over anyway.
+         if (configs.hasSemanticContext) {
+             // dup configs, tossing out semantic predicates
+             const dup = new ATNConfigSet();
+             for(let i=0;i<configs.items.length;i++) {
+                 let c = configs.items[i];
+                 c = new ATNConfig({semanticContext:SemanticContext.NONE}, c);
+                 dup.add(c);
+             }
+             configs = dup;
+         }
+         // now we have combined contexts for configs with dissimilar preds
+     }
+     // pure SLL or combined SLL+LL mode parsing
+     const altsets = PredictionMode.getConflictingAltSubsets(configs);
+     return PredictionMode.hasConflictingAltSet(altsets) && !PredictionMode.hasStateAssociatedWithOneAlt(configs);
+ },
+
+ /**
+  * Checks if any configuration in {@code configs} is in a
+  * {@link RuleStopState}. Configurations meeting this condition have reached
+  * the end of the decision rule (local context) or end of the start rule
+  * (full context).
+  */
+ hasConfigInRuleStopState: function(configs) {
+     for(let i=0;i<configs.items.length;i++) {
+         const c = configs.items[i];
+         if (c.state instanceof RuleStopState) {
+             return true;
+         }
+     }
+     return false;
+ },
+
+ /**
+  * Checks if all configurations in {@code configs} are in a
+  * {@link RuleStopState}.
+  */
+ allConfigsInRuleStopStates: function(configs) {
+     for(let i=0;i<configs.items.length;i++) {
+         const c = configs.items[i];
+         if (!(c.state instanceof RuleStopState)) {
+             return false;
+         }
+     }
+     return true;
+ },
+
+ /**
+  * Full LL prediction termination.
+  *
+  * Can we stop looking ahead during ATN simulation or is there some
+  * uncertainty as to which alternative we will ultimately pick, after
+  * consuming more input? Even if there are partial conflicts, we might know
+  * that everything is going to resolve to the same minimum alternative. That
+  * means we can stop since no more lookahead will change that fact. On the
+  * other hand, there might be multiple conflicts that resolve to different
+  * minimums. That means we need more look ahead to decide which of those
+  * alternatives we should predict.
+  *
+  * The basic idea is to split the set of configurations {@code C}, into
+  * conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+  * non-conflicting configurations. Two configurations conflict if they have
+  * identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+  * but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+  * and {@code (s, j, ctx, _)} for {@code i!=j}.
+ * + *Reduce these configuration subsets to the set of possible alternatives. + * You can compute the alternative subsets in one pass as follows:
+ * + *{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in + * {@code C} holding {@code s} and {@code ctx} fixed.
+ * + *Or in pseudo-code, for each configuration {@code c} in {@code C}:
+ * + *+ * map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not + * alt and not pred + *+ * + *
The values in {@code map} are the set of {@code A_s,ctx} sets.
+ * + *If {@code |A_s,ctx|=1} then there is no conflict associated with + * {@code s} and {@code ctx}.
+ * + *Reduce the subsets to singletons by choosing a minimum of each subset. If + * the union of these alternative subsets is a singleton, then no amount of + * more lookahead will help us. We will always pick that alternative. If, + * however, there is more than one alternative, then we are uncertain which + * alternative to predict and must continue looking for resolution. We may + * or may not discover an ambiguity in the future, even if there are no + * conflicting subsets this round.
+ * + *The biggest sin is to terminate early because it means we've made a + * decision but were uncertain as to the eventual outcome. We haven't used + * enough lookahead. On the other hand, announcing a conflict too late is no + * big deal; you will still have the conflict. It's just inefficient. It + * might even look until the end of file.
+ * + *No special consideration for semantic predicates is required because + * predicates are evaluated on-the-fly for full LL prediction, ensuring that + * no configuration contains a semantic context during the termination + * check.
+ * + *CONFLICTING CONFIGS
+ * + *Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict + * when {@code i!=j} but {@code x=x'}. Because we merge all + * {@code (s, i, _)} configurations together, that means that there are at + * most {@code n} configurations associated with state {@code s} for + * {@code n} possible alternatives in the decision. The merged stacks + * complicate the comparison of configuration contexts {@code x} and + * {@code x'}. Sam checks to see if one is a subset of the other by calling + * merge and checking to see if the merged result is either {@code x} or + * {@code x'}. If the {@code x} associated with lowest alternative {@code i} + * is the superset, then {@code i} is the only possible prediction since the + * others resolve to {@code min(i)} as well. However, if {@code x} is + * associated with {@code j>i} then at least one stack configuration for + * {@code j} is not in conflict with alternative {@code i}. The algorithm + * should keep going, looking for more lookahead due to the uncertainty.
+ * + *For simplicity, I'm doing an equality check between {@code x} and + * {@code x'} that lets the algorithm continue to consume lookahead longer + * than necessary. The reason I like the equality is of course the + * simplicity, but also because that is the test you need to detect the + * alternatives that are actually in conflict.
+ * + *CONTINUE/STOP RULE
+ * + *Continue if union of resolved alternative sets from non-conflicting and + * conflicting alternative subsets has more than one alternative. We are + * uncertain about which alternative to predict.
+ * + *The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + * alternatives are still in the running for the amount of input we've + * consumed at this point. The conflicting sets let us strip away + * configurations that won't lead to more states because we resolve + * conflicts to the configuration with a minimum alternate for the + * conflicting set.
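+ *
+ * <p>A small worked instance (sets hypothetical): suppose the alternative
+ * subsets are {@code {1,2}} (conflicting, resolved to its minimum {@code 1})
+ * and {@code {3}} (non-conflicting). The union of resolved alternatives is
+ * {@code {1,3}}, which has more than one element, so prediction must keep
+ * consuming lookahead.</p>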
+ * + *CASES
+ * + *EXACT AMBIGUITY DETECTION
+ * + *If all states report the same conflicting set of alternatives, then we + * know we have the exact ambiguity set.
+ *
+ * <pre>
+ * |A_i| > 1  and  A_i = A_j  for all i, j.
+ * </pre>
+ *
+ * In other words, we continue examining lookahead until all {@code A_i}
+ * have more than one alternative and all {@code A_i} are the same. If
+ * {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+ * because the resolved set is {@code {1}}. To determine what the real
+ * ambiguity is, we have to know whether the ambiguity is between one and
+ * two or one and three so we keep going. We can only stop prediction when
+ * we need exact ambiguity detection when the sets look like
+ * {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+ */
+ resolvesToJustOneViableAlt: function(altsets) {
+     return PredictionMode.getSingleViableAlt(altsets);
+ },
+
+ /**
+  * Determines if every alternative subset in {@code altsets} contains more
+  * than one alternative.
+  *
+  * @param altsets a collection of alternative subsets
+  * @return {@code true} if every {@link BitSet} in {@code altsets} has
+  * {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+  */
+ allSubsetsConflict: function(altsets) {
+     return ! PredictionMode.hasNonConflictingAltSet(altsets);
+ },
+
+ /**
+  * Determines if any single alternative subset in {@code altsets} contains
+  * exactly one alternative.
+  *
+  * @param altsets a collection of alternative subsets
+  * @return {@code true} if {@code altsets} contains a {@link BitSet} with
+  * {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+  */
+ hasNonConflictingAltSet: function(altsets) {
+     for(let i=0;i<altsets.length;i++) {
+         const alts = altsets[i];
+         if (alts.length===1) {
+             return true;
+         }
+     }
+     return false;
+ },
+
+ /**
+  * Determines if any single alternative subset in {@code altsets} contains
+  * more than one alternative.
+  */
+ hasConflictingAltSet: function(altsets) {
+     for(let i=0;i<altsets.length;i++) {
+         const alts = altsets[i];
+         if (alts.length>1) {
+             return true;
+         }
+     }
+     return false;
+ },
+
+ /**
+  * Determines if every alternative subset in {@code altsets} is equivalent.
+  */
+ allSubsetsEqual: function(altsets) {
+     let first = null;
+     for(let i=0;i<altsets.length;i++) {
+         const alts = altsets[i];
+         if (first === null) {
+             first = alts;
+         } else if (alts!==first) {
+             return false;
+         }
+     }
+     return true;
+ },
+
+ /**
+  * Returns the unique alternative predicted by all alternative subsets in
+  * {@code altsets}, or {@link ATN//INVALID_ALT_NUMBER} if no such
+  * alternative exists.
+  */
+ getUniqueAlt: function(altsets) {
+     const all = PredictionMode.getAlts(altsets);
+     if (all.length===1) {
+         return all.minValue();
+     } else {
+         return ATN.INVALID_ALT_NUMBER;
+     }
+ },
+
+ /**
+  * Gets the complete set of represented alternatives for a collection of
+  * alternative subsets.
+  */
+ getAlts: function(altsets) {
+     const all = new BitSet();
+     altsets.map( function(alts) { all.or(alts); });
+     return all;
+ },
+
+ /**
+  * This function gets the conflicting alt subsets from a configuration set.
+  * For each configuration {@code c} in {@code configs}:
+  *
+  * <pre>
+  * map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+  * alt and not pred
+  * </pre>
+  */
+ getConflictingAltSubsets: function(configs) {
+     const configToAlts = new HashMap();
+     configToAlts.hashFunction = function(cfg) { HashCode.hashStuff(cfg.state.stateNumber, cfg.context); };
+     configToAlts.equalsFunction = function(c1, c2) { return c1.state.stateNumber===c2.state.stateNumber && c1.context.equals(c2.context); };
+     configs.items.map(function(cfg) {
+         let alts = configToAlts.get(cfg);
+         if (alts === null) {
+             alts = new BitSet();
+             configToAlts.set(cfg, alts);
+         }
+         alts.add(cfg.alt);
+     });
+     return configToAlts.getValues();
+ },
+
+ /**
+  * Get a map from state to alt subset from a configuration set. For each
+  * configuration {@code c} in {@code configs}:
+  *
+  * <pre>
+  * map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+  * </pre>
+  */
+ getStateToAltMap: function(configs) {
+     const m = new AltDict();
+     configs.items.map(function(c) {
+         let alts = m.get(c.state);
+         if (alts === null) {
+             alts = new BitSet();
+             m.set(c.state, alts);
+         }
+         alts.add(c.alt);
+     });
+     return m;
+ },
+
+ hasStateAssociatedWithOneAlt: function(configs) {
+     const values = PredictionMode.getStateToAltMap(configs).values();
+     for(let i=0;i<values.length;i++) {
+         if (values[i].length===1) {
+             return true;
+         }
+     }
+     return false;
+ },
+
+ getSingleViableAlt: function(altsets) {
+     let result = null;
+     for(let i=0;i<altsets.length;i++) {
+         const alts = altsets[i];
+         const minAlt = alts.minValue();
+         if(result===null) {
+             result = minAlt;
+         } else if(result!==minAlt) { // more than 1 viable alt
+             return ATN.INVALID_ALT_NUMBER;
+         }
+     }
+     return result;
+ }
+};
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/ParserATNSimulator.js
+/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+/**
+ * The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
+ *
+ * The basic complexity of the adaptive strategy makes it harder to understand.
+ * We begin with ATN simulation to build paths in a DFA.
+ *
+ * All of that is done without using the outer context because we want to create + * a DFA that is not dependent upon the rule invocation stack when we do a + * prediction. One DFA works in all contexts. We avoid using context not + * necessarily because it's slower, although it can be, but because of the DFA + * caching problem. The closure routine only considers the rule invocation stack + * created during prediction beginning in the decision rule. For example, if + * prediction occurs without invoking another rule's ATN, there are no context + * stacks in the configurations. When lack of context leads to a conflict, we + * don't know if it's an ambiguity or a weakness in the strong LL(*) parsing + * strategy (versus full LL(*)).
+ * + *+ * When SLL yields a configuration set with conflict, we rewind the input and + * retry the ATN simulation, this time using full outer context without adding + * to the DFA. Configuration context stacks will be the full invocation stacks + * from the start rule. If we get a conflict using full context, then we can + * definitively say we have a true ambiguity for that input sequence. If we + * don't get a conflict, it implies that the decision is sensitive to the outer + * context. (It is not context-sensitive in the sense of context-sensitive + * grammars.)
+ * + *+ * The next time we reach this DFA state with an SLL conflict, through DFA + * simulation, we will again retry the ATN simulation using full context mode. + * This is slow because we can't save the results and have to "interpret" the + * ATN each time we get that input.
+ * + *+ * CACHING FULL CONTEXT PREDICTIONS
+ * + *+ * We could cache results from full context to predicted alternative easily and + * that saves a lot of time but doesn't work in presence of predicates. The set + * of visible predicates from the ATN start state changes depending on the + * context, because closure can fall off the end of a rule. I tried to cache + * tuples (stack context, semantic context, predicted alt) but it was slower + * than interpreting and much more complicated. Also required a huge amount of + * memory. The goal is not to create the world's fastest parser anyway. I'd like + * to keep this algorithm simple. By launching multiple threads, we can improve + * the speed of parsing across a large number of files.
+ * + *+ * There is no strict ordering between the amount of input used by SLL vs LL, + * which makes it really hard to build a cache for full context. Let's say that + * we have input A B C that leads to an SLL conflict with full context X. That + * implies that using X we might only use A B but we could also use A B C D to + * resolve conflict. Input A B C D could predict alternative 1 in one position + * in the input and A B C E could predict alternative 2 in another position in + * input. The conflicting SLL configurations could still be non-unique in the + * full context prediction, which would lead us to requiring more input than the + * original A B C. To make a prediction cache work, we have to track the exact + * input used during the previous prediction. That amounts to a cache that maps + * X to a specific DFA for that context.
+ * + *+ * Something should be done for left-recursive expression predictions. They are + * likely LL(1) + pred eval. It is easier to just do the whole decision in SLL + * and, only upon error, retry with full LL, as Sam does.
+ * + *+ * AVOIDING FULL CONTEXT PREDICTION
+ * + *+ * We avoid doing full context retry when the outer context is empty, we did not + * dip into the outer context by falling off the end of the decision state rule, + * or when we force SLL mode.
+ * + *+ * As an example of the "not dip into outer context" case, consider super + * constructor calls versus function calls. One grammar might look like + * this:
+ * + *+ * ctorBody + * : '{' superCall? stat* '}' + * ; + *+ * + *
+ * Or, you might see something like
+ * + *+ * stat + * : superCall ';' + * | expression ';' + * | ... + * ; + *+ * + *
+ * In both cases I believe that no closure operations will dip into the outer + * context. In the first case ctorBody in the worst case will stop at the '}'. + * In the 2nd case it should stop at the ';'. Both cases should stay within the + * entry rule and not dip into the outer context.
+ * + *+ * PREDICATES
+ * + *+ * Predicates are always evaluated if present, in both SLL and LL. SLL and + * LL simulation deals with predicates differently. SLL collects predicates as + * it performs closure operations like ANTLR v3 did. It delays predicate + * evaluation until it reaches an accept state. This allows us to cache the SLL + * ATN simulation whereas, if we had evaluated predicates on-the-fly during + * closure, the DFA state configuration sets would be different and we couldn't + * build up a suitable DFA.
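+ *
+ * <p>Hypothetical grammar fragment, for illustration only ({@code isType}
+ * stands in for user action code and is not part of this runtime). SLL
+ * carries the predicate along during closure and tests it only at the
+ * accept state, while full LL evaluates it as soon as closure reaches
+ * it:</p>
+ *
+ * <pre>
+ * decl : {this.isType()}? ID ID ';'   // predicated alternative
+ *      | ID '=' expr ';'              // unpredicated alternative
+ *      ;
+ * </pre>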
+ * + *+ * When building a DFA accept state during ATN simulation, we evaluate any + * predicates and return the sole semantically valid alternative. If there is + * more than 1 alternative, we report an ambiguity. If there are 0 alternatives, + * we throw an exception. Alternatives without predicates act like they have + * true predicates. The simple way to think about it is to strip away all + * alternatives with false predicates and choose the minimum alternative that + * remains.
+ * + *+ * When we start in the DFA and reach an accept state that's predicated, we test + * those and return the minimum semantically viable alternative. If no + * alternatives are viable, we throw an exception.
+ * + *+ * During full LL ATN simulation, closure always evaluates predicates + * on-the-fly. This is crucial to reducing the configuration set size during + * closure. It hits a landmine when parsing with the Java grammar, for example, + * without this on-the-fly evaluation.
+ * + *+ * SHARING DFA
+ * + *+ * All instances of the same parser share the same decision DFAs through a + * static field. Each instance gets its own ATN simulator but they share the + * same {@link //decisionToDFA} field. They also share a + * {@link PredictionContextCache} object that makes sure that all + * {@link PredictionContext} objects are shared among the DFA states. This makes + * a big size difference.
+ * + *+ * THREAD SAFETY
+ * + *+ * The {@link ParserATNSimulator} locks on the {@link //decisionToDFA} field when + * it adds a new DFA object to that array. {@link //addDFAEdge} + * locks on the DFA for the current decision when setting the + * {@link DFAState//edges} field. {@link //addDFAState} locks on + * the DFA for the current decision when looking up a DFA state to see if it + * already exists. We must make sure that all requests to add DFA states that + * are equivalent result in the same shared DFA object. This is because lots of + * threads will be trying to update the DFA at once. The + * {@link //addDFAState} method also locks inside the DFA lock + * but this time on the shared context cache when it rebuilds the + * configurations' {@link PredictionContext} objects using cached + * subgraphs/nodes. No other locking occurs, even during DFA simulation. This is + * safe as long as we can guarantee that all threads referencing + * {@code s.edge[t]} get the same physical target {@link DFAState}, or + * {@code null}. Once into the DFA, the DFA simulation does not reference the + * {@link DFA//states} map. It follows the {@link DFAState//edges} field to new + * targets. The DFA simulator will either find {@link DFAState//edges} to be + * {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or + * {@code dfa.edges[t]} to be non-null. The + * {@link //addDFAEdge} method could be racing to set the field + * but in either case the DFA simulator works; if {@code null}, and requests ATN + * simulation. It could also race trying to get {@code dfa.edges[t]}, but either + * way it will work because it's not doing a test and set operation.
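+ *
+ * <p>Sketch of the sharing pattern as it appears in typical generated
+ * JavaScript parsers (illustrative, not part of this class): the DFA array
+ * and context cache live at module level, so every parser instance is
+ * constructed over the same objects:</p>
+ *
+ * <pre>
+ * // module level, shared by all instances of the generated parser
+ * const sharedContextCache = new PredictionContextCache();
+ * const decisionsToDFA = atn.decisionToState.map(
+ *     (ds, index) => new DFA(ds, index));
+ * // per instance, inside the generated constructor
+ * this._interp = new ParserATNSimulator(this, atn, decisionsToDFA, sharedContextCache);
+ * </pre>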
+ * + *+ * Starting with SLL then failing over to combined SLL/LL (Two-Stage + * Parsing)
+ * + *+ * Sam pointed out that if SLL does not give a syntax error, then there is no + * point in doing full LL, which is slower. We only have to try LL if we get a + * syntax error. For maximum speed, Sam starts the parser set to pure SLL + * mode with the {@link BailErrorStrategy}:
+ * + *+ * parser.{@link Parser//getInterpreter() getInterpreter()}.{@link //setPredictionMode setPredictionMode}{@code (}{@link PredictionMode//SLL}{@code )}; + * parser.{@link Parser//setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}()); + *+ * + *
+ * If it does not get a syntax error, then we're done. If it does get a syntax + * error, we need to retry with the combined SLL/LL strategy.
+ * + *+ * The reason this works is as follows. If there are no SLL conflicts, then the + * grammar is SLL (at least for that input set). If there is an SLL conflict, + * the full LL analysis must yield a set of viable alternatives which is a + * subset of the alternatives reported by SLL. If the LL set is a singleton, + * then the grammar is LL but not SLL. If the LL set is the same size as the SLL + * set, the decision is SLL. If the LL set has size > 1, then that decision + * is truly ambiguous on the current input. If the LL set is smaller, then the + * SLL conflict resolution might choose an alternative that the full LL would + * rule out as a possibility based upon better context information. If that's + * the case, then the SLL parse will definitely get an error because the full LL + * analysis says it's not viable. If SLL conflict resolution chooses an + * alternative within the LL set, then both SLL and LL would choose the same + * alternative because they both choose the minimum of multiple conflicting + * alternatives.
+ * + *+ * Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and + * a smaller LL set called s. If s is {@code {2, 3}}, then SLL + * parsing will get an error because SLL will pursue alternative 1. If + * s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will + * choose the same alternative because alternative one is the minimum of either + * set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax + * error. If s is {@code {1}} then SLL will succeed.
+ * + *+ * Of course, if the input is invalid, then we will get an error for sure in + * both SLL and LL parsing. Erroneous input will therefore require 2 passes over + * the input.
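+ *
+ * <p>A fuller two-stage sketch in this runtime's JavaScript idiom (the
+ * {@code startRule} entry point and the {@code tokens} stream are
+ * hypothetical):</p>
+ *
+ * <pre>
+ * parser._interp.predictionMode = PredictionMode.SLL;
+ * parser._errHandler = new BailErrorStrategy();
+ * let tree;
+ * try {
+ *     tree = parser.startRule();          // stage 1: fast SLL attempt
+ * } catch (e) {
+ *     tokens.seek(0);                     // rewind the token stream
+ *     parser.reset();
+ *     parser._errHandler = new DefaultErrorStrategy();
+ *     parser._interp.predictionMode = PredictionMode.LL;
+ *     tree = parser.startRule();          // stage 2: full LL reparse
+ * }
+ * </pre>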
+ */ +class ParserATNSimulator extends ATNSimulator { + constructor(parser, atn, decisionToDFA, sharedContextCache) { + super(atn, sharedContextCache); + this.parser = parser; + this.decisionToDFA = decisionToDFA; + // SLL, LL, or LL + exact ambig detection?// + this.predictionMode = atn_PredictionMode.LL; + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + this._input = null; + this._startIndex = 0; + this._outerContext = null; + this._dfa = null; + /** + * Each prediction operation uses a cache for merge of prediction contexts. + * Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + * isn't synchronized but we're ok since two threads shouldn't reuse same + * parser/atnsim object because it can only handle one input at a time. + * This maps graphs a and b to merged result c. (a,b)→c. We can avoid + * the merge if we ever see a and b again. Note that (b,a)→c should + * also be examined during cache lookup. + */ + this.mergeCache = null; + this.debug = false; + this.debug_closure = false; + this.debug_add = false; + this.debug_list_atn_decisions = false; + this.dfa_debug = false; + this.retry_debug = false; + } + + reset() {} + + adaptivePredict(input, decision, outerContext) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("adaptivePredict decision " + decision + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + input.LT(1).line + ":" + + input.LT(1).column); + } + this._input = input; + this._startIndex = input.index; + this._outerContext = outerContext; + + const dfa = this.decisionToDFA[decision]; + this._dfa = dfa; + const m = input.mark(); + const index = input.index; + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + try { + let s0; + if (dfa.precedenceDfa) { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()); + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0; + } + if (s0===null) { + if (outerContext===null) { + outerContext = RuleContext.EMPTY; + } + if (this.debug || this.debug_list_atn_decisions) { + console.log("predictATN decision " + dfa.decision + + " exec LA(1)==" + this.getLookaheadName(input) + + ", outerContext=" + outerContext.toString(this.parser.ruleNames)); + } + + const fullCtx = false; + let s0_closure = this.computeStartState(dfa.atnStartState, RuleContext.EMPTY, fullCtx); + + if( dfa.precedenceDfa) { + // If this is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. 
+ // + dfa.s0.configs = s0_closure; // not used for prediction but useful to know start configs anyway + s0_closure = this.applyPrecedenceFilter(s0_closure); + s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); + dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0); + } else { + s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); + dfa.s0 = s0; + } + } + const alt = this.execATN(dfa, s0, input, index, outerContext); + if (this.debug) { + console.log("DFA after predictATN: " + dfa.toString(this.parser.literalNames, this.parser.symbolicNames)); + } + return alt; + } finally { + this._dfa = null; + this.mergeCache = null; // wack cache after each prediction + input.seek(index); + input.release(m); + } + } + + /** + * Performs ATN simulation to compute a predicted alternative based + * upon the remaining input, but also updates the DFA cache to avoid + * having to traverse the ATN again for the same input sequence. + * + * There are some key conditions we're looking for after computing a new + * set of ATN configs (proposed DFA state): + * if the set is empty, there is no viable alternative for current symbol + * does the state uniquely predict an alternative? + * does the state have a conflict that would prevent us from + * putting it on the work list? + * + * We also have some key operations to do: + * add an edge from previous DFA state to potentially new DFA state, D, + * upon current symbol but only if adding to work list, which means in all + * cases except no viable alternative (and possibly non-greedy decisions?) + * collecting predicates and adding semantic context to DFA accept states + * adding rule context to context-sensitive DFA accept states + * consuming an input symbol + * reporting a conflict + * reporting an ambiguity + * reporting a context sensitivity + * reporting insufficient predicates + * + * cover these cases: + * dead end + * single alt + * single alt + preds + * conflict + * conflict + preds + * + */ + execATN(dfa, s0, input, startIndex, outerContext ) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("execATN decision " + dfa.decision + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + input.LT(1).line + ":" + input.LT(1).column); + } + let alt; + let previousD = s0; + + if (this.debug) { + console.log("s0 = " + s0); + } + let t = input.LA(1); + for(;;) { // while more work + let D = this.getExistingTargetState(previousD, t); + if(D===null) { + D = this.computeTargetState(dfa, previousD, t); + } + if(D===ATNSimulator.ERROR) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ const e = this.noViableAlt(input, outerContext, previousD.configs, startIndex); + input.seek(startIndex); + alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext); + if(alt!==ATN.INVALID_ALT_NUMBER) { + return alt; + } else { + throw e; + } + } + if(D.requiresFullContext && this.predictionMode !== atn_PredictionMode.SLL) { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + let conflictingAlts = null; + if (D.predicates!==null) { + if (this.debug) { + console.log("DFA state has preds in DFA sim LL failover"); + } + const conflictIndex = input.index; + if(conflictIndex !== startIndex) { + input.seek(startIndex); + } + conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true); + if (conflictingAlts.length===1) { + if(this.debug) { + console.log("Full LL avoided"); + } + return conflictingAlts.minValue(); + } + if (conflictIndex !== startIndex) { + // restore the index so reporting the fallback to full + // context occurs with the index at the correct spot + input.seek(conflictIndex); + } + } + if (this.dfa_debug) { + console.log("ctx sensitive state " + outerContext +" in " + D); + } + const fullCtx = true; + const s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx); + this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index); + alt = this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext); + return alt; + } + if (D.isAcceptState) { + if (D.predicates===null) { + return D.prediction; + } + const stopIndex = input.index; + input.seek(startIndex); + const alts = this.evalSemanticContext(D.predicates, outerContext, true); + if (alts.length===0) { + throw this.noViableAlt(input, outerContext, D.configs, startIndex); + } else if (alts.length===1) { + return alts.minValue(); + } else { + // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. + this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs); + return alts.minValue(); + } + } + previousD = D; + + if (t !== Token.EOF) { + input.consume(); + t = input.LA(1); + } + } + } + + /** + * Get an existing target state for an edge in the DFA. If the target state + * for the edge has not yet been computed or is otherwise not available, + * this method returns {@code null}. + * + * @param previousD The current DFA state + * @param t The next input symbol + * @return The existing target DFA state for the given input symbol + * {@code t}, or {@code null} if the target state for this edge is not + * already cached + */ + getExistingTargetState(previousD, t) { + const edges = previousD.edges; + if (edges===null) { + return null; + } else { + return edges[t + 1] || null; + } + } + + /** + * Compute a target state for an edge in the DFA, and attempt to add the + * computed state and corresponding edge to the DFA. + * + * @param dfa The DFA + * @param previousD The current DFA state + * @param t The next input symbol + * + * @return The computed target DFA state for the given input symbol + * {@code t}. 
If {@code t} does not lead to a valid DFA state, this method + * returns {@link //ERROR + */ + computeTargetState(dfa, previousD, t) { + const reach = this.computeReachSet(previousD.configs, t, false); + if(reach===null) { + this.addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); + return ATNSimulator.ERROR; + } + // create new target state; we'll add to DFA after it's complete + let D = new DFAState(null, reach); + + const predictedAlt = this.getUniqueAlt(reach); + + if (this.debug) { + const altSubSets = atn_PredictionMode.getConflictingAltSubsets(reach); + console.log("SLL altSubSets=" + arrayToString(altSubSets) + + /*", previous=" + previousD.configs + */ + ", configs=" + reach + + ", predict=" + predictedAlt + + ", allSubsetsConflict=" + + atn_PredictionMode.allSubsetsConflict(altSubSets) + ", conflictingAlts=" + + this.getConflictingAlts(reach)); + } + if (predictedAlt!==ATN.INVALID_ALT_NUMBER) { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true; + D.configs.uniqueAlt = predictedAlt; + D.prediction = predictedAlt; + } else if (atn_PredictionMode.hasSLLConflictTerminatingPrediction(this.predictionMode, reach)) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = this.getConflictingAlts(reach); + D.requiresFullContext = true; + // in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = true; + D.prediction = D.configs.conflictingAlts.minValue(); + } + if (D.isAcceptState && D.configs.hasSemanticContext) { + this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)); + if( D.predicates!==null) { + D.prediction = ATN.INVALID_ALT_NUMBER; + } + } + // all adds to dfa are done after we've created full D state + D = this.addDFAEdge(dfa, previousD, t, D); + return D; + } + + predicateDFAState(dfaState, decisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + const nalts = decisionState.transitions.length; + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + const altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs); + const altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts); + if (altToPred!==null) { + dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred); + dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.prediction = altsToCollectPredsFrom.minValue(); + } + } + +// comes back with reach.uniqueAlt set to a valid alt + execATNWithFullContext(dfa, D, // how far we got before failing over + s0, + input, + startIndex, + outerContext) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("execATNWithFullContext "+s0); + } + const fullCtx = true; + let foundExactAmbig = false; + let reach; + let previous = s0; + input.seek(startIndex); + let t = input.LA(1); + let predictedAlt = -1; + for (;;) { // while more work + reach = this.computeReachSet(previous, t, fullCtx); + if (reach===null) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. 
Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + const e = this.noViableAlt(input, outerContext, previous, startIndex); + input.seek(startIndex); + const alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext); + if(alt!==ATN.INVALID_ALT_NUMBER) { + return alt; + } else { + throw e; + } + } + const altSubSets = atn_PredictionMode.getConflictingAltSubsets(reach); + if(this.debug) { + console.log("LL altSubSets=" + altSubSets + ", predict=" + + atn_PredictionMode.getUniqueAlt(altSubSets) + ", resolvesToJustOneViableAlt=" + + atn_PredictionMode.resolvesToJustOneViableAlt(altSubSets)); + } + reach.uniqueAlt = this.getUniqueAlt(reach); + // unique prediction? + if(reach.uniqueAlt!==ATN.INVALID_ALT_NUMBER) { + predictedAlt = reach.uniqueAlt; + break; + } else if (this.predictionMode !== atn_PredictionMode.LL_EXACT_AMBIG_DETECTION) { + predictedAlt = atn_PredictionMode.resolvesToJustOneViableAlt(altSubSets); + if(predictedAlt !== ATN.INVALID_ALT_NUMBER) { + break; + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if (atn_PredictionMode.allSubsetsConflict(altSubSets) && atn_PredictionMode.allSubsetsEqual(altSubSets)) { + foundExactAmbig = true; + predictedAlt = atn_PredictionMode.getSingleViableAlt(altSubSets); + break; + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach; + if( t !== Token.EOF) { + input.consume(); + t = input.LA(1); + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if (reach.uniqueAlt !== ATN.INVALID_ALT_NUMBER ) { + this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index); + return predictedAlt; + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In this case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve this without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. 
+ + this.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, null, reach); + + return predictedAlt; + } + + computeReachSet(closure, t, fullCtx) { + if (this.debug) { + console.log("in computeReachSet, starting closure: " + closure); + } + if( this.mergeCache===null) { + this.mergeCache = new DoubleDict(); + } + const intermediate = new ATNConfigSet(fullCtx); + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative matching the longest overall sequence is + // chosen when multiple such configurations can match the input. + + let skippedStopStates = null; + + // First figure out where we can reach on input t + for (let i=0; i+ * The prediction context must be considered by this filter to address + * situations like the following. + *
+ *
+ *
+ * grammar TA;
+ * prog: statement* EOF;
+ * statement: letterA | statement letterA 'b' ;
+ * letterA: 'a';
+ *
+ *
+ * + * In the above grammar, the ATN state immediately before the token + * reference {@code 'a'} in {@code letterA} is reachable from the left edge + * of both the primary and closure blocks of the left-recursive rule + * {@code statement}. The prediction context associated with each of these + * configurations distinguishes between them, and prevents the alternative + * which stepped out to {@code prog} (and then back in to {@code statement}) + * from being eliminated by the filter. + *
+ * + * @param configs The configuration set computed by + * {@link //computeStartState} as the start state for the DFA. + * @return The transformed configuration set representing the start state + * for a precedence DFA at a particular precedence level (determined by + * calling {@link Parser//getPrecedence}) + */ + applyPrecedenceFilter(configs) { + let config; + const statesFromAlt1 = []; + const configSet = new ATNConfigSet(configs.fullCtx); + for(let i=0; i+ * In some scenarios, the algorithm described above could predict an + * alternative which will result in a {@link FailedPredicateException} in + * the parser. Specifically, this could occur if the only configuration + * capable of successfully parsing to the end of the decision rule is + * blocked by a semantic predicate. By choosing this alternative within + * {@link //adaptivePredict} instead of throwing a + * {@link NoViableAltException}, the resulting + * {@link FailedPredicateException} in the parser will identify the specific + * predicate which is preventing the parser from successfully parsing the + * decision rule, which helps developers identify and correct logic errors + * in semantic predicates. + *
+ * + * @param configs The ATN configurations which were valid immediately before + * the {@link //ERROR} state was reached + * @param outerContext The is the \gamma_0 initial parser context from the paper + * or the parser stack at the instant before prediction commences. + * + * @return The value to return from {@link //adaptivePredict}, or + * {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not + * identified and {@link //adaptivePredict} should report an error instead + */ + getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs, outerContext) { + const cfgs = this.splitAccordingToSemanticValidity(configs, outerContext); + const semValidConfigs = cfgs[0]; + const semInvalidConfigs = cfgs[1]; + let alt = this.getAltThatFinishedDecisionEntryRule(semValidConfigs); + if (alt!==ATN.INVALID_ALT_NUMBER) { // semantically/syntactically viable path exists + return alt; + } + // Is there a syntactically valid path with a failed pred? + if (semInvalidConfigs.items.length>0) { + alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs); + if (alt!==ATN.INVALID_ALT_NUMBER) { // syntactically viable path exists + return alt; + } + } + return ATN.INVALID_ALT_NUMBER; + } + + getAltThatFinishedDecisionEntryRule(configs) { + const alts = []; + for(let i=0;iIf {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and + * does not change the DFA.
+ * + * @param dfa The dfa + * @param D The DFA state to add + * @return The state stored in the DFA. This will be either the existing + * state if {@code D} is already in the DFA, or {@code D} itself if the + * state was not already present + */ + addDFAState(dfa, D) { + if (D === ATNSimulator.ERROR) { + return D; + } + const existing = dfa.states.get(D); + if(existing!==null) { + return existing; + } + D.stateNumber = dfa.states.length; + if (! D.configs.readOnly) { + D.configs.optimizeConfigs(this); + D.configs.setReadonly(true); + } + dfa.states.add(D); + if (this.debug) { + console.log("adding new DFA state: " + D); + } + return D; + } + + reportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs); + } + } + + reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportContextSensitivity decision=" + dfa.decision + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs); + } + } + + // If context sensitive parsing, we know it's ambiguity not conflict// + reportAmbiguity(dfa, D, startIndex, stopIndex, + exact, ambigAlts, configs ) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportAmbiguity " + ambigAlts + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs); + } + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/index.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + +/* harmony default export */ const atn = ({ ATN: ATN, ATNDeserializer: ATNDeserializer, LexerATNSimulator: LexerATNSimulator, ParserATNSimulator: ParserATNSimulator, PredictionMode: atn_PredictionMode }); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/dfa/DFASerializer.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + +/** + * A DFA walker that knows how to dump them to serialized strings. + */ +class DFASerializer { + constructor(dfa, literalNames, symbolicNames) { + this.dfa = dfa; + this.literalNames = literalNames || []; + this.symbolicNames = symbolicNames || []; + } + + toString() { + if(this.dfa.s0 === null) { + return null; + } + let buf = ""; + const states = this.dfa.sortedStates(); + for(let i=0; iThe default implementation simply calls {@link //endErrorCondition} to + * ensure that the handler is not in error recovery mode.
+ */ + reset(recognizer) { + this.endErrorCondition(recognizer); + } + + /** + * This method is called to enter error recovery mode when a recognition + * exception is reported. + * + * @param recognizer the parser instance + */ + beginErrorCondition(recognizer) { + this.errorRecoveryMode = true; + } + + inErrorRecoveryMode(recognizer) { + return this.errorRecoveryMode; + } + + /** + * This method is called to leave error recovery mode after recovering from + * a recognition exception. + * @param recognizer + */ + endErrorCondition(recognizer) { + this.errorRecoveryMode = false; + this.lastErrorStates = null; + this.lastErrorIndex = -1; + } + + /** + * {@inheritDoc} + *The default implementation simply calls {@link //endErrorCondition}.
+ */ + reportMatch(recognizer) { + this.endErrorCondition(recognizer); + } + + /** + * {@inheritDoc} + * + *The default implementation returns immediately if the handler is already + * in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} + * and dispatches the reporting task based on the runtime type of {@code e} + * according to the following table.
+ * + *The default implementation resynchronizes the parser by consuming tokens + * until we find one in the resynchronization set--loosely the set of tokens + * that can follow the current rule.
+ * + */ + recover(recognizer, e) { + if (this.lastErrorIndex===recognizer.getInputStream().index && + this.lastErrorStates !== null && this.lastErrorStates.indexOf(recognizer.state)>=0) { + // uh oh, another error at same token index and previously-visited + // state in ATN; must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop; this is a failsafe. + recognizer.consume(); + } + this.lastErrorIndex = recognizer._input.index; + if (this.lastErrorStates === null) { + this.lastErrorStates = []; + } + this.lastErrorStates.push(recognizer.state); + const followSet = this.getErrorRecoverySet(recognizer) + this.consumeUntil(recognizer, followSet); + } + + /** + * The default implementation of {@link ANTLRErrorStrategy//sync} makes sure + * that the current lookahead symbol is consistent with what were expecting + * at this point in the ATN. You can call this anytime but ANTLR only + * generates code to check before subrules/loops and each iteration. + * + *Implements Jim Idle's magic sync mechanism in closures and optional + * subrules. E.g.,
+ * + *+ * a : sync ( stuff sync )* ; + * sync : {consume to what can follow sync} ; + *+ * + * At the start of a sub rule upon error, {@link //sync} performs single + * token deletion, if possible. If it can't do that, it bails on the current + * rule and uses the default error recovery, which consumes until the + * resynchronization set of the current rule. + * + *
If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + * with an empty alternative), then the expected set includes what follows + * the subrule.
+ * + *During loop iteration, it consumes until it sees a token that can start a + * sub rule or what follows loop. Yes, that is pretty aggressive. We opt to + * stay in the loop as long as possible.
+ * + *ORIGINS
+ * + *Previous versions of ANTLR did a poor job of their recovery within loops. + * A single mismatch token or missing token would force the parser to bail + * out of the entire rules surrounding the loop. So, for rule
+ * + *+ * classDef : 'class' ID '{' member* '}' + *+ * + * input with an extra token between members would force the parser to + * consume until it found the next class definition rather than the next + * member definition of the current class. + * + *
This functionality costs a little bit of effort because the parser has to + * compare the token set at the start of the loop and at each iteration. If for + * some reason speed is suffering for you, you can turn off this + * functionality by simply overriding this method as a blank { }.
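+ *
+ * <p>Sketch of opting out (subclass name hypothetical):</p>
+ *
+ * <pre>
+ * class NoSyncErrorStrategy extends DefaultErrorStrategy {
+ *     sync(recognizer) { }   // disable the token-set comparison entirely
+ * }
+ * parser._errHandler = new NoSyncErrorStrategy();
+ * </pre>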
+ * + */
+ sync(recognizer) {
+     // If already recovering, don't try to sync
+     if (this.inErrorRecoveryMode(recognizer)) {
+         return;
+     }
+     const s = recognizer._interp.atn.states[recognizer.state];
+     const la = recognizer.getTokenStream().LA(1);
+     // try cheaper subset first; might get lucky. seems to shave a wee bit off
+     const nextTokens = recognizer.atn.nextTokens(s);
+     if(nextTokens.contains(la)) {
+         this.nextTokensContext = null;
+         this.nextTokenState = ATNState.INVALID_STATE_NUMBER;
+         return;
+     } else if (nextTokens.contains(Token.EPSILON)) {
+         if(this.nextTokensContext === null) {
+             // It's possible the next token won't match; information tracked
+             // by sync is restricted for performance.
+             this.nextTokensContext = recognizer._ctx;
+             this.nextTokensState = recognizer._stateNumber;
+         }
+         return;
+     }
+     switch (s.stateType) {
+     case ATNState.BLOCK_START:
+     case ATNState.STAR_BLOCK_START:
+     case ATNState.PLUS_BLOCK_START:
+     case ATNState.STAR_LOOP_ENTRY:
+         // report error and recover if possible
+         if( this.singleTokenDeletion(recognizer) !== null) {
+             return;
+         } else {
+             throw new InputMismatchException(recognizer);
+         }
+     case ATNState.PLUS_LOOP_BACK:
+     case ATNState.STAR_LOOP_BACK:
+     {
+         this.reportUnwantedToken(recognizer);
+         const expecting = new IntervalSet();
+         expecting.addSet(recognizer.getExpectedTokens());
+         const whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer));
+         this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule);
+     }
+         break;
+     default:
+         // do nothing if we can't identify the exact kind of ATN state
+     }
+ }
+
+ /**
+  * This is called by {@link //reportError} when the exception is a
+  * {@link NoViableAltException}.
+  *
+  * @see //reportError
+  *
+  * @param recognizer the parser instance
+  * @param e the recognition exception
+  */
+ reportNoViableAlternative(recognizer, e) {
+     const tokens = recognizer.getTokenStream()
+     let input
+     if(tokens !== null) {
+         if (e.startToken.type===Token.EOF) {
+             input = "<EOF>";
+         } else {
+             input = tokens.getText(new Interval(e.startToken.tokenIndex, e.offendingToken.tokenIndex));
+         }
+     } else {
+         input = "<unknown input>";
+     }
+     const msg = "no viable alternative at input " + this.escapeWSAndQuote(input);
+     recognizer.notifyErrorListeners(msg, e.offendingToken, e);
+ }
+
+ /**
+  * This is called by {@link //reportError} when the exception is an
+  * {@link InputMismatchException}.
+  *
+  * @see //reportError
+  *
+  * @param recognizer the parser instance
+  * @param e the recognition exception
+  */
+ reportInputMismatch(recognizer, e) {
+     const msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) +
+         " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames);
+     recognizer.notifyErrorListeners(msg, e.offendingToken, e);
+ }
+
+ /**
+  * This is called by {@link //reportError} when the exception is a
+  * {@link FailedPredicateException}.
+  *
+  * @see //reportError
+  *
+  * @param recognizer the parser instance
+  * @param e the recognition exception
+  */
+ reportFailedPredicate(recognizer, e) {
+     const ruleName = recognizer.ruleNames[recognizer._ctx.ruleIndex];
+     const msg = "rule " + ruleName + " " + e.message;
+     recognizer.notifyErrorListeners(msg, e.offendingToken, e);
+ }
+
+ /**
+  * This method is called to report a syntax error which requires the removal
+  * of a token from the input stream. At the time this method is called, the
+  * erroneous symbol is the current {@code LT(1)} symbol and has not yet been
+  * removed from the input stream. When this method returns,
+  * {@code recognizer} is in error recovery mode.
+ * + *The default implementation simply returns if the handler is already in + * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to + * enter error recovery mode, followed by calling + * {@link Parser//notifyErrorListeners}.
+ * + * @param recognizer the parser instance + * + */ + reportUnwantedToken(recognizer) { + if (this.inErrorRecoveryMode(recognizer)) { + return; + } + this.beginErrorCondition(recognizer); + const t = recognizer.getCurrentToken() + const tokenName = this.getTokenErrorDisplay(t) + const expecting = this.getExpectedTokens(recognizer) + const msg = "extraneous input " + tokenName + " expecting " + + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, t, null); + } + + /** + * This method is called to report a syntax error which requires the + * insertion of a missing token into the input stream. At the time this + * method is called, the missing token has not yet been inserted. When this + * method returns, {@code recognizer} is in error recovery mode. + * + *This method is called when {@link //singleTokenInsertion} identifies + * single-token insertion as a viable recovery strategy for a mismatched + * input error.
+ * + *The default implementation simply returns if the handler is already in + * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to + * enter error recovery mode, followed by calling + * {@link Parser//notifyErrorListeners}.
+ * + * @param recognizer the parser instance + */ + reportMissingToken(recognizer) { + if ( this.inErrorRecoveryMode(recognizer)) { + return; + } + this.beginErrorCondition(recognizer); + const t = recognizer.getCurrentToken() + const expecting = this.getExpectedTokens(recognizer) + const msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + + " at " + this.getTokenErrorDisplay(t) + recognizer.notifyErrorListeners(msg, t, null); + } + + /** + *The default implementation attempts to recover from the mismatched input + * by using single token insertion and deletion as described below. If the + * recovery attempt fails, this method throws an + * {@link InputMismatchException}.
+ * + *EXTRA TOKEN (single token deletion)
+ * + *{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + * right token, however, then assume {@code LA(1)} is some extra spurious + * token and delete it. Then consume and return the next token (which was + * the {@code LA(2)} token) as the successful result of the match operation.
+ * + *This recovery strategy is implemented by {@link + * //singleTokenDeletion}.
+ * + *MISSING TOKEN (single token insertion)
+ * + *If current token (at {@code LA(1)}) is consistent with what could come + * after the expected {@code LA(1)} token, then assume the token is missing + * and use the parser's {@link TokenFactory} to create it on the fly. The + * "insertion" is performed by returning the created token as the successful + * result of the match operation.
+ * + *This recovery strategy is implemented by {@link + * //singleTokenInsertion}.
+ * + *EXAMPLE
+ * + *For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + * the parser returns from the nested call to {@code expr}, it will have + * call chain:
+ * + *+ * stat → expr → atom + *+ * + * and it will be trying to match the {@code ')'} at this point in the + * derivation: + * + *
+ * => ID '=' '(' INT ')' ('+' atom)* ';' + * ^ + *+ * + * The attempt to match {@code ')'} will fail when it sees {@code ';'} and + * call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} + * is in the set of tokens that can follow the {@code ')'} token reference + * in rule {@code atom}. It can assume that you forgot the {@code ')'}. + */ + recoverInline(recognizer) { + // SINGLE TOKEN DELETION + const matchedSymbol = this.singleTokenDeletion(recognizer) + if (matchedSymbol !== null) { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.consume(); + return matchedSymbol; + } + // SINGLE TOKEN INSERTION + if (this.singleTokenInsertion(recognizer)) { + return this.getMissingSymbol(recognizer); + } + // even that didn't work; must throw the exception + throw new InputMismatchException(recognizer); + } + + /** + * This method implements the single-token insertion inline error recovery + * strategy. It is called by {@link //recoverInline} if the single-token + * deletion strategy fails to recover from the mismatched input. If this + * method returns {@code true}, {@code recognizer} will be in error recovery + * mode. + * + *
This method determines whether or not single-token insertion is viable by + * checking if the {@code LA(1)} input symbol could be successfully matched + * if it were instead the {@code LA(2)} symbol. If this method returns + * {@code true}, the caller is responsible for creating and inserting a + * token with the correct type to produce this behavior.
+ * + * @param recognizer the parser instance + * @return {@code true} if single-token insertion is a viable recovery + * strategy for the current mismatched input, otherwise {@code false} + */ + singleTokenInsertion(recognizer) { + const currentSymbolType = recognizer.getTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token; error recovery + // is free to conjure up and insert the missing token + const atn = recognizer._interp.atn + const currentState = atn.states[recognizer.state] + const next = currentState.transitions[0].target + const expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) + if (expectingAtLL2.contains(currentSymbolType) ){ + this.reportMissingToken(recognizer); + return true; + } else { + return false; + } + } + + /** + * This method implements the single-token deletion inline error recovery + * strategy. It is called by {@link //recoverInline} to attempt to recover + * from mismatched input. If this method returns null, the parser and error + * handler state will not have changed. If this method returns non-null, + * {@code recognizer} will not be in error recovery mode since the + * returned token was a successful match. + * + *If the single-token deletion is successful, this method calls + * {@link //reportUnwantedToken} to report the error, followed by + * {@link Parser//consume} to actually "delete" the extraneous token. Then, + * before returning {@link //reportMatch} is called to signal a successful + * match.
+ * + * @param recognizer the parser instance + * @return the successfully matched {@link Token} instance if single-token + * deletion successfully recovers from the mismatched input, otherwise + * {@code null} + */ + singleTokenDeletion(recognizer) { + const nextTokenType = recognizer.getTokenStream().LA(2) + const expecting = this.getExpectedTokens(recognizer) + if (expecting.contains(nextTokenType)) { + this.reportUnwantedToken(recognizer); + // print("recoverFromMismatchedToken deleting " \ + // + str(recognizer.getTokenStream().LT(1)) \ + // + " since " + str(recognizer.getTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.consume(); // simply delete extra token + // we want to return the token we're actually matching + const matchedSymbol = recognizer.getCurrentToken() + this.reportMatch(recognizer); // we know current token is correct + return matchedSymbol; + } else { + return null; + } + } + + /** + * Conjure up a missing token during error recovery. + * + * The recognizer attempts to recover from single missing + * symbols. But, actions might refer to that missing symbol. + * For example, x=ID {f($x);}. The action clearly assumes + * that there has been an identifier matched previously and that + * $x points at that token. If that token is missing, but + * the next token in the stream is what we want we assume that + * this token is missing and we keep going. Because we + * have to return some token to replace the missing token, + * we have to conjure one up. This method gives the user control + * over the tokens returned for missing tokens. Mostly, + * you will want to create something special for identifier + * tokens. For literals such as '{' and ',', the default + * action in the parser or tree parser works. It simply creates + * a CommonToken of the appropriate type. The text will be the token. + * If you change what tokens must be created by the lexer, + * override this method to create the appropriate tokens. + * + */ + getMissingSymbol(recognizer) { + const currentSymbol = recognizer.getCurrentToken() + const expecting = this.getExpectedTokens(recognizer) + const expectedTokenType = expecting.first() // get any element + let tokenText + if (expectedTokenType===Token.EOF) { + tokenText = "+ * This error strategy is useful in the following scenarios.
+ *
+ * <ul>
+ * <li><strong>Two-stage parsing:</strong> This error strategy allows the first
+ * stage of two-stage parsing to immediately terminate if an error is
+ * encountered, and immediately fall back to the second stage. In addition to
+ * avoiding wasted work by attempting to recover from errors here, the empty
+ * implementation of {@link BailErrorStrategy//sync} improves the performance of
+ * the first stage.</li>
+ * <li><strong>Silent validation:</strong> When syntax errors are not being
+ * reported or logged, and the parse result is simply ignored if errors occur,
+ * the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+ * when the result will be ignored either way.</li>
+ * </ul>
+ *
+ * <p>
+ * {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
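+ *
+ * <p>Hedged sketch of the two-stage pattern as it applies to this JavaScript
+ * runtime, where the strategy is installed through the {@code _errHandler}
+ * field ({@code myStartRule} is a hypothetical generated rule):</p>
+ *
+ * <pre>{@code
+ * parser._errHandler = new BailErrorStrategy();
+ * try {
+ *     parser.myStartRule();              // stage 1: bail on first error
+ * } catch (e) {
+ *     parser.reset();                    // rewind input, clear state
+ *     parser._errHandler = new DefaultErrorStrategy();
+ *     parser.myStartRule();              // stage 2: full error recovery
+ * }
+ * }</pre>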
+ * + * @see Parser//setErrorHandler(ANTLRErrorStrategy) + * */ +class BailErrorStrategy extends DefaultErrorStrategy { + + constructor() { + super(); + } + + /** + * Instead of recovering from exception {@code e}, re-throw it wrapped + * in a {@link ParseCancellationException} so it is not caught by the + * rule function catches. Use {@link Exception//getCause()} to get the + * original {@link RecognitionException}. + */ + recover(recognizer, e) { + let context = recognizer._ctx + while (context !== null) { + context.exception = e; + context = context.parentCtx; + } + throw new ParseCancellationException(e); + } + + /** + * Make sure we don't attempt to recover inline; if the parser + * successfully recovers, it won't throw an exception. + */ + recoverInline(recognizer) { + this.recover(recognizer, new InputMismatchException(recognizer)); + } + +// Make sure we don't attempt to recover from problems in subrules.// + sync(recognizer) { + // pass + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/error/index.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + + + + + +/* harmony default export */ const error = ({ + RecognitionException: RecognitionException, NoViableAltException: NoViableAltException, LexerNoViableAltException: LexerNoViableAltException, InputMismatchException: InputMismatchException, FailedPredicateException: FailedPredicateException, + DiagnosticErrorListener: DiagnosticErrorListener, BailErrorStrategy: BailErrorStrategy, DefaultErrorStrategy: DefaultErrorStrategy, ErrorListener: ErrorListener +}); + +// EXTERNAL MODULE: fs (ignored) +var fs_ignored_ = __webpack_require__(262); +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/CharStreams.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + +/** + * Utility functions to create InputStreams from various sources. + * + * All returned InputStreams support the full range of Unicode + * up to U+10FFFF (the default behavior of InputStream only supports + * code points up to U+FFFF). + */ +/* harmony default export */ const CharStreams = ({ + // Creates an InputStream from a string. + fromString: function(str) { + return new InputStream(str, true); + }, + + /** + * Asynchronously creates an InputStream from a blob given the + * encoding of the bytes in that blob (defaults to 'utf8' if + * encoding is null). + * + * Invokes onLoad(result) on success, onError(error) on + * failure. + */ + fromBlob: function(blob, encoding, onLoad, onError) { + const reader = new window.FileReader(); + reader.onload = function(e) { + const is = new InputStream(e.target.result, true); + onLoad(is); + }; + reader.onerror = onError; + reader.readAsText(blob, encoding); + }, + + /** + * Creates an InputStream from a Buffer given the + * encoding of the bytes in that buffer (defaults to 'utf8' if + * encoding is null). + */ + fromBuffer: function(buffer, encoding) { + return new InputStream(buffer.toString(encoding), true); + }, + + /** Asynchronously creates an InputStream from a file on disk given + * the encoding of the bytes in that file (defaults to 'utf8' if + * encoding is null). + * + * Invokes callback(error, result) on completion. 
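+     *
+     * <p>For example (file name is illustrative):</p>
+     *
+     * <pre>{@code
+     * CharStreams.fromPath("input.n3", "utf8", (err, stream) => {
+     *     if (err == null) {
+     *         console.log(stream.toString());
+     *     }
+     * });
+     * }</pre>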
+ */ + fromPath: function(path, encoding, callback) { + fs_ignored_.readFile(path, encoding, function(err, data) { + let is = null; + if (data !== null) { + is = new InputStream(data, true); + } + callback(err, is); + }); + }, + + /** + * Synchronously creates an InputStream given a path to a file + * on disk and the encoding of the bytes in that file (defaults to + * 'utf8' if encoding is null). + */ + fromPathSync: function(path, encoding) { + const data = fs_ignored_.readFileSync(path, encoding); + return new InputStream(data, true); + } +}); + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/FileStream.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + +/** + * This is an InputStream that is loaded from a file all at once + * when you construct the object. + */ +class FileStream extends InputStream { + constructor(fileName, decodeToUnicodeCodePoints) { + const data = fs_ignored_.readFileSync(fileName, "utf8"); + super(data, decodeToUnicodeCodePoints); + this.fileName = fileName; + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/TraceListener.js +/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved. + * Use is of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + +class TraceListener extends ParseTreeListener { + constructor(parser) { + super(); + this.parser = parser; + } + + enterEveryRule(ctx) { + console.log("enter " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); + } + + visitTerminal(node) { + console.log("consume " + node.symbol + " rule " + this.parser.ruleNames[this.parser._ctx.ruleIndex]); + } + + exitEveryRule(ctx) { + console.log("exit " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); + } +} + +;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/Parser.js +/* Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + + + + + + + + + + +class Parser extends Recognizer { + /** + * this is all the parsing support code essentially; most of it is error + * recovery stuff. + */ + constructor(input) { + super(); + // The input stream. + this._input = null; + /** + * The error handling strategy for the parser. The default value is a new + * instance of {@link DefaultErrorStrategy}. + */ + this._errHandler = new DefaultErrorStrategy(); + this._precedenceStack = []; + this._precedenceStack.push(0); + /** + * The {@link ParserRuleContext} object for the currently executing rule. + * this is always non-null during the parsing process. + */ + this._ctx = null; + /** + * Specifies whether or not the parser should construct a parse tree during + * the parsing process. The default value is {@code true}. + */ + this.buildParseTrees = true; + /** + * When {@link //setTrace}{@code (true)} is called, a reference to the + * {@link TraceListener} is stored here so it can be easily removed in a + * later call to {@link //setTrace}{@code (false)}. The listener itself is + * implemented as a parser listener so this field is not directly used by + * other parser methods. 
+ */ + this._tracer = null; + /** + * The list of {@link ParseTreeListener} listeners registered to receive + * events during the parse. + */ + this._parseListeners = null; + /** + * The number of syntax errors reported during parsing. this value is + * incremented each time {@link //notifyErrorListeners} is called. + */ + this._syntaxErrors = 0; + this.setInputStream(input); + } + + // reset the parser's state + reset() { + if (this._input !== null) { + this._input.seek(0); + } + this._errHandler.reset(this); + this._ctx = null; + this._syntaxErrors = 0; + this.setTrace(false); + this._precedenceStack = []; + this._precedenceStack.push(0); + if (this._interp !== null) { + this._interp.reset(); + } + } + + /** + * Match current input symbol against {@code ttype}. If the symbol type + * matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are + * called to complete the match process. + * + *If the symbol type does not match, + * {@link ANTLRErrorStrategy//recoverInline} is called on the current error + * strategy to attempt recovery. If {@link //getBuildParseTree} is + * {@code true} and the token index of the symbol returned by + * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to + * the parse tree by calling {@link ParserRuleContext//addErrorNode}.
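+     *
+     * <p>Hedged sketch of how a generated rule method drives this call
+     * ({@code MyParser.ID}, {@code MyParser.EQ} and {@code MyParser.SEMI}
+     * are hypothetical generated token types):</p>
+     *
+     * <pre>{@code
+     * // generated from: stat : ID '=' expr ';' ;
+     * this.match(MyParser.ID);
+     * this.match(MyParser.EQ);
+     * this.expr();
+     * this.match(MyParser.SEMI);
+     * }</pre>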
+ * + * @param ttype the token type to match + * @return the matched symbol + * @throws RecognitionException if the current input symbol did not match + * {@code ttype} and the error strategy could not recover from the + * mismatched symbol + */ + match(ttype) { + let t = this.getCurrentToken(); + if (t.type === ttype) { + this._errHandler.reportMatch(this); + this.consume(); + } else { + t = this._errHandler.recoverInline(this); + if (this.buildParseTrees && t.tokenIndex === -1) { + // we must have conjured up a new token during single token + // insertion + // if it's not the current symbol + this._ctx.addErrorNode(t); + } + } + return t; + } + + /** + * Match current input symbol as a wildcard. If the symbol type matches + * (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} + * and {@link //consume} are called to complete the match process. + * + *If the symbol type does not match, + * {@link ANTLRErrorStrategy//recoverInline} is called on the current error + * strategy to attempt recovery. If {@link //getBuildParseTree} is + * {@code true} and the token index of the symbol returned by + * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to + * the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+     *
+     * @return the matched symbol
+     * @throws RecognitionException if the current input symbol did not match
+     * a wildcard and the error strategy could not recover from the mismatched
+     * symbol
+     */
+    matchWildcard() {
+        let t = this.getCurrentToken();
+        if (t.type > 0) {
+            this._errHandler.reportMatch(this);
+            this.consume();
+        } else {
+            t = this._errHandler.recoverInline(this);
+            if (this.buildParseTrees && t.tokenIndex === -1) {
+                // we must have conjured up a new token during single token
+                // insertion if it's not the current symbol
+                this._ctx.addErrorNode(t);
+            }
+        }
+        return t;
+    }
+
+    getParseListeners() {
+        return this._parseListeners || [];
+    }
+
+    /**
+     * Registers {@code listener} to receive events during the parsing process.
+     *
+     * <p>To support output-preserving grammar transformations (including but not
+     * limited to left-recursion removal, automated left-factoring, and
+     * optimized code generation), calls to listener methods during the parse
+     * may differ substantially from calls made by
+     * {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+     * particular, rule entry and exit events may occur in a different order
+     * during the parse than after the parse. In addition, calls to certain
+     * rule entry methods may be omitted.</p>
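+     *
+     * <p>Minimal hedged sketch of an inline listener; a real listener
+     * usually extends the generated listener base class, since generated
+     * contexts also dispatch rule-specific enter/exit callbacks:</p>
+     *
+     * <pre>{@code
+     * parser.addParseListener({
+     *     enterEveryRule: ctx => console.log("enter", ctx.ruleIndex),
+     *     exitEveryRule:  ctx => console.log("exit", ctx.ruleIndex),
+     *     visitTerminal:  node => console.log("token", node.getText()),
+     *     visitErrorNode: node => {}
+     * });
+     * }</pre>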
+ * + *With the following specific exceptions, calls to listener events are + * deterministic, i.e. for identical input the calls to listener + * methods will be the same.
+     *
+     * <ul>
+     * <li>Alterations to the grammar used to generate code may change the
+     * behavior of the listener calls.</li>
+     * <li>Alterations to the command line options passed to ANTLR 4 when
+     * generating the parser may change the behavior of the listener calls.</li>
+     * <li>Changing the version of the ANTLR Tool used to generate the parser
+     * may change the behavior of the listener calls.</li>
+     * </ul>
+     *
+     * @param listener the listener to add
+     * @throws NullPointerException if {@code listener} is {@code null}
+     */
+    addParseListener(listener) {
+        if (listener === null) {
+            throw "listener";
+        }
+        if (this._parseListeners === null) {
+            this._parseListeners = [];
+        }
+        this._parseListeners.push(listener);
+    }
+
+    /**
+     * Remove {@code listener} from the list of parse listeners.
+     *
+     * <p>If {@code listener} is {@code null} or has not been added as a parse
+     * listener, this method does nothing.</p>
+ * @param listener the listener to remove + */ + removeParseListener(listener) { + if (this._parseListeners !== null) { + const idx = this._parseListeners.indexOf(listener); + if (idx >= 0) { + this._parseListeners.splice(idx, 1); + } + if (this._parseListeners.length === 0) { + this._parseListeners = null; + } + } + } + + // Remove all parse listeners. + removeParseListeners() { + this._parseListeners = null; + } + + // Notify any parse listeners of an enter rule event. + triggerEnterRuleEvent() { + if (this._parseListeners !== null) { + const ctx = this._ctx; + this._parseListeners.forEach(function (listener) { + listener.enterEveryRule(ctx); + ctx.enterRule(listener); + }); + } + } + + /** + * Notify any parse listeners of an exit rule event. + * @see //addParseListener + */ + triggerExitRuleEvent() { + if (this._parseListeners !== null) { + // reverse order walk of listeners + const ctx = this._ctx; + this._parseListeners.slice(0).reverse().forEach(function (listener) { + ctx.exitRule(listener); + listener.exitEveryRule(ctx); + }); + } + } + + getTokenFactory() { + return this._input.tokenSource._factory; + } + + // Tell our token source and error strategy about a new way to create tokens. + setTokenFactory(factory) { + this._input.tokenSource._factory = factory; + } + + /** + * The ATN with bypass alternatives is expensive to create so we create it + * lazily. + * + * @throws UnsupportedOperationException if the current parser does not + * implement the {@link //getSerializedATN()} method. + */ + getATNWithBypassAlts() { + const serializedAtn = this.getSerializedATN(); + if (serializedAtn === null) { + throw "The current parser does not support an ATN with bypass alternatives."; + } + let result = this.bypassAltsAtnCache[serializedAtn]; + if (result === null) { + const deserializationOptions = new ATNDeserializationOptions(); + deserializationOptions.generateRuleBypassTransitions = true; + result = new ATNDeserializer(deserializationOptions) + .deserialize(serializedAtn); + this.bypassAltsAtnCache[serializedAtn] = result; + } + return result; + } + + getInputStream() { + return this.getTokenStream(); + } + + setInputStream(input) { + this.setTokenStream(input); + } + + getTokenStream() { + return this._input; + } + + // Set the token stream and reset the parser. + setTokenStream(input) { + this._input = null; + this.reset(); + this._input = input; + } + + /** + * Match needs to return the current input symbol, which gets put + * into the label for the associated token ref; e.g., x=ID. + */ + getCurrentToken() { + return this._input.LT(1); + } + + notifyErrorListeners(msg, offendingToken, err) { + offendingToken = offendingToken || null; + err = err || null; + if (offendingToken === null) { + offendingToken = this.getCurrentToken(); + } + this._syntaxErrors += 1; + const line = offendingToken.line; + const column = offendingToken.column; + const listener = this.getErrorListenerDispatch(); + listener.syntaxError(this, offendingToken, line, column, msg, err); + } + + /** + * Consume and return the {@linkplain //getCurrentToken current symbol}. + * + *E.g., given the following input with {@code A} being the current + * lookahead symbol, this function moves the cursor to {@code B} and returns + * {@code A}.
+ * + *+ * A B + * ^ + *+ * + * If the parser is not in error recovery mode, the consumed symbol is added + * to the parse tree using {@link ParserRuleContext//addChild(Token)}, and + * {@link ParseTreeListener//visitTerminal} is called on any parse listeners. + * If the parser is in error recovery mode, the consumed symbol is + * added to the parse tree using + * {@link ParserRuleContext//addErrorNode(Token)}, and + * {@link ParseTreeListener//visitErrorNode} is called on any parse + * listeners. + */ + consume() { + const o = this.getCurrentToken(); + if (o.type !== Token.EOF) { + this.getInputStream().consume(); + } + const hasListener = this._parseListeners !== null && this._parseListeners.length > 0; + if (this.buildParseTrees || hasListener) { + let node; + if (this._errHandler.inErrorRecoveryMode(this)) { + node = this._ctx.addErrorNode(o); + } else { + node = this._ctx.addTokenNode(o); + } + node.invokingState = this.state; + if (hasListener) { + this._parseListeners.forEach(function (listener) { + if (node instanceof ErrorNode || (node.isErrorNode !== undefined && node.isErrorNode())) { + listener.visitErrorNode(node); + } else if (node instanceof TerminalNode) { + listener.visitTerminal(node); + } + }); + } + } + return o; + } + + addContextToParseTree() { + // add current context to parent if we have a parent + if (this._ctx.parentCtx !== null) { + this._ctx.parentCtx.addChild(this._ctx); + } + } + + /** + * Always called by generated parsers upon entry to a rule. Access field + * {@link //_ctx} get the current context. + */ + enterRule(localctx, state, ruleIndex) { + this.state = state; + this._ctx = localctx; + this._ctx.start = this._input.LT(1); + if (this.buildParseTrees) { + this.addContextToParseTree(); + } + this.triggerEnterRuleEvent(); + } + + exitRule() { + this._ctx.stop = this._input.LT(-1); + // trigger event on _ctx, before it reverts to parent + this.triggerExitRuleEvent(); + this.state = this._ctx.invokingState; + this._ctx = this._ctx.parentCtx; + } + + enterOuterAlt(localctx, altNum) { + localctx.setAltNumber(altNum); + // if we have new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if (this.buildParseTrees && this._ctx !== localctx) { + if (this._ctx.parentCtx !== null) { + this._ctx.parentCtx.removeLastChild(); + this._ctx.parentCtx.addChild(localctx); + } + } + this._ctx = localctx; + } + + /** + * Get the precedence level for the top-most precedence rule. + * + * @return The precedence level for the top-most precedence rule, or -1 if + * the parser context is not nested within a precedence rule. + */ + getPrecedence() { + if (this._precedenceStack.length === 0) { + return -1; + } else { + return this._precedenceStack[this._precedenceStack.length - 1]; + } + } + + enterRecursionRule(localctx, state, ruleIndex, precedence) { + this.state = state; + this._precedenceStack.push(precedence); + this._ctx = localctx; + this._ctx.start = this._input.LT(1); + this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + + // Like {@link //enterRule} but for recursive rules. 
+ pushNewRecursionContext(localctx, state, ruleIndex) { + const previous = this._ctx; + previous.parentCtx = localctx; + previous.invokingState = state; + previous.stop = this._input.LT(-1); + + this._ctx = localctx; + this._ctx.start = previous.start; + if (this.buildParseTrees) { + this._ctx.addChild(previous); + } + this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + + unrollRecursionContexts(parentCtx) { + this._precedenceStack.pop(); + this._ctx.stop = this._input.LT(-1); + const retCtx = this._ctx; // save current ctx (return value) + // unroll so _ctx is as it was before call to recursive method + const parseListeners = this.getParseListeners(); + if (parseListeners !== null && parseListeners.length > 0) { + while (this._ctx !== parentCtx) { + this.triggerExitRuleEvent(); + this._ctx = this._ctx.parentCtx; + } + } else { + this._ctx = parentCtx; + } + // hook into tree + retCtx.parentCtx = parentCtx; + if (this.buildParseTrees && parentCtx !== null) { + // add return ctx into invoking rule's tree + parentCtx.addChild(retCtx); + } + } + + getInvokingContext(ruleIndex) { + let ctx = this._ctx; + while (ctx !== null) { + if (ctx.ruleIndex === ruleIndex) { + return ctx; + } + ctx = ctx.parentCtx; + } + return null; + } + + precpred(localctx, precedence) { + return precedence >= this._precedenceStack[this._precedenceStack.length - 1]; + } + + inContext(context) { + // TODO: useful in parser? + return false; + } + + /** + * Checks whether or not {@code symbol} can follow the current state in the + * ATN. The behavior of this method is equivalent to the following, but is + * implemented such that the complete context-sensitive follow set does not + * need to be explicitly constructed. + * + *
+ * return getExpectedTokens().contains(symbol); + *+ * + * @param symbol the symbol type to check + * @return {@code true} if {@code symbol} can follow the current state in + * the ATN, otherwise {@code false}. + */ + isExpectedToken(symbol) { + const atn = this._interp.atn; + let ctx = this._ctx; + const s = atn.states[this.state]; + let following = atn.nextTokens(s); + if (following.contains(symbol)) { + return true; + } + if (!following.contains(Token.EPSILON)) { + return false; + } + while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { + const invokingState = atn.states[ctx.invokingState]; + const rt = invokingState.transitions[0]; + following = atn.nextTokens(rt.followState); + if (following.contains(symbol)) { + return true; + } + ctx = ctx.parentCtx; + } + if (following.contains(Token.EPSILON) && symbol === Token.EOF) { + return true; + } else { + return false; + } + } + + /** + * Computes the set of input symbols which could follow the current parser + * state and context, as given by {@link //getState} and {@link //getContext}, + * respectively. + * + * @see ATN//getExpectedTokens(int, RuleContext) + */ + getExpectedTokens() { + return this._interp.atn.getExpectedTokens(this.state, this._ctx); + } + + getExpectedTokensWithinCurrentRule() { + const atn = this._interp.atn; + const s = atn.states[this.state]; + return atn.nextTokens(s); + } + + // Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. + getRuleIndex(ruleName) { + const ruleIndex = this.getRuleIndexMap()[ruleName]; + if (ruleIndex !== null) { + return ruleIndex; + } else { + return -1; + } + } + + /** + * Return List<String> of the rule names in your parser instance + * leading up to a call to the current rule. You could override if + * you want more details such as the file/line info of where + * in the ATN a rule is invoked. + * + * this is very useful for error messages. + */ + getRuleInvocationStack(p) { + p = p || null; + if (p === null) { + p = this._ctx; + } + const stack = []; + while (p !== null) { + // compute what follows who invoked us + const ruleIndex = p.ruleIndex; + if (ruleIndex < 0) { + stack.push("n/a"); + } else { + stack.push(this.ruleNames[ruleIndex]); + } + p = p.parentCtx; + } + return stack; + } + + // For debugging and other purposes. + getDFAStrings() { + return this._interp.decisionToDFA.toString(); + } + + // For debugging and other purposes. + dumpDFA() { + let seenOne = false; + for (let i = 0; i < this._interp.decisionToDFA.length; i++) { + const dfa = this._interp.decisionToDFA[i]; + if (dfa.states.length > 0) { + if (seenOne) { + console.log(); + } + this.printer.println("Decision " + dfa.decision + ":"); + this.printer.print(dfa.toString(this.literalNames, this.symbolicNames)); + seenOne = true; + } + } + } + + /* + " printer = function() {\r\n" + + " this.println = function(s) { document.getElementById('output') += s + '\\n'; }\r\n" + + " this.print = function(s) { document.getElementById('output') += s; }\r\n" + + " };\r\n" + + */ + getSourceName() { + return this._input.sourceName; + } + + /** + * During a parse is sometimes useful to listen in on the rule entry and exit + * events as well as token matches. this is for quick and dirty debugging. 
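+     *
+     * <p>For instance ({@code myStartRule} is a hypothetical generated
+     * rule):</p>
+     *
+     * <pre>{@code
+     * parser.setTrace(true);   // attaches a TraceListener
+     * parser.myStartRule();    // logs "enter ...", "consume ...", "exit ..."
+     * parser.setTrace(false);  // detaches it again
+     * }</pre>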
+     */
+    setTrace(trace) {
+        if (!trace) {
+            this.removeParseListener(this._tracer);
+            this._tracer = null;
+        } else {
+            if (this._tracer !== null) {
+                this.removeParseListener(this._tracer);
+            }
+            this._tracer = new TraceListener(this);
+            this.addParseListener(this._tracer);
+        }
+    }
+}
+
+/**
+ * this field maps from the serialized ATN string to the deserialized
+ * {@link ATN} with bypass alternatives.
+ *
+ * @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
+ */
+Parser.bypassAltsAtnCache = {};
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/atn/PredictionContextCache.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+
+/**
+ * Used to cache {@link PredictionContext} objects. It's used for the shared
+ * context cache associated with contexts in DFA states. This cache
+ * can be used for both lexers and parsers.
+ */
+class PredictionContextCache {
+
+    constructor() {
+        this.cache = new HashMap_HashMap();
+    }
+
+    /**
+     * Add a context to the cache and return it. If the context already exists,
+     * return that one instead and do not add a new context to the cache.
+     * Protect shared cache from unsafe thread access.
+     */
+    add(ctx) {
+        if (ctx === PredictionContext.EMPTY) {
+            return PredictionContext.EMPTY;
+        }
+        const existing = this.cache.get(ctx) || null;
+        if (existing !== null) {
+            return existing;
+        }
+        this.cache.set(ctx, ctx);
+        return ctx;
+    }
+
+    get(ctx) {
+        return this.cache.get(ctx) || null;
+    }
+
+    get length(){
+        return this.cache.length;
+    }
+}
+
+;// CONCATENATED MODULE: ../node_modules/antlr4/src/antlr4/tree/TerminalNodeImpl.js
+/* Copyright (c) 2012-2022 The ANTLR Project Contributors. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+
+class TerminalNodeImpl extends TerminalNode {
+    constructor(symbol) {
+        super();
+        this.parentCtx = null;
+        this.symbol = symbol;
+    }
+
+    getChild(i) {
+        return null;
+    }
+
+    getSymbol() {
+        return this.symbol;
+    }
+
+    getParent() {
+        return this.parentCtx;
+    }
+
+    getPayload() {
+        return this.symbol;
+    }
+
+    getSourceInterval() {
+        if (this.symbol === null) {
+            return Interval.INVALID_INTERVAL;
+        }
+        const tokenIndex = this.symbol.tokenIndex;
+        return new Interval(tokenIndex, tokenIndex);
+    }
+
+    getChildCount() {
+        return 0;
+    }
+
+    accept(visitor) {
+        return visitor.visitTerminal(this);
+    }
+
+    getText() {
+        return this.symbol.text;
+    }
+
+    toString() {
+        if (this.symbol.type === Token.EOF) {
+            return "<EOF>";
+        } else {
+            return this.symbol.text;
+        }
+    }
+}
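+// ---------------------------------------------------------------------------
+// Hedged usage sketch (annotation only, not part of the bundled runtime):
+// how the classes above fit together for a generated parser. MyLexer,
+// MyParser and myStartRule are hypothetical generated names;
+// CommonTokenStream is the usual antlr4 token buffer.
+//
+//   const chars  = CharStreams.fromString("x = 1.");
+//   const lexer  = new MyLexer(chars);
+//   const tokens = new CommonTokenStream(lexer);
+//   const parser = new MyParser(tokens);
+//   parser.buildParseTrees = true;   // default; keep the parse tree
+//   const tree = parser.myStartRule();
+//   console.log(tree.toStringTree(parser.ruleNames, parser));
+// ---------------------------------------------------------------------------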
-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //getText}, and {@link //source} -// will be constructed from the result of {@link Token//getTokenSource} and -// {@link Token//getInputStream}.
-//
-// @param oldToken The token to copy.
-//
-CommonToken.prototype.clone = function() {
-    var t = new CommonToken(this.source, this.type, this.channel, this.start,
-            this.stop);
-    t.tokenIndex = this.tokenIndex;
-    t.line = this.line;
-    t.column = this.column;
-    t.text = this.text;
-    return t;
-};
-
-Object.defineProperty(CommonToken.prototype, "text", {
-    get : function() {
-        if (this._text !== null) {
-            return this._text;
-        }
-        var input = this.getInputStream();
-        if (input === null) {
-            return null;
-        }
-        var n = input.size;
-        if (this.start < n && this.stop < n) {
-            return input.getText(this.start, this.stop);
-        } else {
-            return "<EOF>";
-        }
-    },
-    set : function(text) {
-        this._text = text;
-    }
-});
-
-// <p>If the state number is not known, this method returns -1.</p>
- -// -// Gets the set of input symbols which could potentially follow the -// previously matched symbol at the time this exception was thrown. -// -//If the set of expected tokens is not known and could not be computed, -// this method returns {@code null}.
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code null} if the information is not available. -// / -RecognitionException.prototype.getExpectedTokens = function() { - if (this.recognizer!==null) { - return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx); - } else { - return null; - } -}; - -RecognitionException.prototype.toString = function() { - return this.message; -}; - -function LexerNoViableAltException(lexer, input, startIndex, deadEndConfigs) { - RecognitionException.call(this, {message:"", recognizer:lexer, input:input, ctx:null}); - this.startIndex = startIndex; - this.deadEndConfigs = deadEndConfigs; - return this; -} - -LexerNoViableAltException.prototype = Object.create(RecognitionException.prototype); -LexerNoViableAltException.prototype.constructor = LexerNoViableAltException; - -LexerNoViableAltException.prototype.toString = function() { - var symbol = ""; - if (this.startIndex >= 0 && this.startIndex < this.input.size) { - symbol = this.input.getText((this.startIndex,this.startIndex)); - } - return "LexerNoViableAltException" + symbol; -}; - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by reportNoViableAlternative() -// -function NoViableAltException(recognizer, input, startToken, offendingToken, deadEndConfigs, ctx) { - ctx = ctx || recognizer._ctx; - offendingToken = offendingToken || recognizer.getCurrentToken(); - startToken = startToken || recognizer.getCurrentToken(); - input = input || recognizer.getInputStream(); - RecognitionException.call(this, {message:"", recognizer:recognizer, input:input, ctx:ctx}); - // Which configurations did we try at input.index() that couldn't match - // input.LT(1)?// - this.deadEndConfigs = deadEndConfigs; - // The token object at the start index; the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - this.startToken = startToken; - this.offendingToken = offendingToken; -} - -NoViableAltException.prototype = Object.create(RecognitionException.prototype); -NoViableAltException.prototype.constructor = NoViableAltException; - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not match the expected token. -// -function InputMismatchException(recognizer) { - RecognitionException.call(this, {message:"", recognizer:recognizer, input:recognizer.getInputStream(), ctx:recognizer._ctx}); - this.offendingToken = recognizer.getCurrentToken(); -} - -InputMismatchException.prototype = Object.create(RecognitionException.prototype); -InputMismatchException.prototype.constructor = InputMismatchException; - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
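+// Hedged aside (annotation only, not bundle code): a failing semantic
+// predicate in a rule such as  expr : {this.allowExpr()}? term ;  surfaces
+// as a FailedPredicateException whose message embeds the predicate text:
+//
+//   try {
+//       parser.expr();                   // hypothetical generated rule
+//   } catch (e) {
+//       if (e instanceof FailedPredicateException) {
+//           console.log(e.message);      // failed predicate: {this.allowExpr()}?
+//       }
+//   }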
- -function FailedPredicateException(recognizer, predicate, message) { - RecognitionException.call(this, {message:this.formatMessage(predicate,message || null), recognizer:recognizer, - input:recognizer.getInputStream(), ctx:recognizer._ctx}); - var s = recognizer._interp.atn.states[recognizer.state]; - var trans = s.transitions[0]; - if (trans instanceof PredicateTransition) { - this.ruleIndex = trans.ruleIndex; - this.predicateIndex = trans.predIndex; - } else { - this.ruleIndex = 0; - this.predicateIndex = 0; - } - this.predicate = predicate; - this.offendingToken = recognizer.getCurrentToken(); - return this; -} - -FailedPredicateException.prototype = Object.create(RecognitionException.prototype); -FailedPredicateException.prototype.constructor = FailedPredicateException; - -FailedPredicateException.prototype.formatMessage = function(predicate, message) { - if (message !==null) { - return message; - } else { - return "failed predicate: {" + predicate + "}?"; - } -}; - -function ParseCancellationException() { - Error.call(this); - Error.captureStackTrace(this, ParseCancellationException); - return this; -} - -ParseCancellationException.prototype = Object.create(Error.prototype); -ParseCancellationException.prototype.constructor = ParseCancellationException; - -exports.RecognitionException = RecognitionException; -exports.NoViableAltException = NoViableAltException; -exports.LexerNoViableAltException = LexerNoViableAltException; -exports.InputMismatchException = InputMismatchException; -exports.FailedPredicateException = FailedPredicateException; -exports.ParseCancellationException = ParseCancellationException; - - -/***/ }), -/* 6 */ -/***/ (function(module, exports, __webpack_require__) { - -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -/// - -var RuleContext = __webpack_require__(14).RuleContext; -var Hash = __webpack_require__(0).Hash; -var Map = __webpack_require__(0).Map; - -function PredictionContext(cachedHashCode) { - this.cachedHashCode = cachedHashCode; -} - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -PredictionContext.EMPTY = null; - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EMPTY_RETURN_STATE}. -// / -PredictionContext.EMPTY_RETURN_STATE = 0x7FFFFFFF; - -PredictionContext.globalNodeCount = 1; -PredictionContext.id = PredictionContext.globalNodeCount; - -// Stores the computed hash code of this {@link PredictionContext}. The hash -// code is computed in parts to match the following reference algorithm. -// -//-// private int referenceHashCode() { -// int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link -// //INITIAL_HASH}); -// -// for (int i = 0; i < {@link //size()}; i++) { -// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent -// getParent}(i)); -// } -// -// for (int i = 0; i < {@link //size()}; i++) { -// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link -// //getReturnState getReturnState}(i)); -// } -// -// hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2// {@link -// //size()}); -// return hash; -// } -//-// / - -// This means only the {@link //EMPTY} context is in set. 
-PredictionContext.prototype.isEmpty = function() { - return this === PredictionContext.EMPTY; -}; - -PredictionContext.prototype.hasEmptyPath = function() { - return this.getReturnState(this.length - 1) === PredictionContext.EMPTY_RETURN_STATE; -}; - -PredictionContext.prototype.hashCode = function() { - return this.cachedHashCode; -}; - - -PredictionContext.prototype.updateHashCode = function(hash) { - hash.update(this.cachedHashCode); -}; -/* -function calculateHashString(parent, returnState) { - return "" + parent + returnState; -} -*/ - -// Used to cache {@link PredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -function PredictionContextCache() { - this.cache = new Map(); - return this; -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a new context to the cache. -// Protect shared cache from unsafe thread access. -// -PredictionContextCache.prototype.add = function(ctx) { - if (ctx === PredictionContext.EMPTY) { - return PredictionContext.EMPTY; - } - var existing = this.cache.get(ctx) || null; - if (existing !== null) { - return existing; - } - this.cache.put(ctx, ctx); - return ctx; -}; - -PredictionContextCache.prototype.get = function(ctx) { - return this.cache.get(ctx) || null; -}; - -Object.defineProperty(PredictionContextCache.prototype, "length", { - get : function() { - return this.cache.length; - } -}); - -function SingletonPredictionContext(parent, returnState) { - var hashCode = 0; - var hash = new Hash(); - if(parent !== null) { - hash.update(parent, returnState); - } else { - hash.update(1); - } - hashCode = hash.finish(); - PredictionContext.call(this, hashCode); - this.parentCtx = parent; - this.returnState = returnState; -} - -SingletonPredictionContext.prototype = Object.create(PredictionContext.prototype); -SingletonPredictionContext.prototype.contructor = SingletonPredictionContext; - -SingletonPredictionContext.create = function(parent, returnState) { - if (returnState === PredictionContext.EMPTY_RETURN_STATE && parent === null) { - // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; - } else { - return new SingletonPredictionContext(parent, returnState); - } -}; - -Object.defineProperty(SingletonPredictionContext.prototype, "length", { - get : function() { - return 1; - } -}); - -SingletonPredictionContext.prototype.getParent = function(index) { - return this.parentCtx; -}; - -SingletonPredictionContext.prototype.getReturnState = function(index) { - return this.returnState; -}; - -SingletonPredictionContext.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof SingletonPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - if(this.returnState !== other.returnState) - return false; - else if(this.parentCtx==null) - return other.parentCtx==null - else - return this.parentCtx.equals(other.parentCtx); - } -}; - -SingletonPredictionContext.prototype.toString = function() { - var up = this.parentCtx === null ? 
"" : this.parentCtx.toString(); - if (up.length === 0) { - if (this.returnState === PredictionContext.EMPTY_RETURN_STATE) { - return "$"; - } else { - return "" + this.returnState; - } - } else { - return "" + this.returnState + " " + up; - } -}; - -function EmptyPredictionContext() { - SingletonPredictionContext.call(this, null, PredictionContext.EMPTY_RETURN_STATE); - return this; -} - -EmptyPredictionContext.prototype = Object.create(SingletonPredictionContext.prototype); -EmptyPredictionContext.prototype.constructor = EmptyPredictionContext; - -EmptyPredictionContext.prototype.isEmpty = function() { - return true; -}; - -EmptyPredictionContext.prototype.getParent = function(index) { - return null; -}; - -EmptyPredictionContext.prototype.getReturnState = function(index) { - return this.returnState; -}; - -EmptyPredictionContext.prototype.equals = function(other) { - return this === other; -}; - -EmptyPredictionContext.prototype.toString = function() { - return "$"; -}; - -PredictionContext.EMPTY = new EmptyPredictionContext(); - -function ArrayPredictionContext(parents, returnStates) { - // Parent can be null only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // null parent and - // returnState == {@link //EMPTY_RETURN_STATE}. - var h = new Hash(); - h.update(parents, returnStates); - var hashCode = h.finish(); - PredictionContext.call(this, hashCode); - this.parents = parents; - this.returnStates = returnStates; - return this; -} - -ArrayPredictionContext.prototype = Object.create(PredictionContext.prototype); -ArrayPredictionContext.prototype.constructor = ArrayPredictionContext; - -ArrayPredictionContext.prototype.isEmpty = function() { - // since EMPTY_RETURN_STATE can only appear in the last position, we - // don't need to verify that size==1 - return this.returnStates[0] === PredictionContext.EMPTY_RETURN_STATE; -}; - -Object.defineProperty(ArrayPredictionContext.prototype, "length", { - get : function() { - return this.returnStates.length; - } -}); - -ArrayPredictionContext.prototype.getParent = function(index) { - return this.parents[index]; -}; - -ArrayPredictionContext.prototype.getReturnState = function(index) { - return this.returnStates[index]; -}; - -ArrayPredictionContext.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof ArrayPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - return this.returnStates === other.returnStates && - this.parents === other.parents; - } -}; - -ArrayPredictionContext.prototype.toString = function() { - if (this.isEmpty()) { - return "[]"; - } else { - var s = "["; - for (var i = 0; i < this.returnStates.length; i++) { - if (i > 0) { - s = s + ", "; - } - if (this.returnStates[i] === PredictionContext.EMPTY_RETURN_STATE) { - s = s + "$"; - continue; - } - s = s + this.returnStates[i]; - if (this.parents[i] !== null) { - s = s + " " + this.parents[i]; - } else { - s = s + "null"; - } - } - return s + "]"; - } -}; - -// Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or null. -// / -function predictionContextFromRuleContext(atn, outerContext) { - if (outerContext === undefined || outerContext === null) { - outerContext = RuleContext.EMPTY; - } - // if we are in RuleContext of start rule, s, then PredictionContext - // is EMPTY. 
Nobody called us. (if we are empty, return empty) - if (outerContext.parentCtx === null || outerContext === RuleContext.EMPTY) { - return PredictionContext.EMPTY; - } - // If we have a parent, convert it to a PredictionContext graph - var parent = predictionContextFromRuleContext(atn, outerContext.parentCtx); - var state = atn.states[outerContext.invokingState]; - var transition = state.transitions[0]; - return SingletonPredictionContext.create(parent, transition.followState.stateNumber); -} -/* -function calculateListsHashString(parents, returnStates) { - var s = ""; - parents.map(function(p) { - s = s + p; - }); - returnStates.map(function(r) { - s = s + r; - }); - return s; -} -*/ -function merge(a, b, rootIsWildcard, mergeCache) { - // share same graph if both same - if (a === b) { - return a; - } - if (a instanceof SingletonPredictionContext && b instanceof SingletonPredictionContext) { - return mergeSingletons(a, b, rootIsWildcard, mergeCache); - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if (rootIsWildcard) { - if (a instanceof EmptyPredictionContext) { - return a; - } - if (b instanceof EmptyPredictionContext) { - return b; - } - } - // convert singleton so both are arrays to normalize - if (a instanceof SingletonPredictionContext) { - a = new ArrayPredictionContext([a.getParent()], [a.returnState]); - } - if (b instanceof SingletonPredictionContext) { - b = new ArrayPredictionContext([b.getParent()], [b.returnState]); - } - return mergeArrays(a, b, rootIsWildcard, mergeCache); -} - -// -// Merge two {@link SingletonPredictionContext} instances. -// -//
-// <p>Stack tops equal, parents merge is same; return left graph.</p>
-//
-// <p>Same stack top, parents differ; merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.</p>
-//
-// <p>Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.</p>
-//
-// <p>Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.</p>
-//
-// <p>These local-context merge operations are used when {@code rootIsWildcard}
-// is true.</p>
-//
-// <p>{@link //EMPTY} is superset of any graph; return {@link //EMPTY}.</p>
-//
-// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}; return left graph.</p>
-//
-// <p>Special case of last merge if local context.</p>
-//
-// <p>These full-context merge operations are used when {@code rootIsWildcard}
-// is false.</p>
-//
-// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// null parent).</p>
-//
-// Merge two {@link ArrayPredictionContext} instances.
-//
-// <p>Different tops, different parents.</p>
-// <p>Shared top, same parents.</p>
-// <p>Shared top, different parents.</p>
-// <p>Shared top, all shared parents.</p>
-// <p>Equal tops, merge parents and reduce top to
-// {@link SingletonPredictionContext}.</p>
-//
-// Computes the set of input symbols which could follow ATN state number
-// {@code stateNumber} in the specified full {@code context}.
-//
-// <p>If {@code context} is {@code null}, it is treated as
-// {@link ParserRuleContext//EMPTY}.</p>
-// -// @param stateNumber the ATN state number -// @param context the full parse context -// @return The set of potentially valid input symbols which could follow the -// specified state in the specified context. -// @throws IllegalArgumentException if the ATN does not contain a state with -// number {@code stateNumber} -var Token = __webpack_require__(1).Token; - -ATN.prototype.getExpectedTokens = function( stateNumber, ctx ) { - if ( stateNumber < 0 || stateNumber >= this.states.length ) { - throw("Invalid state number."); - } - var s = this.states[stateNumber]; - var following = this.nextTokens(s); - if (!following.contains(Token.EPSILON)) { - return following; - } - var expected = new IntervalSet(); - expected.addSet(following); - expected.removeOne(Token.EPSILON); - while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { - var invokingState = this.states[ctx.invokingState]; - var rt = invokingState.transitions[0]; - following = this.nextTokens(rt.followState); - expected.addSet(following); - expected.removeOne(Token.EPSILON); - ctx = ctx.parentCtx; - } - if (following.contains(Token.EPSILON)) { - expected.addOne(Token.EOF); - } - return expected; -}; - -ATN.INVALID_ALT_NUMBER = 0; - -exports.ATN = ATN; - -/***/ }), -/* 8 */ -/***/ (function(module, exports, __webpack_require__) { - -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -// - -// An ATN transition between any two ATN states. Subclasses define -// atom, set, epsilon, action, predicate, rule transitions. -// -//This is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.
-
-// Since we never have to change the ATN transitions once we construct it,
-// we can fix these transitions as specific classes. The DFA transitions
-// on the other hand need to update the labels as it adds transitions to
-// the states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.
- -var Token = __webpack_require__(1).Token; -var Interval = __webpack_require__(2).Interval; -var IntervalSet = __webpack_require__(2).IntervalSet; -var Predicate = __webpack_require__(10).Predicate; -var PrecedencePredicate = __webpack_require__(10).PrecedencePredicate; - -function Transition (target) { - // The target of this transition. - if (target===undefined || target===null) { - throw "target cannot be null."; - } - this.target = target; - // Are we epsilon, action, sempred? - this.isEpsilon = false; - this.label = null; - return this; -} - // constants for serialization -Transition.EPSILON = 1; -Transition.RANGE = 2; -Transition.RULE = 3; -Transition.PREDICATE = 4; // e.g., {isType(input.LT(1))}? -Transition.ATOM = 5; -Transition.ACTION = 6; -Transition.SET = 7; // ~(A|B) or ~atom, wildcard, which convert to next 2 -Transition.NOT_SET = 8; -Transition.WILDCARD = 9; -Transition.PRECEDENCE = 10; - -Transition.serializationNames = [ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE" - ]; - -Transition.serializationTypes = { - EpsilonTransition: Transition.EPSILON, - RangeTransition: Transition.RANGE, - RuleTransition: Transition.RULE, - PredicateTransition: Transition.PREDICATE, - AtomTransition: Transition.ATOM, - ActionTransition: Transition.ACTION, - SetTransition: Transition.SET, - NotSetTransition: Transition.NOT_SET, - WildcardTransition: Transition.WILDCARD, - PrecedencePredicateTransition: Transition.PRECEDENCE - }; - - -// TODO: make all transitions sets? no, should remove set edges -function AtomTransition(target, label) { - Transition.call(this, target); - this.label_ = label; // The token type or character value; or, signifies special label. - this.label = this.makeLabel(); - this.serializationType = Transition.ATOM; - return this; -} - -AtomTransition.prototype = Object.create(Transition.prototype); -AtomTransition.prototype.constructor = AtomTransition; - -AtomTransition.prototype.makeLabel = function() { - var s = new IntervalSet(); - s.addOne(this.label_); - return s; -}; - -AtomTransition.prototype.matches = function( symbol, minVocabSymbol, maxVocabSymbol) { - return this.label_ === symbol; -}; - -AtomTransition.prototype.toString = function() { - return this.label_; -}; - -function RuleTransition(ruleStart, ruleIndex, precedence, followState) { - Transition.call(this, ruleStart); - this.ruleIndex = ruleIndex; // ptr to the rule definition object for this rule ref - this.precedence = precedence; - this.followState = followState; // what node to begin computations following ref to rule - this.serializationType = Transition.RULE; - this.isEpsilon = true; - return this; -} - -RuleTransition.prototype = Object.create(Transition.prototype); -RuleTransition.prototype.constructor = RuleTransition; - -RuleTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - - -function EpsilonTransition(target, outermostPrecedenceReturn) { - Transition.call(this, target); - this.serializationType = Transition.EPSILON; - this.isEpsilon = true; - this.outermostPrecedenceReturn = outermostPrecedenceReturn; - return this; -} - -EpsilonTransition.prototype = Object.create(Transition.prototype); -EpsilonTransition.prototype.constructor = EpsilonTransition; - -EpsilonTransition.prototype.matches = function( symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -EpsilonTransition.prototype.toString = function() { - return "epsilon"; -}; - -function 
RangeTransition(target, start, stop) { - Transition.call(this, target); - this.serializationType = Transition.RANGE; - this.start = start; - this.stop = stop; - this.label = this.makeLabel(); - return this; -} - -RangeTransition.prototype = Object.create(Transition.prototype); -RangeTransition.prototype.constructor = RangeTransition; - -RangeTransition.prototype.makeLabel = function() { - var s = new IntervalSet(); - s.addRange(this.start, this.stop); - return s; -}; - -RangeTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= this.start && symbol <= this.stop; -}; - -RangeTransition.prototype.toString = function() { - return "'" + String.fromCharCode(this.start) + "'..'" + String.fromCharCode(this.stop) + "'"; -}; - -function AbstractPredicateTransition(target) { - Transition.call(this, target); - return this; -} - -AbstractPredicateTransition.prototype = Object.create(Transition.prototype); -AbstractPredicateTransition.prototype.constructor = AbstractPredicateTransition; - -function PredicateTransition(target, ruleIndex, predIndex, isCtxDependent) { - AbstractPredicateTransition.call(this, target); - this.serializationType = Transition.PREDICATE; - this.ruleIndex = ruleIndex; - this.predIndex = predIndex; - this.isCtxDependent = isCtxDependent; // e.g., $i ref in pred - this.isEpsilon = true; - return this; -} - -PredicateTransition.prototype = Object.create(AbstractPredicateTransition.prototype); -PredicateTransition.prototype.constructor = PredicateTransition; - -PredicateTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -PredicateTransition.prototype.getPredicate = function() { - return new Predicate(this.ruleIndex, this.predIndex, this.isCtxDependent); -}; - -PredicateTransition.prototype.toString = function() { - return "pred_" + this.ruleIndex + ":" + this.predIndex; -}; - -function ActionTransition(target, ruleIndex, actionIndex, isCtxDependent) { - Transition.call(this, target); - this.serializationType = Transition.ACTION; - this.ruleIndex = ruleIndex; - this.actionIndex = actionIndex===undefined ? -1 : actionIndex; - this.isCtxDependent = isCtxDependent===undefined ? false : isCtxDependent; // e.g., $i ref in pred - this.isEpsilon = true; - return this; -} - -ActionTransition.prototype = Object.create(Transition.prototype); -ActionTransition.prototype.constructor = ActionTransition; - - -ActionTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -ActionTransition.prototype.toString = function() { - return "action_" + this.ruleIndex + ":" + this.actionIndex; -}; - - -// A transition containing a set of values. 
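+// Hedged aside (annotation only, not bundle code): a SetTransition matches
+// any symbol in its IntervalSet label, as produced by a grammar set such as
+// ('a'|'b'); NotSetTransition negates that test within the vocabulary range.
+// Assuming some ATN state `target`:
+//
+//   var s = new IntervalSet();
+//   s.addOne(65); s.addOne(66);                          // 'A', 'B'
+//   new SetTransition(target, s).matches(65, 0, 127);    // true
+//   new NotSetTransition(target, s).matches(65, 0, 127); // false
+//   new NotSetTransition(target, s).matches(67, 0, 127); // true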
-function SetTransition(target, set) { - Transition.call(this, target); - this.serializationType = Transition.SET; - if (set !==undefined && set !==null) { - this.label = set; - } else { - this.label = new IntervalSet(); - this.label.addOne(Token.INVALID_TYPE); - } - return this; -} - -SetTransition.prototype = Object.create(Transition.prototype); -SetTransition.prototype.constructor = SetTransition; - -SetTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return this.label.contains(symbol); -}; - - -SetTransition.prototype.toString = function() { - return this.label.toString(); -}; - -function NotSetTransition(target, set) { - SetTransition.call(this, target, set); - this.serializationType = Transition.NOT_SET; - return this; -} - -NotSetTransition.prototype = Object.create(SetTransition.prototype); -NotSetTransition.prototype.constructor = NotSetTransition; - -NotSetTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && - !SetTransition.prototype.matches.call(this, symbol, minVocabSymbol, maxVocabSymbol); -}; - -NotSetTransition.prototype.toString = function() { - return '~' + SetTransition.prototype.toString.call(this); -}; - -function WildcardTransition(target) { - Transition.call(this, target); - this.serializationType = Transition.WILDCARD; - return this; -} - -WildcardTransition.prototype = Object.create(Transition.prototype); -WildcardTransition.prototype.constructor = WildcardTransition; - - -WildcardTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; -}; - -WildcardTransition.prototype.toString = function() { - return "."; -}; - -function PrecedencePredicateTransition(target, precedence) { - AbstractPredicateTransition.call(this, target); - this.serializationType = Transition.PRECEDENCE; - this.precedence = precedence; - this.isEpsilon = true; - return this; -} - -PrecedencePredicateTransition.prototype = Object.create(AbstractPredicateTransition.prototype); -PrecedencePredicateTransition.prototype.constructor = PrecedencePredicateTransition; - -PrecedencePredicateTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -PrecedencePredicateTransition.prototype.getPredicate = function() { - return new PrecedencePredicate(this.precedence); -}; - -PrecedencePredicateTransition.prototype.toString = function() { - return this.precedence + " >= _p"; -}; - -exports.Transition = Transition; -exports.AtomTransition = AtomTransition; -exports.SetTransition = SetTransition; -exports.NotSetTransition = NotSetTransition; -exports.RuleTransition = RuleTransition; -exports.ActionTransition = ActionTransition; -exports.EpsilonTransition = EpsilonTransition; -exports.RangeTransition = RangeTransition; -exports.WildcardTransition = WildcardTransition; -exports.PredicateTransition = PredicateTransition; -exports.PrecedencePredicateTransition = PrecedencePredicateTransition; -exports.AbstractPredicateTransition = AbstractPredicateTransition; - -/***/ }), -/* 9 */ -/***/ (function(module, exports, __webpack_require__) { - -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -// -// Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track -// info about the set, with support for combining similar configurations using a -// graph-structured stack. -/// - -var ATN = __webpack_require__(7).ATN; -var Utils = __webpack_require__(0); -var Hash = Utils.Hash; -var Set = Utils.Set; -var SemanticContext = __webpack_require__(10).SemanticContext; -var merge = __webpack_require__(6).merge; - -function hashATNConfig(c) { - return c.hashCodeForConfigSet(); -} - -function equalATNConfigs(a, b) { - if ( a===b ) { - return true; - } else if ( a===null || b===null ) { - return false; - } else - return a.equalsForConfigSet(b); - } - - -function ATNConfigSet(fullCtx) { - // - // The reason that we need this is because we don't want the hash map to use - // the standard hash code and equals. We need all configurations with the - // same - // {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively - // doubles - // the number of objects associated with ATNConfigs. The other solution is - // to - // use a hash table that lets us specify the equals/hashcode operation. - // All configs but hashed by (s, i, _, pi) not including context. Wiped out - // when we go readonly as this set becomes a DFA state. - this.configLookup = new Set(hashATNConfig, equalATNConfigs); - // Indicates that this configuration set is part of a full context - // LL prediction. It will be used to determine how to merge $. With SLL - // it's a wildcard whereas it is not for LL context merge. - this.fullCtx = fullCtx === undefined ? true : fullCtx; - // Indicates that the set of configurations is read-only. Do not - // allow any code to manipulate the set; DFA states will point at - // the sets and they must not change. This does not protect the other - // fields; in particular, conflictingAlts is set after - // we've made this readonly. - this.readOnly = false; - // Track the elements as they are added to the set; supports get(i)/// - this.configs = []; - - // TODO: these fields make me pretty uncomfortable but nice to pack up info - // together, saves recomputation - // TODO: can we track conflicts as they are added to save scanning configs - // later? - this.uniqueAlt = 0; - this.conflictingAlts = null; - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from this. - this.hasSemanticContext = false; - this.dipsIntoOuterContext = false; - - this.cachedHashCode = -1; - - return this; -} - -// Adding a new config means merging contexts with existing configs for -// {@code (s, i, pi, _)}, where {@code s} is the -// {@link ATNConfig//state}, {@code i} is the {@link ATNConfig//alt}, and -// {@code pi} is the {@link ATNConfig//semanticContext}. We use -// {@code (s,i,pi)} as key. -// -//This method updates {@link //dipsIntoOuterContext} and -// {@link //hasSemanticContext} when necessary.
-// /
-ATNConfigSet.prototype.add = function(config, mergeCache) {
- if (mergeCache === undefined) {
- mergeCache = null;
- }
- if (this.readOnly) {
- throw "This set is readonly";
- }
- if (config.semanticContext !== SemanticContext.NONE) {
- this.hasSemanticContext = true;
- }
- if (config.reachesIntoOuterContext > 0) {
- this.dipsIntoOuterContext = true;
- }
- var existing = this.configLookup.add(config);
- if (existing === config) {
- this.cachedHashCode = -1;
- this.configs.push(config); // track order here
- return true;
- }
- // a previous (s,i,pi,_), merge with it and save result
- var rootIsWildcard = !this.fullCtx;
- var merged = merge(existing.context, config.context, rootIsWildcard, mergeCache);
- // no need to check for existing.context, config.context in cache
- // since only way to create new graphs is "call rule" and here. We
- // cache at both places.
- existing.reachesIntoOuterContext = Math.max( existing.reachesIntoOuterContext, config.reachesIntoOuterContext);
- // make sure to preserve the precedence filter suppression during the merge
- if (config.precedenceFilterSuppressed) {
- existing.precedenceFilterSuppressed = true;
- }
- existing.context = merged; // replace context; no need to alt mapping
- return true;
-};
-
-ATNConfigSet.prototype.getStates = function() {
- var states = new Set();
- for (var i = 0; i < this.configs.length; i++) {
- states.add(this.configs[i].state);
- }
- return states;
-};
-
-ATNConfigSet.prototype.getPredicates = function() {
- var preds = [];
- for (var i = 0; i < this.configs.length; i++) {
- var c = this.configs[i].semanticContext;
- if (c !== SemanticContext.NONE) {
- preds.push(c); // c is already the config's semanticContext
- }
- }
- return preds;
-};
-
-Object.defineProperty(ATNConfigSet.prototype, "items", {
- get : function() {
- return this.configs;
- }
-});
-
-ATNConfigSet.prototype.optimizeConfigs = function(interpreter) {
- if (this.readOnly) {
- throw "This set is readonly";
- }
- if (this.configLookup.length === 0) {
- return;
- }
- for (var i = 0; i < this.configs.length; i++) {
- var config = this.configs[i];
- config.context = interpreter.getCachedContext(config.context);
- }
-};
-
-ATNConfigSet.prototype.addAll = function(coll) {
- for (var i = 0; i < coll.length; i++) {
- this.add(coll[i]);
- }
- return false;
-};
-
-ATNConfigSet.prototype.equals = function(other) {
- return this === other ||
- (other instanceof ATNConfigSet &&
- Utils.equalArrays(this.configs, other.configs) &&
- this.fullCtx === other.fullCtx &&
- this.uniqueAlt === other.uniqueAlt &&
- this.conflictingAlts === other.conflictingAlts &&
- this.hasSemanticContext === other.hasSemanticContext &&
- this.dipsIntoOuterContext === other.dipsIntoOuterContext);
-};
-
-ATNConfigSet.prototype.hashCode = function() {
- var hash = new Hash();
- hash.update(this.configs);
- return hash.finish();
-};
-
-
-ATNConfigSet.prototype.updateHashCode = function(hash) {
- if (this.readOnly) {
- if (this.cachedHashCode === -1) {
- this.cachedHashCode = this.hashCode();
- }
- hash.update(this.cachedHashCode);
- } else {
- hash.update(this.hashCode());
- }
-};
-
-
-Object.defineProperty(ATNConfigSet.prototype, "length", {
- get : function() {
- return this.configs.length;
- }
-});
-
-ATNConfigSet.prototype.isEmpty = function() {
- return this.configs.length === 0;
-};
-
-ATNConfigSet.prototype.contains = function(item) {
- if (this.configLookup === null) {
- throw "This method is not implemented for readonly sets.";
- }
- return this.configLookup.contains(item);
-};
-
-ATNConfigSet.prototype.containsFast = function(item) {
- if (this.configLookup === null) {
- throw "This method is not implemented for readonly sets.";
- }
- return this.configLookup.containsFast(item);
-};
-
-ATNConfigSet.prototype.clear = function() {
- if (this.readOnly) {
- throw "This set is readonly";
- }
- this.configs = [];
- this.cachedHashCode = -1;
- this.configLookup = new Set();
-};
-
-ATNConfigSet.prototype.setReadonly = function(readOnly) {
- this.readOnly = readOnly;
- if (readOnly) {
- this.configLookup = null; // can't mod, no need for lookup cache
- }
-};
-
-ATNConfigSet.prototype.toString = function() {
- return Utils.arrayToString(this.configs) +
- (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") +
- (this.uniqueAlt !== ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") +
- (this.conflictingAlts !== null ? ",conflictingAlts=" + this.conflictingAlts : "") +
- (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : "");
-};
-
-function OrderedATNConfigSet() {
- ATNConfigSet.call(this);
- this.configLookup = new Set();
- return this;
-}
-
-OrderedATNConfigSet.prototype = Object.create(ATNConfigSet.prototype);
-OrderedATNConfigSet.prototype.constructor = OrderedATNConfigSet;
-
-exports.ATNConfigSet = ATNConfigSet;
-exports.OrderedATNConfigSet = OrderedATNConfigSet;
-
-
-/***/ }),
-/* 10 */
-/***/ (function(module, exports, __webpack_require__) {
-
-//
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-//
-
-// A tree structure used to record the semantic context in which
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
-//
-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
-//
-
-var Set = __webpack_require__(0).Set;
-var Hash = __webpack_require__(0).Hash;
-
-function SemanticContext() {
- return this;
-}
-
-SemanticContext.prototype.hashCode = function() {
- var hash = new Hash();
- this.updateHashCode(hash);
- return hash.finish();
-};
-
-// For context independent predicates, we evaluate them without a local
-// context (i.e., null context). That way, we can evaluate them without
-// having to create proper rule-specific context during prediction (as
-// opposed to the parser, which creates them naturally). In a practical
-// sense, this avoids a cast exception from RuleContext to myRuleContext.
-//
-// For context dependent predicates, we must pass in a local context so that
-// references such as $arg evaluate properly as _localctx.arg. We only
-// capture context dependent predicates in the context in which we begin
-// prediction, so we passed in the outer context here in case of context
-// dependent predicate evaluation.
-//
-SemanticContext.prototype.evaluate = function(parser, outerContext) {
-};
-
-//
-// Evaluate the precedence predicates for the context and reduce the result.
-//
-// @param parser The parser instance.
-// @param outerContext The current parser context object.
-// @return The simplified semantic context after precedence predicates are
-// evaluated, which will be one of the following values.
-//
-// The evaluation of predicates by this context is short-circuiting, but
-// unordered.
-//
-OR.prototype.evaluate = function(parser, outerContext) {
- for (var i = 0; i < this.opnds.length; i++) {
- if (this.opnds[i].evaluate(parser, outerContext)) {
- return true;
- }
- }
- return false;
-};
-
-OR.prototype.evalPrecedence = function(parser, outerContext) {
- var differs = false;
- var operands = [];
- for (var i = 0; i < this.opnds.length; i++) {
- var context = this.opnds[i];
- var evaluated = context.evalPrecedence(parser, outerContext);
- differs |= (evaluated !== context);
- if (evaluated === SemanticContext.NONE) {
- // The OR context is true if any element is true
- return SemanticContext.NONE;
- } else if (evaluated !== null) {
- // Reduce the result by skipping false elements
- operands.push(evaluated);
- }
- }
- if (!differs) {
- return this;
- }
- if (operands.length === 0) {
- // all elements were false, so the OR context is false
- return null;
- }
- var result = null;
- operands.map(function(o) {
- // accumulate into result; a bare return here would discard the merge
- result = result === null ? o : SemanticContext.orContext(result, o);
- });
- return result;
-};
-
-OR.prototype.toString = function() {
- var s = "";
- this.opnds.map(function(o) {
- s += "|| " + o.toString();
- });
- return s.length > 3 ? s.slice(3) : s;
-};
-
-exports.SemanticContext = SemanticContext;
-exports.PrecedencePredicate = PrecedencePredicate;
-exports.Predicate = Predicate;
-
-
-/***/ }),
-/* 11 */
-/***/ (function(module, exports, __webpack_require__) {
-
-//
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-///
-
-var ATNConfigSet = __webpack_require__(9).ATNConfigSet;
-var Utils = __webpack_require__(0);
-var Hash = Utils.Hash;
-var Set = Utils.Set;
-
-// Map a predicate to a predicted alternative.///
-
-function PredPrediction(pred, alt) {
- this.alt = alt;
- this.pred = pred;
- return this;
-}
-
-PredPrediction.prototype.toString = function() {
- return "(" + this.pred + ", " + this.alt + ")";
-};
-
-// A DFA state represents a set of possible ATN configurations.
-// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
-// to keep track of all possible states the ATN can be in after
-// reading each input symbol. That is to say, after reading
-// input a1a2..an, the DFA is in a state that represents the
-// subset T of the states of the ATN that are reachable from the
-// ATN's start state along some path labeled a1a2..an."
-// In conventional NFA→DFA conversion, therefore, the subset T
-// would be a bitset representing the set of states the
-// ATN could be in. We need to track the alt predicted by each
-// state as well, however. More importantly, we need to maintain
-// a stack of states, tracking the closure operations as they
-// jump from rule to rule, emulating rule invocations (method calls).
-// I have to add a stack to simulate the proper lookahead sequences for
-// the underlying LL grammar from which the ATN was derived.
-//
-// I use a set of ATNConfig objects, not simple states. An ATNConfig
-// is both a state (ala normal conversion) and a RuleContext describing
-// the chain of rules (if any) followed to arrive at that state.
-//
-// A DFA state may have multiple references to a particular state,
-// but with different ATN contexts (with same or different alts)
-// meaning that state was reached via a different set of rule invocations.
-// /
-
-function DFAState(stateNumber, configs) {
- if (stateNumber === null) {
- stateNumber = -1;
- }
- if (configs === null) {
- configs = new ATNConfigSet();
- }
- this.stateNumber = stateNumber;
- this.configs = configs;
- // {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
- // {@link Token//EOF} maps to {@code edges[0]}.
- this.edges = null;
- this.isAcceptState = false;
- // if accept state, what ttype do we match or alt do we predict?
- // This is set to {@link ATN//INVALID_ALT_NUMBER} when
- // {@link //predicates}{@code !=null} or {@link //requiresFullContext}.
- this.prediction = 0;
- this.lexerActionExecutor = null;
- // Indicates that this state was created during SLL prediction that
- // discovered a conflict between the configurations in the state. Future
- // {@link ParserATNSimulator//execATN} invocations immediately jumped doing
- // full context prediction if this field is true.
- this.requiresFullContext = false;
- // During SLL parsing, this is a list of predicates associated with the
- // ATN configurations of the DFA state. When we have predicates,
- // {@link //requiresFullContext} is {@code false} since full context
- // prediction evaluates predicates on-the-fly. If this is not null, then
- // {@link //prediction} is {@link ATN//INVALID_ALT_NUMBER}.
- //
- // We only use these for non-{@link //requiresFullContext} but conflicting
- // states. That means we know from the context (it's $ or we don't dip
- // into outer context) that it's an ambiguity not a conflict.
- //
- // This list is computed by {@link ParserATNSimulator//predicateDFAState}.
- this.predicates = null;
- return this;
-}
-
-// Get the set of all alts mentioned by all ATN configurations in this
-// DFA state.
-DFAState.prototype.getAltSet = function() {
- var alts = new Set();
- if (this.configs !== null) {
- for (var i = 0; i < this.configs.length; i++) {
- var c = this.configs[i];
- alts.add(c.alt);
- }
- }
- if (alts.length === 0) {
- return null;
- } else {
- return alts;
- }
-};
-
-// Two {@link DFAState} instances are equal if their ATN configuration sets
-// are the same. This method is used to see if a state already exists.
-//
-// Because the number of alternatives and number of ATN configurations are
-// finite, there is a finite number of DFA states that can be processed.
-// This is necessary to show that the algorithm terminates.
-//
-// Cannot test the DFA state numbers here because in
-// {@link ParserATNSimulator//addDFAState} we need to know if any other state
-// exists that has this exact set of ATN configurations. The
-// {@link //stateNumber} is irrelevant.
-DFAState.prototype.equals = function(other) {
- // compare set of ATN configurations in this set with other
- return this === other ||
- (other instanceof DFAState &&
- this.configs.equals(other.configs));
-};
-
-DFAState.prototype.toString = function() {
- var s = "" + this.stateNumber + ":" + this.configs;
- if(this.isAcceptState) {
- s = s + "=>";
- if (this.predicates !== null)
- s = s + this.predicates;
- else
- s = s + this.prediction;
- }
- return s;
-};
-
-DFAState.prototype.hashCode = function() {
- var hash = new Hash();
- hash.update(this.configs);
- return hash.finish();
-};
-
-exports.DFAState = DFAState;
-exports.PredPrediction = PredPrediction;
-
-
-/***/ }),
-/* 12 */
-/***/ (function(module, exports, __webpack_require__) {
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-exports.atn = __webpack_require__(35);
-exports.codepointat = __webpack_require__(27);
-exports.dfa = __webpack_require__(42);
-exports.fromcodepoint = __webpack_require__(28);
-exports.tree = __webpack_require__(44);
-exports.error = __webpack_require__(45);
-exports.Token = __webpack_require__(1).Token;
-exports.CharStreams = __webpack_require__(47).CharStreams;
-exports.CommonToken = __webpack_require__(1).CommonToken;
-exports.InputStream = __webpack_require__(19).InputStream;
-exports.FileStream = __webpack_require__(48).FileStream;
-exports.CommonTokenStream = __webpack_require__(49).CommonTokenStream;
-exports.Lexer = __webpack_require__(15).Lexer;
-exports.Parser = __webpack_require__(51).Parser;
-var pc = __webpack_require__(6);
-exports.PredictionContextCache = pc.PredictionContextCache;
-exports.ParserRuleContext = __webpack_require__(18).ParserRuleContext;
-exports.Interval = __webpack_require__(2).Interval;
-exports.Utils = __webpack_require__(0);
-
-
-/***/ }),
-/* 13 */
-/***/ (function(module, exports, __webpack_require__) {
-
-//
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-///
-
-// A tuple: (ATN state, predicted alt, syntactic, semantic context).
-// The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s)
-// chain used to arrive at the state. The semantic context is
-// the tree of semantic predicates encountered before reaching
-// an ATN state.
-///
-
-var DecisionState = __webpack_require__(3).DecisionState;
-var SemanticContext = __webpack_require__(10).SemanticContext;
-var Hash = __webpack_require__(0).Hash;
-
-
-function checkParams(params, isCfg) {
- if(params===null) {
- var result = { state:null, alt:null, context:null, semanticContext:null };
- if(isCfg) {
- result.reachesIntoOuterContext = 0;
- }
- return result;
- } else {
- var props = {};
- props.state = params.state || null;
- props.alt = (params.alt === undefined) ? null : params.alt;
- props.context = params.context || null;
- props.semanticContext = params.semanticContext || null;
- if(isCfg) {
- props.reachesIntoOuterContext = params.reachesIntoOuterContext || 0;
- props.precedenceFilterSuppressed = params.precedenceFilterSuppressed || false;
- }
- return props;
- }
-}
-
-function ATNConfig(params, config) {
- this.checkContext(params, config);
- params = checkParams(params);
- config = checkParams(config, true);
- // The ATN state associated with this configuration///
- this.state = params.state!==null ? params.state : config.state;
- // What alt (or lexer rule) is predicted by this configuration///
- this.alt = params.alt!==null ? params.alt : config.alt;
- // The stack of invoking states leading to the rule/states associated
- // with this config. We track only those contexts pushed during
- // execution of the ATN simulator.
- this.context = params.context!==null ? params.context : config.context;
- this.semanticContext = params.semanticContext!==null ? params.semanticContext :
- (config.semanticContext!==null ? config.semanticContext : SemanticContext.NONE);
- // We cannot execute predicates dependent upon local context unless
- // we know for sure we are in the correct context. Because there is
- // no way to do this efficiently, we simply cannot evaluate
- // dependent predicates unless we are in the rule that initially
- // invokes the ATN simulator.
- //
- // closure() tracks the depth of how far we dip into the
- // outer context: depth > 0. Note that it may not be totally
- // accurate depth since I don't ever decrement. TODO: make it a boolean then
- this.reachesIntoOuterContext = config.reachesIntoOuterContext;
- this.precedenceFilterSuppressed = config.precedenceFilterSuppressed;
- return this;
-}
-
-ATNConfig.prototype.checkContext = function(params, config) {
- if((params.context===null || params.context===undefined) &&
- (config===null || config.context===null || config.context===undefined)) {
- this.context = null;
- }
-};
-
-
-ATNConfig.prototype.hashCode = function() {
- var hash = new Hash();
- this.updateHashCode(hash);
- return hash.finish();
-};
-
-
-ATNConfig.prototype.updateHashCode = function(hash) {
- hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext);
-};
-
-// An ATN configuration is equal to another if both have
-// the same state, they predict the same alternative, and
-// syntactic/semantic contexts are the same.
-
-ATNConfig.prototype.equals = function(other) {
- if (this === other) {
- return true;
- } else if (! (other instanceof ATNConfig)) {
- return false;
- } else {
- return this.state.stateNumber===other.state.stateNumber &&
- this.alt===other.alt &&
- (this.context===null ? other.context===null : this.context.equals(other.context)) &&
- this.semanticContext.equals(other.semanticContext) &&
- this.precedenceFilterSuppressed===other.precedenceFilterSuppressed;
- }
-};
-
-
-ATNConfig.prototype.hashCodeForConfigSet = function() {
- var hash = new Hash();
- hash.update(this.state.stateNumber, this.alt, this.semanticContext);
- return hash.finish();
-};
-
-
-ATNConfig.prototype.equalsForConfigSet = function(other) {
- if (this === other) {
- return true;
- } else if (!(other instanceof ATNConfig)) {
- return false;
- } else {
- return this.state.stateNumber===other.state.stateNumber &&
- this.alt===other.alt &&
- this.semanticContext.equals(other.semanticContext);
- }
-};
-
-
-ATNConfig.prototype.toString = function() {
- return "(" + this.state + "," + this.alt +
- (this.context!==null ? ",[" + this.context.toString() + "]" : "") +
- (this.semanticContext !== SemanticContext.NONE ?
- ("," + this.semanticContext.toString())
- : "") +
- (this.reachesIntoOuterContext>0 ?
- (",up=" + this.reachesIntoOuterContext)
- : "") + ")";
-};
-
-
-function LexerATNConfig(params, config) {
- ATNConfig.call(this, params, config);
-
- // This is the backing field for {@link //getLexerActionExecutor}.
- var lexerActionExecutor = params.lexerActionExecutor || null;
- this.lexerActionExecutor = lexerActionExecutor || (config!==null ? config.lexerActionExecutor : null);
- this.passedThroughNonGreedyDecision = config!==null ? this.checkNonGreedyDecision(config, this.state) : false;
- return this;
-}
-
-LexerATNConfig.prototype = Object.create(ATNConfig.prototype);
-LexerATNConfig.prototype.constructor = LexerATNConfig;
-
-LexerATNConfig.prototype.updateHashCode = function(hash) {
- hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext, this.passedThroughNonGreedyDecision, this.lexerActionExecutor);
-};
-
-LexerATNConfig.prototype.equals = function(other) {
- return this === other ||
- (other instanceof LexerATNConfig &&
- this.passedThroughNonGreedyDecision == other.passedThroughNonGreedyDecision &&
- (this.lexerActionExecutor ? this.lexerActionExecutor.equals(other.lexerActionExecutor) : !other.lexerActionExecutor) &&
- ATNConfig.prototype.equals.call(this, other));
-};
-
-LexerATNConfig.prototype.hashCodeForConfigSet = LexerATNConfig.prototype.hashCode;
-
-LexerATNConfig.prototype.equalsForConfigSet = LexerATNConfig.prototype.equals;
-
-
-LexerATNConfig.prototype.checkNonGreedyDecision = function(source, target) {
- return source.passedThroughNonGreedyDecision ||
- (target instanceof DecisionState) && target.nonGreedy;
-};
-
-exports.ATNConfig = ATNConfig;
-exports.LexerATNConfig = LexerATNConfig;
-
-/***/ }),
-/* 14 */
-/***/ (function(module, exports, __webpack_require__) {
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-///
-
-// A rule context is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack. We actually carry no
-// information about the rule associated with this context (except
-// when parsing). We keep only the state number of the invoking state from
-// the ATN submachine that invoked this. Contrast this with the s
-// pointer inside ParserRuleContext that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the subclass
-// ParserRuleContext.
-//
-// @see ParserRuleContext
-///
-
-var RuleNode = __webpack_require__(4).RuleNode;
-var INVALID_INTERVAL = __webpack_require__(4).INVALID_INTERVAL;
-var INVALID_ALT_NUMBER = __webpack_require__(7).INVALID_ALT_NUMBER;
-
-function RuleContext(parent, invokingState) {
- RuleNode.call(this);
- // What context invoked this rule?
- this.parentCtx = parent || null;
- // What state invoked the rule associated with this context?
- // The "return address" is the followState of invokingState.
- // If parent is null, this should be -1.
- this.invokingState = invokingState || -1;
- return this;
-}
-
-RuleContext.prototype = Object.create(RuleNode.prototype);
-RuleContext.prototype.constructor = RuleContext;
-
-RuleContext.prototype.depth = function() {
- var n = 0;
- var p = this;
- while (p !== null) {
- p = p.parentCtx;
- n += 1;
- }
- return n;
-};
-
-// A context is empty if there is no invoking state, meaning nobody called
-// the current context.
-RuleContext.prototype.isEmpty = function() {
- return this.invokingState === -1;
-};
-
-// satisfy the ParseTree / SyntaxTree interface
-
-RuleContext.prototype.getSourceInterval = function() {
- return INVALID_INTERVAL;
-};
-
-RuleContext.prototype.getRuleContext = function() {
- return this;
-};
-
-RuleContext.prototype.getPayload = function() {
- return this;
-};
-
-// Return the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of this
-// method.
-// /
-RuleContext.prototype.getText = function() {
- if (this.getChildCount() === 0) {
- return "";
- } else {
- return this.children.map(function(child) {
- return child.getText();
- }).join("");
- }
-};
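
getText() above simply concatenates leaf text in document order. A minimal stand-alone sketch of the same recursion, using hypothetical stub nodes rather than this module's classes:

    // Stand-in for the recursion above; nodes only need getChildCount()/getText().
    function textOf(node) {
      if (node.getChildCount() === 0) {
        return node.getText();
      }
      return node.children.map(textOf).join("");
    }

    // Hypothetical tree for input "1 + 2" whose WS tokens were skipped:
    const leaf = (t) => ({ getChildCount: () => 0, getText: () => t });
    const expr = {
      getChildCount: () => 3,
      children: [leaf("1"), leaf("+"), leaf("2")],
      getText() { return textOf(this); }
    };
    console.log(expr.getText()); // "1+2" -- hidden-channel tokens never appear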
-
-// For rule associated with this parse tree internal node, return
-// the outer alternative number used to match the input. Default
-// implementation does not compute nor store this alt num. Create
-// a subclass of ParserRuleContext with backing field and set
-// option contextSuperClass to set it.
-RuleContext.prototype.getAltNumber = function() { return INVALID_ALT_NUMBER; }
-
-// Set the outer alternative number for this context node. Default
-// implementation does nothing to avoid backing field overhead for
-// trees that don't need it. Create
-// a subclass of ParserRuleContext with backing field and set
-// option contextSuperClass.
-RuleContext.prototype.setAltNumber = function(altNumber) { }
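
For contexts that do need the alt number, the comments above point at the contextSuperClass grammar option. A hedged sketch of such a backing-field subclass (the class name is illustrative; RuleContext and INVALID_ALT_NUMBER are the ones defined in this module):

    function AltNumberRuleContext(parent, invokingState) {
      RuleContext.call(this, parent, invokingState);
      this._altNumber = INVALID_ALT_NUMBER; // backing field the base class omits
    }
    AltNumberRuleContext.prototype = Object.create(RuleContext.prototype);
    AltNumberRuleContext.prototype.constructor = AltNumberRuleContext;
    AltNumberRuleContext.prototype.getAltNumber = function() { return this._altNumber; };
    AltNumberRuleContext.prototype.setAltNumber = function(n) { this._altNumber = n; };

    // a grammar would then select it with:
    //   options { contextSuperClass = AltNumberRuleContext; }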
-
-RuleContext.prototype.getChild = function(i) {
- return null;
-};
-
-RuleContext.prototype.getChildCount = function() {
- return 0;
-};
-
-RuleContext.prototype.accept = function(visitor) {
- return visitor.visitChildren(this);
-};
-
-//need to manage circular dependencies, so export now
-exports.RuleContext = RuleContext;
-var Trees = __webpack_require__(20).Trees;
-
-
-// Print out a whole tree, not just a node, in LISP format
-// (root child1 .. childN). Print just a node if this is a leaf.
-//
-
-RuleContext.prototype.toStringTree = function(ruleNames, recog) {
- return Trees.toStringTree(this, ruleNames, recog);
-};
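
Hypothetical usage with a generated parser (the grammar, rule, and parser names are illustrative, not from this repo):

    // expr : expr '+' expr | INT ;   -- for input "1+2":
    const tree = parser.expr();
    console.log(tree.toStringTree(parser.ruleNames, parser));
    // prints the LISP form described above, e.g. "(expr (expr 1) + (expr 2))"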
-
-RuleContext.prototype.toString = function(ruleNames, stop) {
- ruleNames = ruleNames || null;
- stop = stop || null;
- var p = this;
- var s = "[";
- while (p !== null && p !== stop) {
- if (ruleNames === null) {
- if (!p.isEmpty()) {
- s += p.invokingState;
- }
- } else {
- var ri = p.ruleIndex;
- var ruleName = (ri >= 0 && ri < ruleNames.length) ? ruleNames[ri]
- : "" + ri;
- s += ruleName;
- }
- if (p.parentCtx !== null && (ruleNames !== null || !p.parentCtx.isEmpty())) {
- s += " ";
- }
- p = p.parentCtx;
- }
- s += "]";
- return s;
-};
-
-
-
-/***/ }),
-/* 15 */
-/***/ (function(module, exports, __webpack_require__) {
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-///
-
-// A lexer is a recognizer that draws input symbols from a character stream.
-// Lexer grammars result in a subclass of this object. A Lexer object
-// uses simplified match() and error recovery mechanisms in the interest of speed.
-
-var Token = __webpack_require__(1).Token;
-var Recognizer = __webpack_require__(24).Recognizer;
-var CommonTokenFactory = __webpack_require__(39).CommonTokenFactory;
-var RecognitionException = __webpack_require__(5).RecognitionException;
-var LexerNoViableAltException = __webpack_require__(5).LexerNoViableAltException;
-
-function TokenSource() {
- return this;
-}
-
-function Lexer(input) {
- Recognizer.call(this);
- this._input = input;
- this._factory = CommonTokenFactory.DEFAULT;
- this._tokenFactorySourcePair = [ this, input ];
-
- this._interp = null; // child classes must populate this
-
- // The goal of all lexer rules/methods is to create a token object.
- // this is an instance variable as multiple rules may collaborate to
- // create a single token. nextToken will return this object after
- // matching lexer rule(s). If you subclass to allow multiple token
- // emissions, then set this to the last token to be matched or
- // something nonnull so that the auto token emit mechanism will not
- // emit another token.
- this._token = null;
-
- // What character index in the stream did the current token start at?
- // Needed, for example, to get the text for current token. Set at
- // the start of nextToken.
- this._tokenStartCharIndex = -1;
-
- // The line on which the first character of the token resides///
- this._tokenStartLine = -1;
-
- // The character position of first character within the line///
- this._tokenStartColumn = -1;
-
- // Once we see EOF on char stream, next token will be EOF.
- // If you have DONE : EOF ; then you see DONE EOF.
- this._hitEOF = false;
-
- // The channel number for the current token///
- this._channel = Token.DEFAULT_CHANNEL;
-
- // The token type for the current token///
- this._type = Token.INVALID_TYPE;
-
- this._modeStack = [];
- this._mode = Lexer.DEFAULT_MODE;
-
- // You can set the text for the current token to override what is in
- // the input char buffer. Use setText() or can set this instance var.
- // /
- this._text = null;
-
- return this;
-}
-
-Lexer.prototype = Object.create(Recognizer.prototype);
-Lexer.prototype.constructor = Lexer;
-
-Lexer.DEFAULT_MODE = 0;
-Lexer.MORE = -2;
-Lexer.SKIP = -3;
-
-Lexer.DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
-Lexer.HIDDEN = Token.HIDDEN_CHANNEL;
-Lexer.MIN_CHAR_VALUE = 0x0000;
-Lexer.MAX_CHAR_VALUE = 0x10FFFF;
-
-Lexer.prototype.reset = function() {
- // wack Lexer state variables
- if (this._input !== null) {
- this._input.seek(0); // rewind the input
- }
- this._token = null;
- this._type = Token.INVALID_TYPE;
- this._channel = Token.DEFAULT_CHANNEL;
- this._tokenStartCharIndex = -1;
- this._tokenStartColumn = -1;
- this._tokenStartLine = -1;
- this._text = null;
-
- this._hitEOF = false;
- this._mode = Lexer.DEFAULT_MODE;
- this._modeStack = [];
-
- this._interp.reset();
-};
-
-// Return a token from this source; i.e., match a token on the char stream.
-Lexer.prototype.nextToken = function() {
- if (this._input === null) {
- throw "nextToken requires a non-null input stream.";
- }
-
- // Mark start location in char stream so unbuffered streams are
- // guaranteed at least have text of current token
- var tokenStartMarker = this._input.mark();
- try {
- while (true) {
- if (this._hitEOF) {
- this.emitEOF();
- return this._token;
- }
- this._token = null;
- this._channel = Token.DEFAULT_CHANNEL;
- this._tokenStartCharIndex = this._input.index;
- this._tokenStartColumn = this._interp.column;
- this._tokenStartLine = this._interp.line;
- this._text = null;
- var continueOuter = false;
- while (true) {
- this._type = Token.INVALID_TYPE;
- var ttype = Lexer.SKIP;
- try {
- ttype = this._interp.match(this._input, this._mode);
- } catch (e) {
- if(e instanceof RecognitionException) {
- this.notifyListeners(e); // report error
- this.recover(e);
- } else {
- console.log(e.stack);
- throw e;
- }
- }
- if (this._input.LA(1) === Token.EOF) {
- this._hitEOF = true;
- }
- if (this._type === Token.INVALID_TYPE) {
- this._type = ttype;
- }
- if (this._type === Lexer.SKIP) {
- continueOuter = true;
- break;
- }
- if (this._type !== Lexer.MORE) {
- break;
- }
- }
- if (continueOuter) {
- continue;
- }
- if (this._token === null) {
- this.emit();
- }
- return this._token;
- }
- } finally {
- // make sure we release marker after match or
- // unbuffered char stream will keep buffering
- this._input.release(tokenStartMarker);
- }
-};
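
A sketch of driving this loop from outside, assuming a generated lexer class (MyLexer is hypothetical; InputStream and Token are exported by this bundle):

    const antlr4 = require('antlr4');
    const lexer = new MyLexer(new antlr4.InputStream("a b c"));
    let t = lexer.nextToken();
    while (t.type !== antlr4.Token.EOF) {  // nextToken() returns EOF forever after
      console.log(t.type, JSON.stringify(t.text));
      t = lexer.nextToken();
    }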
-
-// Instruct the lexer to skip creating a token for current lexer rule
-// and look for another token. nextToken() knows to keep looking when
-// a lexer rule finishes with token set to SKIP_TOKEN. Recall that
-// if token==null at end of any token rule, it creates one for you
-// and emits it.
-// /
-Lexer.prototype.skip = function() {
- this._type = Lexer.SKIP;
-};
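
In grammar terms this is what the skip lexer command compiles down to; an illustrative rule (not from this repo):

    // WS : [ \t\r\n]+ -> skip ;          // nextToken() keeps looking, no WS token emitted
    // equivalent hand-written action:
    // WS : [ \t\r\n]+ { this.skip(); } ;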
-
-Lexer.prototype.more = function() {
- this._type = Lexer.MORE;
-};
-
-Lexer.prototype.mode = function(m) {
- this._mode = m;
-};
-
-Lexer.prototype.pushMode = function(m) {
- if (this._interp.debug) {
- console.log("pushMode " + m);
- }
- this._modeStack.push(this._mode);
- this.mode(m);
-};
-
-Lexer.prototype.popMode = function() {
- if (this._modeStack.length === 0) {
- throw "Empty Stack";
- }
- if (this._interp.debug) {
- console.log("popMode back to " + this._modeStack.slice(0, -1));
- }
- this.mode(this._modeStack.pop());
- return this._mode;
-};
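
The mode stack contract in isolation, as a plain-JS stand-in (not this module's API) showing how pushMode/popMode pair up:

    const modeStack = [];
    let mode = 0;                          // Lexer.DEFAULT_MODE
    function pushMode(m) { modeStack.push(mode); mode = m; }
    function popMode() {
      if (modeStack.length === 0) throw "Empty Stack";
      mode = modeStack.pop();              // restore whatever was active before
      return mode;
    }
    pushMode(2);                           // e.g. enter a STRING mode on '"'
    popMode();                             // back to DEFAULT_MODE on the closing '"'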
-
-// Set the char stream and reset the lexer
-Object.defineProperty(Lexer.prototype, "inputStream", {
- get : function() {
- return this._input;
- },
- set : function(input) {
- this._input = null;
- this._tokenFactorySourcePair = [ this, this._input ];
- this.reset();
- this._input = input;
- this._tokenFactorySourcePair = [ this, this._input ];
- }
-});
-
-Object.defineProperty(Lexer.prototype, "sourceName", {
- get : function sourceName() {
- return this._input.sourceName;
- }
-});
-
-// By default does not support multiple emits per nextToken invocation
-// for efficiency reasons. Subclass and override this method, nextToken,
-// and getToken (to push tokens into a list and pull from that list
-// rather than a single variable as this implementation does).
-// /
-Lexer.prototype.emitToken = function(token) {
- this._token = token;
-};
-
-// The standard method called to automatically emit a token at the
-// outermost lexical rule. The token object should point into the
-// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override this method to emit
-// custom Token objects or provide a new factory.
-// /
-Lexer.prototype.emit = function() {
- var t = this._factory.create(this._tokenFactorySourcePair, this._type,
- this._text, this._channel, this._tokenStartCharIndex, this
- .getCharIndex() - 1, this._tokenStartLine,
- this._tokenStartColumn);
- this.emitToken(t);
- return t;
-};
-
-Lexer.prototype.emitEOF = function() {
- var cpos = this.column;
- var lpos = this.line;
- var eof = this._factory.create(this._tokenFactorySourcePair, Token.EOF,
- null, Token.DEFAULT_CHANNEL, this._input.index,
- this._input.index - 1, lpos, cpos);
- this.emitToken(eof);
- return eof;
-};
-
-Object.defineProperty(Lexer.prototype, "type", {
- get : function() {
- // must read the backing field; returning this.type here would recurse forever
- return this._type;
- },
- set : function(type) {
- this._type = type;
- }
-});
-
-Object.defineProperty(Lexer.prototype, "line", {
- get : function() {
- return this._interp.line;
- },
- set : function(line) {
- this._interp.line = line;
- }
-});
-
-Object.defineProperty(Lexer.prototype, "column", {
- get : function() {
- return this._interp.column;
- },
- set : function(column) {
- this._interp.column = column;
- }
-});
-
-
-// What is the index of the current character of lookahead?///
-Lexer.prototype.getCharIndex = function() {
- return this._input.index;
-};
-
-// Return the text matched so far for the current token or any text override.
-//Set the complete text of this token; it wipes any previous changes to the text.
-Object.defineProperty(Lexer.prototype, "text", {
- get : function() {
- if (this._text !== null) {
- return this._text;
- } else {
- return this._interp.getText(this._input);
- }
- },
- set : function(text) {
- this._text = text;
- }
-});
-// Return a list of all Token objects in input char stream.
-// Forces load of all tokens. Does not include EOF token.
-// /
-Lexer.prototype.getAllTokens = function() {
- var tokens = [];
- var t = this.nextToken();
- while (t.type !== Token.EOF) {
- tokens.push(t);
- t = this.nextToken();
- }
- return tokens;
-};
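
Hypothetical one-call usage (MyLexer is a generated class). Note that this buffers the entire input and, per the comment above, the EOF token is not included:

    const antlr4 = require('antlr4');
    const lexer = new MyLexer(new antlr4.InputStream("1+2"));
    for (const t of lexer.getAllTokens()) {
      console.log(t.type, t.text);
    }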
-
-Lexer.prototype.notifyListeners = function(e) {
- var start = this._tokenStartCharIndex;
- var stop = this._input.index;
- var text = this._input.getText(start, stop);
- var msg = "token recognition error at: '" + this.getErrorDisplay(text) + "'";
- var listener = this.getErrorListenerDispatch();
- listener.syntaxError(this, null, this._tokenStartLine,
- this._tokenStartColumn, msg, e);
-};
-
-Lexer.prototype.getErrorDisplay = function(s) {
- var d = [];
- for (var i = 0; i < s.length; i++) {
- d.push(s[i]);
- }
- return d.join('');
-};
-
-Lexer.prototype.getErrorDisplayForChar = function(c) {
- if (c.charCodeAt(0) === Token.EOF) {
- return "
-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format. The {@code skip} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}. This action is implemented by calling {@link Lexer//pushMode} with the
-// value provided by {@link //getMode}. The {@code popMode} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}. This action is implemented by calling {@link Lexer//popMode}. The {@code more} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}. This action is implemented by calling {@link Lexer//popMode}. This action is implemented by calling {@link Lexer//mode} with the
-// value provided by {@link //getMode}. This class may represent embedded actions created with the Custom actions are implemented by calling {@link Lexer//action} with the
-// appropriate rule and action indexes. This action is implemented by calling {@link Lexer//setChannel} with the
-// value provided by {@link //getChannel}. This action is not serialized as part of the ATN, and is only required for
-// position-dependent lexer actions which appear at a location other than the
-// end of a rule. For more information about DFA optimizations employed for
-// lexer actions, see {@link LexerActionExecutor//append} and
-// {@link LexerActionExecutor//fixOffsetBeforeMatch}. Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}. This method calls {@link //execute} on the result of {@link //getAction}
-// using the provided {@code lexer}. Used for XPath and tree pattern compilation. This cache makes a huge difference in memory and a little bit in speed.
- // For the Java grammar on java.*, it dropped the memory requirements
- // at the end from 25M to 16M. We don't store any of the full context
- // graphs in the DFA because they are limited to local context only,
- // but apparently there's a lot of repetition there as well. We optimize
- // the config contexts before storing the config set in the DFA states
- // by literally rebuilding them with cached subgraphs only. I tried a cache for use during closure operations, that was
- // whacked after each adaptivePredict(). It cost a little bit
- // more time I think and doesn't save on the overall footprint
- // so it's not worth the complexity.
-// {@link //SLL}:
-// When using this prediction mode, the parser will either return a correct
-// parse tree (i.e. the same parse tree that would be returned with the
-// {@link //LL} prediction mode), or it will report a syntax error. If a
-// syntax error is encountered when using the {@link //SLL} prediction mode,
-// it may be due to either an actual syntax error in the input or indicate
-// that the particular combination of grammar and input requires the more
-// powerful {@link //LL} prediction abilities to complete successfully.
-// This prediction mode does not provide any guarantees for prediction
-// behavior for syntactically-incorrect inputs.
-//
-// {@link //LL}:
-// When using this prediction mode, the parser will make correct decisions
-// for all syntactically-correct grammar and input combinations. However, in
-// cases where the grammar is truly ambiguous this prediction mode might not
-// report a precise answer for exactly which alternatives are ambiguous.
-// This prediction mode does not provide any guarantees for prediction
-// behavior for syntactically-incorrect inputs.
-//
-// {@link //LL_EXACT_AMBIG_DETECTION}:
-// This prediction mode may be used for diagnosing ambiguities during
-// grammar development. Due to the performance overhead of calculating sets
-// of ambiguous alternatives, this prediction mode should be avoided when
-// the exact results are not necessary.
-// This prediction mode does not provide any guarantees for prediction
-// behavior for syntactically-incorrect inputs.
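
These modes are commonly combined into two-stage parsing: try the cheap SLL mode with a bail-out error strategy, and only re-parse with full LL on failure. A hedged sketch; MyParser and startRule are hypothetical generated names, and the antlr4.atn / antlr4.error exports are assumed from this bundle:

    const antlr4 = require('antlr4');

    function parseTwoStage(makeTokenStream) {
      const fast = new MyParser(makeTokenStream());
      fast._interp.predictionMode = antlr4.atn.PredictionMode.SLL;
      fast._errHandler = new antlr4.error.BailErrorStrategy(); // throw instead of recover
      try {
        return fast.startRule();
      } catch (e) {
        // Either a real syntax error or an SLL-only weakness: retry with full LL.
        const full = new MyParser(makeTokenStream());          // fresh stream and parser
        full._interp.predictionMode = antlr4.atn.PredictionMode.LL;
        return full.startRule();
      }
    }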
-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code s : (ID | ID ID?) ';' ;}
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-// {@code a : A | A | A B ;}
-//
-// After matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts, so we might see two configurations like the
-// following: {@code (s, 1, x, {}), (s, 1, x', {p})}.
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations:
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}.
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
-// line line:charPositionInLine msg
-//
-//
-ConsoleErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) {
- console.error("line " + line + ":" + column + " " + msg);
-};
-
-function ProxyErrorListener(delegates) {
- ErrorListener.call(this);
- if (delegates===null) {
- throw "delegates";
- }
- this.delegates = delegates;
- return this;
-}
-
-ProxyErrorListener.prototype = Object.create(ErrorListener.prototype);
-ProxyErrorListener.prototype.constructor = ProxyErrorListener;
-
-ProxyErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) {
- this.delegates.map(function(d) { d.syntaxError(recognizer, offendingSymbol, line, column, msg, e); });
-};
-
-ProxyErrorListener.prototype.reportAmbiguity = function(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
- this.delegates.map(function(d) { d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs); });
-};
-
-ProxyErrorListener.prototype.reportAttemptingFullContext = function(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) {
- this.delegates.map(function(d) { d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs); });
-};
-
-ProxyErrorListener.prototype.reportContextSensitivity = function(recognizer, dfa, startIndex, stopIndex, prediction, configs) {
- this.delegates.map(function(d) { d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs); });
-};
-
-exports.ErrorListener = ErrorListener;
-exports.ConsoleErrorListener = ConsoleErrorListener;
-exports.ProxyErrorListener = ProxyErrorListener;
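
A sketch of a custom listener plugged in beside (or instead of) the console default; the ProxyErrorListener above is what fans each callback out to all registered listeners. This assumes the bundle re-exports ErrorListener under antlr4.error, which is worth verifying:

    const antlr4 = require('antlr4');

    function CollectingErrorListener() {
      antlr4.error.ErrorListener.call(this);
      this.errors = [];
    }
    CollectingErrorListener.prototype = Object.create(antlr4.error.ErrorListener.prototype);
    CollectingErrorListener.prototype.constructor = CollectingErrorListener;
    CollectingErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) {
      this.errors.push(line + ":" + column + " " + msg);
    };

    // usage on any lexer or parser:
    //   recognizer.removeErrorListeners();   // drop the ConsoleErrorListener default
    //   recognizer.addErrorListener(new CollectingErrorListener());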
-
-
-
-/***/ }),
-/* 17 */
-/***/ (function(module, exports) {
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-// A DFA walker that knows how to dump them to serialized strings.#/
-
-
-function DFASerializer(dfa, literalNames, symbolicNames) {
- this.dfa = dfa;
- this.literalNames = literalNames || [];
- this.symbolicNames = symbolicNames || [];
- return this;
-}
-
-DFASerializer.prototype.toString = function() {
- if(this.dfa.s0 === null) {
- return null;
- }
- var buf = "";
- var states = this.dfa.sortedStates();
- for(var i=0;i<states.length;i++) {
-
-// This class may represent embedded actions created with the {@code {...}}
-// syntax in ANTLR 4, as well as actions created for lexer commands where the
-// command argument could not be evaluated when the grammar was compiled.
-//
-//
-//
-// The basic idea is to split the set of configurations {@code C} into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x,
-// not alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set: {@code |A_i|>1} and
-// {@code A_i = A_j} for all {@code i}, {@code j}.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-//
-PredictionMode.resolvesToJustOneViableAlt = function(altsets) {
- return PredictionMode.getSingleViableAlt(altsets);
-};
-
-//
-// Determines if every alternative subset in {@code altsets} contains more
-// than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every {@link BitSet} in {@code altsets} has
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
-PredictionMode.allSubsetsConflict = function(altsets) {
- return ! PredictionMode.hasNonConflictingAltSet(altsets);
-};
-//
-// Determines if any single alternative subset in {@code altsets} contains
-// exactly one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-//
-PredictionMode.hasNonConflictingAltSet = function(altsets) {
- for(var i=0;i<altsets.length;i++) {
-
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-//
-PredictionMode.getStateToAltMap = function(configs) {
- var m = new AltDict();
- configs.items.map(function(c) {
- var alts = m.get(c.state);
- if (alts === null) {
- alts = new BitSet();
- m.put(c.state, alts);
- }
- alts.add(c.alt);
- });
- return m;
-};
-
-PredictionMode.hasStateAssociatedWithOneAlt = function(configs) {
- var values = PredictionMode.getStateToAltMap(configs).values();
- for(var i=0;i<values.length;i++) {
The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.
-DefaultErrorStrategy.prototype.reset = function(recognizer) { - this.endErrorCondition(recognizer); -}; - -// -// This method is called to enter error recovery mode when a recognition -// exception is reported. -// -// @param recognizer the parser instance -// -DefaultErrorStrategy.prototype.beginErrorCondition = function(recognizer) { - this.errorRecoveryMode = true; -}; - -DefaultErrorStrategy.prototype.inErrorRecoveryMode = function(recognizer) { - return this.errorRecoveryMode; -}; - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -DefaultErrorStrategy.prototype.endErrorCondition = function(recognizer) { - this.errorRecoveryMode = false; - this.lastErrorStates = null; - this.lastErrorIndex = -1; -}; - -// -// {@inheritDoc} -// -//The default implementation simply calls {@link //endErrorCondition}.
-// -DefaultErrorStrategy.prototype.reportMatch = function(recognizer) { - this.endErrorCondition(recognizer); -}; - -// -// {@inheritDoc} -// -//The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the reporting task based on the runtime type of {@code e} -// according to the following table.
-// -//The default implementation resynchronizes the parser by consuming tokens -// until we find one in the resynchronization set--loosely the set of tokens -// that can follow the current rule.
-// -DefaultErrorStrategy.prototype.recover = function(recognizer, e) { - if (this.lastErrorIndex===recognizer.getInputStream().index && - this.lastErrorStates !== null && this.lastErrorStates.indexOf(recognizer.state)>=0) { - // uh oh, another error at same token index and previously-visited - // state in ATN; must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop; this is a failsafe. - recognizer.consume(); - } - this.lastErrorIndex = recognizer._input.index; - if (this.lastErrorStates === null) { - this.lastErrorStates = []; - } - this.lastErrorStates.push(recognizer.state); - var followSet = this.getErrorRecoverySet(recognizer); - this.consumeUntil(recognizer, followSet); -}; - -// The default implementation of {@link ANTLRErrorStrategy//sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at this point in the ATN. You can call this anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//Implements Jim Idle's magic sync mechanism in closures and optional -// subrules. E.g.,
-//
-// <pre>
-// a : sync ( stuff sync )* ;
-// sync : {consume to what can follow sync} ;
-// </pre>
-//
-// At the start of a sub rule upon error, {@link //sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// resynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-//
-// <pre>
-// classDef : 'class' ID '{' member* '}'
-// </pre>
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as a blank { }.
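Turning sync off is exactly as small as the comment suggests; a sketch using the same prototype subclassing that BailErrorStrategy uses later in this file:

function NoSyncErrorStrategy() {
    DefaultErrorStrategy.call(this);
    return this;
}
NoSyncErrorStrategy.prototype = Object.create(DefaultErrorStrategy.prototype);
NoSyncErrorStrategy.prototype.constructor = NoSyncErrorStrategy;
// blank override: skip the token-set comparison before subrules/loops
NoSyncErrorStrategy.prototype.sync = function(recognizer) {
};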
-//
-DefaultErrorStrategy.prototype.sync = function(recognizer) {
-    // If already recovering, don't try to sync
-    if (this.inErrorRecoveryMode(recognizer)) {
-        return;
-    }
-    var s = recognizer._interp.atn.states[recognizer.state];
-    var la = recognizer.getTokenStream().LA(1);
-    // try cheaper subset first; might get lucky. seems to shave a wee bit off
-    var nextTokens = recognizer.atn.nextTokens(s);
-    if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) {
-        return;
-    }
-    switch (s.stateType) {
-    case ATNState.BLOCK_START:
-    case ATNState.STAR_BLOCK_START:
-    case ATNState.PLUS_BLOCK_START:
-    case ATNState.STAR_LOOP_ENTRY:
-        // report error and recover if possible
-        if (this.singleTokenDeletion(recognizer) !== null) {
-            return;
-        } else {
-            throw new InputMismatchException(recognizer);
-        }
-        break;
-    case ATNState.PLUS_LOOP_BACK:
-    case ATNState.STAR_LOOP_BACK:
-        this.reportUnwantedToken(recognizer);
-        var expecting = new IntervalSet();
-        expecting.addSet(recognizer.getExpectedTokens());
-        var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer));
-        this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule);
-        break;
-    default:
-        // do nothing if we can't identify the exact kind of ATN state
-    }
-};
-
-// This is called by {@link //reportError} when the exception is a
-// {@link NoViableAltException}.
-//
-// @see //reportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-//
-DefaultErrorStrategy.prototype.reportNoViableAlternative = function(recognizer, e) {
-    var tokens = recognizer.getTokenStream();
-    var input;
-    if(tokens !== null) {
-        if (e.startToken.type===Token.EOF) {
-            input = "<EOF>";
-        } else {
-            input = tokens.getText(new Interval(e.startToken.tokenIndex, e.offendingToken.tokenIndex));
-        }
-    } else {
-        input = "<unknown input>";
-    }
-    var msg = "no viable alternative at input " + this.escapeWSAndQuote(input);
-    recognizer.notifyErrorListeners(msg, e.offendingToken, e);
-};
-
-// This method is called to report a syntax error which requires the removal
-// of a token from the input stream. At the time this method is called, the
-// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
-// removed from the input stream. When this method returns,
-// {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// to enter error recovery mode, followed by calling
-// {@link Parser//notifyErrorListeners}.
-//
-// @param recognizer the parser instance
-//
-DefaultErrorStrategy.prototype.reportUnwantedToken = function(recognizer) {
-    if (this.inErrorRecoveryMode(recognizer)) {
-        return;
-    }
-    this.beginErrorCondition(recognizer);
-    var t = recognizer.getCurrentToken();
-    var tokenName = this.getTokenErrorDisplay(t);
-    var expecting = this.getExpectedTokens(recognizer);
-    var msg = "extraneous input " + tokenName + " expecting " +
-        expecting.toString(recognizer.literalNames, recognizer.symbolicNames);
-    recognizer.notifyErrorListeners(msg, t, null);
-};
-
-// This method is called to report a syntax error which requires the
-// insertion of a missing token into the input stream. At the time this
-// method is called, the missing token has not yet been inserted. When this
-// method returns, {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// to enter error recovery mode, followed by calling
-// {@link Parser//notifyErrorListeners}.
-//
-// @param recognizer the parser instance
-//
-DefaultErrorStrategy.prototype.reportMissingToken = function(recognizer) {
-    if (this.inErrorRecoveryMode(recognizer)) {
-        return;
-    }
-    this.beginErrorCondition(recognizer);
-    var t = recognizer.getCurrentToken();
-    var expecting = this.getExpectedTokens(recognizer);
-    var msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) +
-        " at " + this.getTokenErrorDisplay(t);
-    recognizer.notifyErrorListeners(msg, t, null);
-};
-
-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method throws an
-// {@link InputMismatchException}.
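Both report methods above funnel their text through Parser.notifyErrorListeners, so the simplest way to observe the "extraneous input ..." and "missing ... at ..." diagnostics is a custom listener. A minimal sketch, assuming a generated parser instance named parser; the listener object shape follows the antlr4 JavaScript runtime:

var errors = [];
parser.removeErrorListeners();
parser.addErrorListener({
    // receives the "extraneous input ... expecting ..." and
    // "missing ... at ..." messages built by the two methods above
    syntaxError: function(recognizer, offendingSymbol, line, column, msg, e) {
        errors.push(line + ":" + column + " " + msg);
    },
    reportAmbiguity: function() {},
    reportAttemptingFullContext: function() {},
    reportContextSensitivity: function() {}
});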
-//
-// EXTRA TOKEN (single token deletion)
-//
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the match operation.
-//
-// This recovery strategy is implemented by {@link //singleTokenDeletion}.
-//
-// MISSING TOKEN (single token insertion)
-//
-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the match operation.
-//
-// This recovery strategy is implemented by {@link //singleTokenInsertion}.
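Both checks boil down to one token of extra lookahead; a compact, purely illustrative paraphrase (expectedHere, expectedAtLL2, and ts are stand-ins, not names from this file):

// deletion is viable when the token after the offender is what we expect now
var deletionViable = expectedHere.contains(ts.LA(2));
// insertion is viable when the current token matches what would follow the
// missing one, i.e. the expected set one ATN transition further on
var insertionViable = expectedAtLL2.contains(ts.LA(1));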
-// -//EXAMPLE
-// -//For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have -// call chain:
-//
-// <pre>
-// stat → expr → atom
-// </pre>
-//
-// and it will be trying to match the {@code ')'} at this point in the
-// derivation:
-//
-// <pre>
-// => ID '=' '(' INT ')' ('+' atom)* ';'
-//                    ^
-// </pre>
-//
-// The attempt to match {@code ')'} will fail when it sees {@code ';'} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
-// is in the set of tokens that can follow the {@code ')'} token reference
-// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-//
-DefaultErrorStrategy.prototype.recoverInline = function(recognizer) {
-    // SINGLE TOKEN DELETION
-    var matchedSymbol = this.singleTokenDeletion(recognizer);
-    if (matchedSymbol !== null) {
-        // we have deleted the extra token.
-        // now, move past ttype token as if all were ok
-        recognizer.consume();
-        return matchedSymbol;
-    }
-    // SINGLE TOKEN INSERTION
-    if (this.singleTokenInsertion(recognizer)) {
-        return this.getMissingSymbol(recognizer);
-    }
-    // even that didn't work; must throw the exception
-    throw new InputMismatchException(recognizer);
-};
-
-//
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
-// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
-// mode.
-//
-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully matched
-// if it were instead the {@code LA(2)} symbol. If this method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.
-//
-// @param recognizer the parser instance
-// @return {@code true} if single-token insertion is a viable recovery
-// strategy for the current mismatched input, otherwise {@code false}
-//
-DefaultErrorStrategy.prototype.singleTokenInsertion = function(recognizer) {
-    var currentSymbolType = recognizer.getTokenStream().LA(1);
-    // if current token is consistent with what could come after current
-    // ATN state, then we know we're missing a token; error recovery
-    // is free to conjure up and insert the missing token
-    var atn = recognizer._interp.atn;
-    var currentState = atn.states[recognizer.state];
-    var next = currentState.transitions[0].target;
-    var expectingAtLL2 = atn.nextTokens(next, recognizer._ctx);
-    if (expectingAtLL2.contains(currentSymbolType)) {
-        this.reportMissingToken(recognizer);
-        return true;
-    } else {
-        return false;
-    }
-};
-
-// This method implements the single-token deletion inline error recovery
-// strategy. It is called by {@link //recoverInline} to attempt to recover
-// from mismatched input. If this method returns null, the parser and error
-// handler state will not have changed. If this method returns non-null,
-// {@code recognizer} will not be in error recovery mode since the
-// returned token was a successful match.
-//
-// If the single-token deletion is successful, this method calls
-// {@link //reportUnwantedToken} to report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //reportMatch} is called to signal a successful
-// match.
-//
-// @param recognizer the parser instance
-// @return the successfully matched {@link Token} instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise
-// {@code null}
-//
-DefaultErrorStrategy.prototype.singleTokenDeletion = function(recognizer) {
-    var nextTokenType = recognizer.getTokenStream().LA(2);
-    var expecting = this.getExpectedTokens(recognizer);
-    if (expecting.contains(nextTokenType)) {
-        this.reportUnwantedToken(recognizer);
-        // print("recoverFromMismatchedToken deleting " \
-        //     + str(recognizer.getTokenStream().LT(1)) \
-        //     + " since " + str(recognizer.getTokenStream().LT(2)) \
-        //     + " is what we want", file=sys.stderr)
-        recognizer.consume(); // simply delete extra token
-        // we want to return the token we're actually matching
-        var matchedSymbol = recognizer.getCurrentToken();
-        this.reportMatch(recognizer); // we know current token is correct
-        return matchedSymbol;
-    } else {
-        return null;
-    }
-};
-
-// Conjure up a missing token during error recovery.
-//
-// The recognizer attempts to recover from single missing
-// symbols. But, actions might refer to that missing symbol.
-// For example, x=ID {f($x);}. The action clearly assumes
-// that there has been an identifier matched previously and that
-// $x points at that token. If that token is missing, but
-// the next token in the stream is what we want we assume that
-// this token is missing and we keep going. Because we
-// have to return some token to replace the missing token,
-// we have to conjure one up. This method gives the user control
-// over the tokens returned for missing tokens. Mostly,
-// you will want to create something special for identifier
-// tokens. For literals such as '{' and ',', the default
-// action in the parser or tree parser works. It simply creates
-// a CommonToken of the appropriate type. The text will be the token.
-// If you change what tokens must be created by the lexer,
-// override this method to create the appropriate tokens.
-//
-DefaultErrorStrategy.prototype.getMissingSymbol = function(recognizer) {
-    var currentSymbol = recognizer.getCurrentToken();
-    var expecting = this.getExpectedTokens(recognizer);
-    var expectedTokenType = expecting.first(); // get any element
-    var tokenText;
-    if (expectedTokenType===Token.EOF) {
-        tokenText = "<missing EOF>";
-
-// This error strategy is useful in the following scenarios.
-//
-// <pre>
-// {@code myparser.setErrorHandler(new BailErrorStrategy());}
-// </pre>
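In running code that pattern comes out roughly as below; the entry-rule name is hypothetical, and setErrorHandler mirrors the call in the snippet above:

parser.setErrorHandler(new BailErrorStrategy());
try {
    var tree = parser.turtlestarDoc(); // hypothetical entry rule
} catch (e) {
    // recover() wrapped the original RecognitionException in a
    // ParseCancellationException; nothing was consumed past the error point
    console.log("bailed out:", e);
}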
-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) -// -function BailErrorStrategy() { - DefaultErrorStrategy.call(this); - return this; -} - -BailErrorStrategy.prototype = Object.create(DefaultErrorStrategy.prototype); -BailErrorStrategy.prototype.constructor = BailErrorStrategy; - -// Instead of recovering from exception {@code e}, re-throw it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule function catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -BailErrorStrategy.prototype.recover = function(recognizer, e) { - var context = recognizer._ctx; - while (context !== null) { - context.exception = e; - context = context.parentCtx; - } - throw new ParseCancellationException(e); -}; - -// Make sure we don't attempt to recover inline; if the parser -// successfully recovers, it won't throw an exception. -// -BailErrorStrategy.prototype.recoverInline = function(recognizer) { - this.recover(recognizer, new InputMismatchException(recognizer)); -}; - -// Make sure we don't attempt to recover from problems in subrules.// -BailErrorStrategy.prototype.sync = function(recognizer) { - // pass -}; - -exports.BailErrorStrategy = BailErrorStrategy; -exports.DefaultErrorStrategy = DefaultErrorStrategy; - - -/***/ }), -/* 30 */ -/***/ (function(module, exports) { - - - -/***/ }), -/* 31 */ -/***/ (function(module, exports, __webpack_require__) { - -// Generated from D:\git\n3dev\N3\grammar\turtlestar.g4 by ANTLR 4.6 -// jshint ignore: start -var antlr4 = __webpack_require__(12); -var turtlestarListener = __webpack_require__(32).turtlestarListener; -var turtlestarVisitor = __webpack_require__(33).turtlestarVisitor; - -var grammarFileName = "turtlestar.g4"; - -var serializedATN = ["\u0003\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd", - "\u00030\u00b6\u0004\u0002\t\u0002\u0004\u0003\t\u0003\u0004\u0004\t", - "\u0004\u0004\u0005\t\u0005\u0004\u0006\t\u0006\u0004\u0007\t\u0007\u0004", - "\b\t\b\u0004\t\t\t\u0004\n\t\n\u0004\u000b\t\u000b\u0004\f\t\f\u0004", - "\r\t\r\u0004\u000e\t\u000e\u0004\u000f\t\u000f\u0004\u0010\t\u0010\u0004", - "\u0011\t\u0011\u0004\u0012\t\u0012\u0004\u0013\t\u0013\u0004\u0014\t", - "\u0014\u0004\u0015\t\u0015\u0004\u0016\t\u0016\u0004\u0017\t\u0017\u0004", - "\u0018\t\u0018\u0003\u0002\u0007\u00022\n\u0002\f\u0002\u000e\u0002", - "5\u000b\u0002\u0003\u0002\u0003\u0002\u0003\u0003\u0003\u0003\u0003", - "\u0003\u0003\u0003\u0005\u0003=\n\u0003\u0003\u0004\u0003\u0004\u0003", - "\u0004\u0003\u0004\u0005\u0004C\n\u0004\u0003\u0005\u0003\u0005\u0003", - "\u0005\u0003\u0005\u0003\u0005\u0003\u0006\u0003\u0006\u0003\u0006\u0003", - "\u0006\u0003\u0007\u0003\u0007\u0003\u0007\u0003\b\u0003\b\u0003\b\u0003", - "\b\u0003\t\u0003\t\u0003\t\u0003\t\u0003\t\u0005\tZ\n\t\u0005\t\\\n", - "\t\u0003\n\u0003\n\u0003\n\u0003\n\u0003\n\u0003\n\u0005\nd\n\n\u0007", - "\nf\n\n\f\n\u000e\ni\u000b\n\u0003\u000b\u0003\u000b\u0003\u000b\u0007", - "\u000bn\n\u000b\f\u000b\u000e\u000bq\u000b\u000b\u0003\f\u0003\f\u0005", - "\fu\n\f\u0003\r\u0003\r\u0003\r\u0003\r\u0005\r{\n\r\u0003\u000e\u0003", - "\u000e\u0003\u000f\u0003\u000f\u0003\u000f\u0003\u000f\u0003\u000f\u0003", - "\u000f\u0005\u000f\u0085\n\u000f\u0003\u0010\u0003\u0010\u0003\u0010", - "\u0003\u0010\u0003\u0010\u0003\u0010\u0003\u0011\u0003\u0011\u0003\u0011", - "\u0005\u0011\u0090\n\u0011\u0003\u0012\u0003\u0012\u0003\u0012\u0003", - "\u0012\u0005\u0012\u0096\n\u0012\u0003\u0013\u0003\u0013\u0003\u0013", - 
"\u0005\u0013\u009b\n\u0013\u0003\u0014\u0003\u0014\u0003\u0014\u0003", - "\u0014\u0003\u0015\u0003\u0015\u0007\u0015\u00a3\n\u0015\f\u0015\u000e", - "\u0015\u00a6\u000b\u0015\u0003\u0015\u0003\u0015\u0003\u0016\u0003\u0016", - "\u0003\u0016\u0003\u0016\u0005\u0016\u00ae\n\u0016\u0003\u0017\u0003", - "\u0017\u0005\u0017\u00b2\n\u0017\u0003\u0018\u0003\u0018\u0003\u0018", - "\u0002\u0002\u0019\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016", - "\u0018\u001a\u001c\u001e \"$&(*,.\u0002\u0003\u0003\u0002\u0016\u0017", - "\u00bc\u00023\u0003\u0002\u0002\u0002\u0004<\u0003\u0002\u0002\u0002", - "\u0006B\u0003\u0002\u0002\u0002\bD\u0003\u0002\u0002\u0002\nI\u0003", - "\u0002\u0002\u0002\fM\u0003\u0002\u0002\u0002\u000eP\u0003\u0002\u0002", - "\u0002\u0010[\u0003\u0002\u0002\u0002\u0012]\u0003\u0002\u0002\u0002", - "\u0014j\u0003\u0002\u0002\u0002\u0016t\u0003\u0002\u0002\u0002\u0018", - "z\u0003\u0002\u0002\u0002\u001a|\u0003\u0002\u0002\u0002\u001c\u0084", - "\u0003\u0002\u0002\u0002\u001e\u0086\u0003\u0002\u0002\u0002 \u008f", - "\u0003\u0002\u0002\u0002\"\u0095\u0003\u0002\u0002\u0002$\u009a\u0003", - "\u0002\u0002\u0002&\u009c\u0003\u0002\u0002\u0002(\u00a0\u0003\u0002", - "\u0002\u0002*\u00a9\u0003\u0002\u0002\u0002,\u00b1\u0003\u0002\u0002", - "\u0002.\u00b3\u0003\u0002\u0002\u000202\u0005\u0004\u0003\u000210\u0003", - "\u0002\u0002\u000225\u0003\u0002\u0002\u000231\u0003\u0002\u0002\u0002", - "34\u0003\u0002\u0002\u000246\u0003\u0002\u0002\u000253\u0003\u0002\u0002", - "\u000267\u0007\u0002\u0002\u00037\u0003\u0003\u0002\u0002\u00028=\u0005", - "\u0006\u0004\u00029:\u0005\u0010\t\u0002:;\u0007\u0003\u0002\u0002;", - "=\u0003\u0002\u0002\u0002<8\u0003\u0002\u0002\u0002<9\u0003\u0002\u0002", - "\u0002=\u0005\u0003\u0002\u0002\u0002>C\u0005\b\u0005\u0002?C\u0005", - "\n\u0006\u0002@C\u0005\u000e\b\u0002AC\u0005\f\u0007\u0002B>\u0003\u0002", - "\u0002\u0002B?\u0003\u0002\u0002\u0002B@\u0003\u0002\u0002\u0002BA\u0003", - "\u0002\u0002\u0002C\u0007\u0003\u0002\u0002\u0002DE\u0007\u0004\u0002", - "\u0002EF\u0007\u0016\u0002\u0002FG\u0007\u0015\u0002\u0002GH\u0007\u0003", - "\u0002\u0002H\t\u0003\u0002\u0002\u0002IJ\u0007\u0005\u0002\u0002JK", - "\u0007\u0015\u0002\u0002KL\u0007\u0003\u0002\u0002L\u000b\u0003\u0002", - "\u0002\u0002MN\u0007)\u0002\u0002NO\u0007\u0015\u0002\u0002O\r\u0003", - "\u0002\u0002\u0002PQ\u0007*\u0002\u0002QR\u0007\u0016\u0002\u0002RS", - "\u0007\u0015\u0002\u0002S\u000f\u0003\u0002\u0002\u0002TU\u0005\u0018", - "\r\u0002UV\u0005\u0012\n\u0002V\\\u0003\u0002\u0002\u0002WY\u0005&\u0014", - "\u0002XZ\u0005\u0012\n\u0002YX\u0003\u0002\u0002\u0002YZ\u0003\u0002", - "\u0002\u0002Z\\\u0003\u0002\u0002\u0002[T\u0003\u0002\u0002\u0002[W", - "\u0003\u0002\u0002\u0002\\\u0011\u0003\u0002\u0002\u0002]^\u0005\u0016", - "\f\u0002^g\u0005\u0014\u000b\u0002_c\u0007\u0006\u0002\u0002`a\u0005", - "\u0016\f\u0002ab\u0005\u0014\u000b\u0002bd\u0003\u0002\u0002\u0002c", - "`\u0003\u0002\u0002\u0002cd\u0003\u0002\u0002\u0002df\u0003\u0002\u0002", - "\u0002e_\u0003\u0002\u0002\u0002fi\u0003\u0002\u0002\u0002ge\u0003\u0002", - "\u0002\u0002gh\u0003\u0002\u0002\u0002h\u0013\u0003\u0002\u0002\u0002", - "ig\u0003\u0002\u0002\u0002jo\u0005\u001c\u000f\u0002kl\u0007\u0007\u0002", - "\u0002ln\u0005\u001c\u000f\u0002mk\u0003\u0002\u0002\u0002nq\u0003\u0002", - "\u0002\u0002om\u0003\u0002\u0002\u0002op\u0003\u0002\u0002\u0002p\u0015", - "\u0003\u0002\u0002\u0002qo\u0003\u0002\u0002\u0002ru\u0005\u001a\u000e", - "\u0002su\u0007\b\u0002\u0002tr\u0003\u0002\u0002\u0002ts\u0003\u0002", - 
"\u0002\u0002u\u0017\u0003\u0002\u0002\u0002v{\u0005,\u0017\u0002w{\u0007", - "\u0014\u0002\u0002x{\u0005(\u0015\u0002y{\u0005\u001e\u0010\u0002zv", - "\u0003\u0002\u0002\u0002zw\u0003\u0002\u0002\u0002zx\u0003\u0002\u0002", - "\u0002zy\u0003\u0002\u0002\u0002{\u0019\u0003\u0002\u0002\u0002|}\u0005", - ",\u0017\u0002}\u001b\u0003\u0002\u0002\u0002~\u0085\u0005,\u0017\u0002", - "\u007f\u0085\u0007\u0014\u0002\u0002\u0080\u0085\u0005$\u0013\u0002", - "\u0081\u0085\u0005(\u0015\u0002\u0082\u0085\u0005&\u0014\u0002\u0083", - "\u0085\u0005\u001e\u0010\u0002\u0084~\u0003\u0002\u0002\u0002\u0084", - "\u007f\u0003\u0002\u0002\u0002\u0084\u0080\u0003\u0002\u0002\u0002\u0084", - "\u0081\u0003\u0002\u0002\u0002\u0084\u0082\u0003\u0002\u0002\u0002\u0084", - "\u0083\u0003\u0002\u0002\u0002\u0085\u001d\u0003\u0002\u0002\u0002\u0086", - "\u0087\u0007\t\u0002\u0002\u0087\u0088\u0005 \u0011\u0002\u0088\u0089", - "\u0005\u001a\u000e\u0002\u0089\u008a\u0005\"\u0012\u0002\u008a\u008b", - "\u0007\n\u0002\u0002\u008b\u001f\u0003\u0002\u0002\u0002\u008c\u0090", - "\u0005,\u0017\u0002\u008d\u0090\u0007\u0014\u0002\u0002\u008e\u0090", - "\u0005\u001e\u0010\u0002\u008f\u008c\u0003\u0002\u0002\u0002\u008f\u008d", - "\u0003\u0002\u0002\u0002\u008f\u008e\u0003\u0002\u0002\u0002\u0090!", - "\u0003\u0002\u0002\u0002\u0091\u0096\u0005,\u0017\u0002\u0092\u0096", - "\u0007\u0014\u0002\u0002\u0093\u0096\u0005$\u0013\u0002\u0094\u0096", - "\u0005\u001e\u0010\u0002\u0095\u0091\u0003\u0002\u0002\u0002\u0095\u0092", - "\u0003\u0002\u0002\u0002\u0095\u0093\u0003\u0002\u0002\u0002\u0095\u0094", - "\u0003\u0002\u0002\u0002\u0096#\u0003\u0002\u0002\u0002\u0097\u009b", - "\u0005*\u0016\u0002\u0098\u009b\u0007\u0011\u0002\u0002\u0099\u009b", - "\u0007\u0012\u0002\u0002\u009a\u0097\u0003\u0002\u0002\u0002\u009a\u0098", - "\u0003\u0002\u0002\u0002\u009a\u0099\u0003\u0002\u0002\u0002\u009b%", - "\u0003\u0002\u0002\u0002\u009c\u009d\u0007\u000b\u0002\u0002\u009d\u009e", - "\u0005\u0012\n\u0002\u009e\u009f\u0007\f\u0002\u0002\u009f\'\u0003\u0002", - "\u0002\u0002\u00a0\u00a4\u0007\r\u0002\u0002\u00a1\u00a3\u0005\u001c", - "\u000f\u0002\u00a2\u00a1\u0003\u0002\u0002\u0002\u00a3\u00a6\u0003\u0002", - "\u0002\u0002\u00a4\u00a2\u0003\u0002\u0002\u0002\u00a4\u00a5\u0003\u0002", - "\u0002\u0002\u00a5\u00a7\u0003\u0002\u0002\u0002\u00a6\u00a4\u0003\u0002", - "\u0002\u0002\u00a7\u00a8\u0007\u000e\u0002\u0002\u00a8)\u0003\u0002", - "\u0002\u0002\u00a9\u00ad\u0007\u0013\u0002\u0002\u00aa\u00ae\u0007\u0019", - "\u0002\u0002\u00ab\u00ac\u0007\u000f\u0002\u0002\u00ac\u00ae\u0005,", - "\u0017\u0002\u00ad\u00aa\u0003\u0002\u0002\u0002\u00ad\u00ab\u0003\u0002", - "\u0002\u0002\u00ad\u00ae\u0003\u0002\u0002\u0002\u00ae+\u0003\u0002", - "\u0002\u0002\u00af\u00b2\u0007\u0015\u0002\u0002\u00b0\u00b2\u0005.", - "\u0018\u0002\u00b1\u00af\u0003\u0002\u0002\u0002\u00b1\u00b0\u0003\u0002", - "\u0002\u0002\u00b2-\u0003\u0002\u0002\u0002\u00b3\u00b4\t\u0002\u0002", - "\u0002\u00b4/\u0003\u0002\u0002\u0002\u00133If {@code ctx} is {@code null} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code null} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.
-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code null} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -/// -LL1Analyzer.prototype.LOOK = function(s, stopState, ctx) { - var r = new IntervalSet(); - var seeThruPreds = true; // ignore preds; get all lookahead - ctx = ctx || null; - var lookContext = ctx!==null ? predictionContextFromRuleContext(s.atn, ctx) : null; - this._LOOK(s, stopState, lookContext, r, new Set(), new BitSet(), seeThruPreds, true); - return r; -}; - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//If {@code ctx} is {@code null} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.
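For orientation, a hypothetical call to LOOK; the state and context expressions reuse the recognizer fields seen elsewhere in this bundle:

var analyzer = new LL1Analyzer(parser._interp.atn);
// the set of tokens that can follow the parser's current ATN state
var follow = analyzer.LOOK(parser._interp.atn.states[parser.state], null, parser._ctx);
console.log(follow.toString(parser.literalNames, parser.symbolicNames));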
-//
-// @param s the ATN state.
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code null} if the outer context should
-// not be used.
-// @param look The result lookahead set.
-// @param lookBusy A set used for preventing epsilon closures in the ATN
-// from causing a stack overflow. Outside code should pass
-// {@code new Set<ATNConfig>} for this argument.
-
-// If {@code speculative} is {@code true}, this method was called before
-// {@link //consume} for the matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}).
-// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -LexerATNSimulator.prototype.evaluatePredicate = function(input, ruleIndex, - predIndex, speculative) { - // assume true if no recognizer was provided - if (this.recog === null) { - return true; - } - if (!speculative) { - return this.recog.sempred(null, ruleIndex, predIndex); - } - var savedcolumn = this.column; - var savedLine = this.line; - var index = input.index; - var marker = input.mark(); - try { - this.consume(input); - return this.recog.sempred(null, ruleIndex, predIndex); - } finally { - this.column = savedcolumn; - this.line = savedLine; - input.seek(index); - input.release(marker); - } -}; - -LexerATNSimulator.prototype.captureSimState = function(settings, input, dfaState) { - settings.index = input.index; - settings.line = this.line; - settings.column = this.column; - settings.dfaState = dfaState; -}; - -LexerATNSimulator.prototype.addDFAEdge = function(from_, tk, to, cfgs) { - if (to === undefined) { - to = null; - } - if (cfgs === undefined) { - cfgs = null; - } - if (to === null && cfgs !== null) { - // leading to this call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes this edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to resynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - var suppressEdge = cfgs.hasSemanticContext; - cfgs.hasSemanticContext = false; - - to = this.addDFAState(cfgs); - - if (suppressEdge) { - return to; - } - } - // add the edge - if (tk < LexerATNSimulator.MIN_DFA_EDGE || tk > LexerATNSimulator.MAX_DFA_EDGE) { - // Only track edges within the DFA bounds - return to; - } - if (LexerATNSimulator.debug) { - console.log("EDGE " + from_ + " -> " + to + " upon " + tk); - } - if (from_.edges === null) { - // make room for tokens 1..n and -1 masquerading as index 0 - from_.edges = []; - } - from_.edges[tk - LexerATNSimulator.MIN_DFA_EDGE] = to; // connect - - return to; -}; - -// Add a new DFA state if there isn't one with this set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. 
-LexerATNSimulator.prototype.addDFAState = function(configs) { - var proposed = new DFAState(null, configs); - var firstConfigWithRuleStopState = null; - for (var i = 0; i < configs.items.length; i++) { - var cfg = configs.items[i]; - if (cfg.state instanceof RuleStopState) { - firstConfigWithRuleStopState = cfg; - break; - } - } - if (firstConfigWithRuleStopState !== null) { - proposed.isAcceptState = true; - proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor; - proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; - } - var dfa = this.decisionToDFA[this.mode]; - var existing = dfa.states.get(proposed); - if (existing!==null) { - return existing; - } - var newState = proposed; - newState.stateNumber = dfa.states.length; - configs.setReadonly(true); - newState.configs = configs; - dfa.states.add(newState); - return newState; -}; - -LexerATNSimulator.prototype.getDFA = function(mode) { - return this.decisionToDFA[mode]; -}; - -// Get the text matched so far for the current token. -LexerATNSimulator.prototype.getText = function(input) { - // index is first lookahead char, don't include. - return input.getText(this.startIndex, input.index - 1); -}; - -LexerATNSimulator.prototype.consume = function(input) { - var curChar = input.LA(1); - if (curChar === "\n".charCodeAt(0)) { - this.line += 1; - this.column = 0; - } else { - this.column += 1; - } - input.consume(); -}; - -LexerATNSimulator.prototype.getTokenName = function(tt) { - if (tt === -1) { - return "EOF"; - } else { - return "'" + String.fromCharCode(tt) + "'"; - } -}; - -exports.LexerATNSimulator = LexerATNSimulator; - - -/***/ }), -/* 39 */ -/***/ (function(module, exports, __webpack_require__) { - -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -// - -// -// This default implementation of {@link TokenFactory} creates -// {@link CommonToken} objects. -// - -var CommonToken = __webpack_require__(1).CommonToken; - -function TokenFactory() { - return this; -} - -function CommonTokenFactory(copyText) { - TokenFactory.call(this); - // Indicates whether {@link CommonToken//setText} should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings - // of text from the input after the lexer creates a token (e.g. the - // implementation of {@link CharStream//getText} in - // {@link UnbufferedCharStream} throws an - // {@link UnsupportedOperationException}). Explicitly setting the token text - // allows {@link Token//getText} to be called at any time regardless of the - // input stream implementation. - // - //- // The default value is {@code false} to avoid the performance and memory - // overhead of copying text for every token unless explicitly requested.
- // - this.copyText = copyText===undefined ? false : copyText; - return this; -} - -CommonTokenFactory.prototype = Object.create(TokenFactory.prototype); -CommonTokenFactory.prototype.constructor = CommonTokenFactory; - -// -// The default {@link CommonTokenFactory} instance. -// -//-// This token factory does not explicitly copy token text when constructing -// tokens.
-// -CommonTokenFactory.DEFAULT = new CommonTokenFactory(); - -CommonTokenFactory.prototype.create = function(source, type, text, channel, start, stop, line, column) { - var t = new CommonToken(source, type, channel, start, stop); - t.line = line; - t.column = column; - if (text !==null) { - t.text = text; - } else if (this.copyText && source[1] !==null) { - t.text = source[1].getText(start,stop); - } - return t; -}; - -CommonTokenFactory.prototype.createThin = function(type, text) { - var t = new CommonToken(null, type); - t.text = text; - return t; -}; - -exports.CommonTokenFactory = CommonTokenFactory; - - -/***/ }), -/* 40 */ -/***/ (function(module, exports, __webpack_require__) { - -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -/// - -// Represents an executor for a sequence of lexer actions which traversed during -// the matching operation of a lexer rule (token). -// -//The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.
- -var hashStuff = __webpack_require__(0).hashStuff; -var LexerIndexedCustomAction = __webpack_require__(23).LexerIndexedCustomAction; - -function LexerActionExecutor(lexerActions) { - this.lexerActions = lexerActions === null ? [] : lexerActions; - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - this.cachedHashCode = hashStuff(lexerActions); // "".join([str(la) for la in - // lexerActions])) - return this; -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while matching a token within a particular -// {@link LexerATNConfig}. If this is {@code null}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -LexerActionExecutor.append = function(lexerActionExecutor, lexerAction) { - if (lexerActionExecutor === null) { - return new LexerActionExecutor([ lexerAction ]); - } - var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]); - return new LexerActionExecutor(lexerActions); -}; - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule matches a variable number of -// characters.
-// -//Prior to traversing a match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.
-// -//If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.
-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -LexerActionExecutor.prototype.fixOffsetBeforeMatch = function(offset) { - var updatedLexerActions = null; - for (var i = 0; i < this.lexerActions.length; i++) { - if (this.lexerActions[i].isPositionDependent && - !(this.lexerActions[i] instanceof LexerIndexedCustomAction)) { - if (updatedLexerActions === null) { - updatedLexerActions = this.lexerActions.concat([]); - } - updatedLexerActions[i] = new LexerIndexedCustomAction(offset, - this.lexerActions[i]); - } - } - if (updatedLexerActions === null) { - return this; - } else { - return new LexerActionExecutor(updatedLexerActions); - } -}; - -// Execute the actions encapsulated by this executor within the context of a -// particular {@link Lexer}. -// -//This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.
-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When this method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -LexerActionExecutor.prototype.execute = function(lexer, input, startIndex) { - var requiresSeek = false; - var stopIndex = input.index; - try { - for (var i = 0; i < this.lexerActions.length; i++) { - var lexerAction = this.lexerActions[i]; - if (lexerAction instanceof LexerIndexedCustomAction) { - var offset = lexerAction.offset; - input.seek(startIndex + offset); - lexerAction = lexerAction.action; - requiresSeek = (startIndex + offset) !== stopIndex; - } else if (lexerAction.isPositionDependent) { - input.seek(stopIndex); - requiresSeek = false; - } - lexerAction.execute(lexer); - } - } finally { - if (requiresSeek) { - input.seek(stopIndex); - } - } -}; - -LexerActionExecutor.prototype.hashCode = function() { - return this.cachedHashCode; -}; - -LexerActionExecutor.prototype.updateHashCode = function(hash) { - hash.update(this.cachedHashCode); -}; - - -LexerActionExecutor.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof LexerActionExecutor)) { - return false; - } else if (this.cachedHashCode != other.cachedHashCode) { - return false; - } else if (this.lexerActions.length != other.lexerActions.length) { - return false; - } else { - var numActions = this.lexerActions.length - for (var idx = 0; idx < numActions; ++idx) { - if (!this.lexerActions[idx].equals(other.lexerActions[idx])) { - return false; - } - } - return true; - } -}; - -exports.LexerActionExecutor = LexerActionExecutor; - - -/***/ }), -/* 41 */ -/***/ (function(module, exports, __webpack_require__) { - -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -// - -// -// The embodiment of the adaptive LL(*), ALL(*), parsing strategy. -// -//-// The basic complexity of the adaptive strategy makes it harder to understand. -// We begin with ATN simulation to build paths in a DFA. Subsequent prediction -// requests go through the DFA first. If they reach a state without an edge for -// the current symbol, the algorithm fails over to the ATN simulation to -// complete the DFA path for the current input (until it finds a conflict state -// or uniquely predicting state).
-// -//-// All of that is done without using the outer context because we want to create -// a DFA that is not dependent upon the rule invocation stack when we do a -// prediction. One DFA works in all contexts. We avoid using context not -// necessarily because it's slower, although it can be, but because of the DFA -// caching problem. The closure routine only considers the rule invocation stack -// created during prediction beginning in the decision rule. For example, if -// prediction occurs without invoking another rule's ATN, there are no context -// stacks in the configurations. When lack of context leads to a conflict, we -// don't know if it's an ambiguity or a weakness in the strong LL(*) parsing -// strategy (versus full LL(*)).
-// -//-// When SLL yields a configuration set with conflict, we rewind the input and -// retry the ATN simulation, this time using full outer context without adding -// to the DFA. Configuration context stacks will be the full invocation stacks -// from the start rule. If we get a conflict using full context, then we can -// definitively say we have a true ambiguity for that input sequence. If we -// don't get a conflict, it implies that the decision is sensitive to the outer -// context. (It is not context-sensitive in the sense of context-sensitive -// grammars.)
-// -//-// The next time we reach this DFA state with an SLL conflict, through DFA -// simulation, we will again retry the ATN simulation using full context mode. -// This is slow because we can't save the results and have to "interpret" the -// ATN each time we get that input.
-// -//-// CACHING FULL CONTEXT PREDICTIONS
-// -//-// We could cache results from full context to predicted alternative easily and -// that saves a lot of time but doesn't work in presence of predicates. The set -// of visible predicates from the ATN start state changes depending on the -// context, because closure can fall off the end of a rule. I tried to cache -// tuples (stack context, semantic context, predicted alt) but it was slower -// than interpreting and much more complicated. Also required a huge amount of -// memory. The goal is not to create the world's fastest parser anyway. I'd like -// to keep this algorithm simple. By launching multiple threads, we can improve -// the speed of parsing across a large number of files.
-// -//-// There is no strict ordering between the amount of input used by SLL vs LL, -// which makes it really hard to build a cache for full context. Let's say that -// we have input A B C that leads to an SLL conflict with full context X. That -// implies that using X we might only use A B but we could also use A B C D to -// resolve conflict. Input A B C D could predict alternative 1 in one position -// in the input and A B C E could predict alternative 2 in another position in -// input. The conflicting SLL configurations could still be non-unique in the -// full context prediction, which would lead us to requiring more input than the -// original A B C. To make a prediction cache work, we have to track the exact -// input used during the previous prediction. That amounts to a cache that maps -// X to a specific DFA for that context.
-// -//-// Something should be done for left-recursive expression predictions. They are -// likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry -// with full LL thing Sam does.
-// -//-// AVOIDING FULL CONTEXT PREDICTION
-// -//-// We avoid doing full context retry when the outer context is empty, we did not -// dip into the outer context by falling off the end of the decision state rule, -// or when we force SLL mode.
-// -//-// As an example of the not dip into outer context case, consider as super -// constructor calls versus function calls. One grammar might look like -// this:
-// -//-// ctorBody -// : '{' superCall? stat* '}' -// ; -//-// -//
-// Or, you might see something like
-// -//-// stat -// : superCall ';' -// | expression ';' -// | ... -// ; -//-// -//
-// In both cases I believe that no closure operations will dip into the outer -// context. In the first case ctorBody in the worst case will stop at the '}'. -// In the 2nd case it should stop at the ';'. Both cases should stay within the -// entry rule and not dip into the outer context.
-// -//-// PREDICATES
-// -//-// Predicates are always evaluated if present in either SLL or LL both. SLL and -// LL simulation deals with predicates differently. SLL collects predicates as -// it performs closure operations like ANTLR v3 did. It delays predicate -// evaluation until it reaches and accept state. This allows us to cache the SLL -// ATN simulation whereas, if we had evaluated predicates on-the-fly during -// closure, the DFA state configuration sets would be different and we couldn't -// build up a suitable DFA.
-// -//-// When building a DFA accept state during ATN simulation, we evaluate any -// predicates and return the sole semantically valid alternative. If there is -// more than 1 alternative, we report an ambiguity. If there are 0 alternatives, -// we throw an exception. Alternatives without predicates act like they have -// true predicates. The simple way to think about it is to strip away all -// alternatives with false predicates and choose the minimum alternative that -// remains.
-// -//-// When we start in the DFA and reach an accept state that's predicated, we test -// those and return the minimum semantically viable alternative. If no -// alternatives are viable, we throw an exception.
-// -//-// During full LL ATN simulation, closure always evaluates predicates and -// on-the-fly. This is crucial to reducing the configuration set size during -// closure. It hits a landmine when parsing with the Java grammar, for example, -// without this on-the-fly evaluation.
-// -//-// SHARING DFA
-// -//-// All instances of the same parser share the same decision DFAs through a -// static field. Each instance gets its own ATN simulator but they share the -// same {@link //decisionToDFA} field. They also share a -// {@link PredictionContextCache} object that makes sure that all -// {@link PredictionContext} objects are shared among the DFA states. This makes -// a big size difference.
-// -//-// THREAD SAFETY
-// -//-// The {@link ParserATNSimulator} locks on the {@link //decisionToDFA} field when -// it adds a new DFA object to that array. {@link //addDFAEdge} -// locks on the DFA for the current decision when setting the -// {@link DFAState//edges} field. {@link //addDFAState} locks on -// the DFA for the current decision when looking up a DFA state to see if it -// already exists. We must make sure that all requests to add DFA states that -// are equivalent result in the same shared DFA object. This is because lots of -// threads will be trying to update the DFA at once. The -// {@link //addDFAState} method also locks inside the DFA lock -// but this time on the shared context cache when it rebuilds the -// configurations' {@link PredictionContext} objects using cached -// subgraphs/nodes. No other locking occurs, even during DFA simulation. This is -// safe as long as we can guarantee that all threads referencing -// {@code s.edge[t]} get the same physical target {@link DFAState}, or -// {@code null}. Once into the DFA, the DFA simulation does not reference the -// {@link DFA//states} map. It follows the {@link DFAState//edges} field to new -// targets. The DFA simulator will either find {@link DFAState//edges} to be -// {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or -// {@code dfa.edges[t]} to be non-null. The -// {@link //addDFAEdge} method could be racing to set the field -// but in either case the DFA simulator works; if {@code null}, and requests ATN -// simulation. It could also race trying to get {@code dfa.edges[t]}, but either -// way it will work because it's not doing a test and set operation.
-// -//-// Starting with SLL then failing to combined SLL/LL (Two-Stage -// Parsing)
-// -//-// Sam pointed out that if SLL does not give a syntax error, then there is no -// point in doing full LL, which is slower. We only have to try LL if we get a -// syntax error. For maximum speed, Sam starts the parser set to pure SLL -// mode with the {@link BailErrorStrategy}:
-// -//-// parser.{@link Parser//getInterpreter() getInterpreter()}.{@link //setPredictionMode setPredictionMode}{@code (}{@link PredictionMode//SLL}{@code )}; -// parser.{@link Parser//setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}()); -//-// -//
-// If it does not get a syntax error, then we're done. If it does get a syntax -// error, we need to retry with the combined SLL/LL strategy.
-// -//-// The reason this works is as follows. If there are no SLL conflicts, then the -// grammar is SLL (at least for that input set). If there is an SLL conflict, -// the full LL analysis must yield a set of viable alternatives which is a -// subset of the alternatives reported by SLL. If the LL set is a singleton, -// then the grammar is LL but not SLL. If the LL set is the same size as the SLL -// set, the decision is SLL. If the LL set has size > 1, then that decision -// is truly ambiguous on the current input. If the LL set is smaller, then the -// SLL conflict resolution might choose an alternative that the full LL would -// rule out as a possibility based upon better context information. If that's -// the case, then the SLL parse will definitely get an error because the full LL -// analysis says it's not viable. If SLL conflict resolution chooses an -// alternative within the LL set, them both SLL and LL would choose the same -// alternative because they both choose the minimum of multiple conflicting -// alternatives.
-// -//-// Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and -// a smaller LL set called s. If s is {@code {2, 3}}, then SLL -// parsing will get an error because SLL will pursue alternative 1. If -// s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will -// choose the same alternative because alternative one is the minimum of either -// set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax -// error. If s is {@code {1}} then SLL will succeed.
-// -//-// Of course, if the input is invalid, then we will get an error for sure in -// both SLL and LL parsing. Erroneous input will therefore require 2 passes over -// the input.
-// - -var Utils = __webpack_require__(0); -var Set = Utils.Set; -var BitSet = Utils.BitSet; -var DoubleDict = Utils.DoubleDict; -var ATN = __webpack_require__(7).ATN; -var ATNState = __webpack_require__(3).ATNState; -var ATNConfig = __webpack_require__(13).ATNConfig; -var ATNConfigSet = __webpack_require__(9).ATNConfigSet; -var Token = __webpack_require__(1).Token; -var DFAState = __webpack_require__(11).DFAState; -var PredPrediction = __webpack_require__(11).PredPrediction; -var ATNSimulator = __webpack_require__(25).ATNSimulator; -var PredictionMode = __webpack_require__(26).PredictionMode; -var RuleContext = __webpack_require__(14).RuleContext; -var ParserRuleContext = __webpack_require__(18).ParserRuleContext; -var SemanticContext = __webpack_require__(10).SemanticContext; -var StarLoopEntryState = __webpack_require__(3).StarLoopEntryState; -var RuleStopState = __webpack_require__(3).RuleStopState; -var PredictionContext = __webpack_require__(6).PredictionContext; -var Interval = __webpack_require__(2).Interval; -var Transitions = __webpack_require__(8); -var Transition = Transitions.Transition; -var SetTransition = Transitions.SetTransition; -var NotSetTransition = Transitions.NotSetTransition; -var RuleTransition = Transitions.RuleTransition; -var ActionTransition = Transitions.ActionTransition; -var NoViableAltException = __webpack_require__(5).NoViableAltException; - -var SingletonPredictionContext = __webpack_require__(6).SingletonPredictionContext; -var predictionContextFromRuleContext = __webpack_require__(6).predictionContextFromRuleContext; - -function ParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache) { - ATNSimulator.call(this, atn, sharedContextCache); - this.parser = parser; - this.decisionToDFA = decisionToDFA; - // SLL, LL, or LL + exact ambig detection?// - this.predictionMode = PredictionMode.LL; - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - this._input = null; - this._startIndex = 0; - this._outerContext = null; - this._dfa = null; - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)→c. We can avoid - // the merge if we ever see a and b again. Note that (b,a)→c should - // also be examined during cache lookup. 
- // - this.mergeCache = null; - return this; -} - -ParserATNSimulator.prototype = Object.create(ATNSimulator.prototype); -ParserATNSimulator.prototype.constructor = ParserATNSimulator; - -ParserATNSimulator.prototype.debug = false; -ParserATNSimulator.prototype.debug_closure = false; -ParserATNSimulator.prototype.debug_add = false; -ParserATNSimulator.prototype.debug_list_atn_decisions = false; -ParserATNSimulator.prototype.dfa_debug = false; -ParserATNSimulator.prototype.retry_debug = false; - - -ParserATNSimulator.prototype.reset = function() { -}; - -ParserATNSimulator.prototype.adaptivePredict = function(input, decision, outerContext) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("adaptivePredict decision " + decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + - input.LT(1).column); - } - this._input = input; - this._startIndex = input.index; - this._outerContext = outerContext; - - var dfa = this.decisionToDFA[decision]; - this._dfa = dfa; - var m = input.mark(); - var index = input.index; - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - try { - var s0; - if (dfa.precedenceDfa) { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()); - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0; - } - if (s0===null) { - if (outerContext===null) { - outerContext = RuleContext.EMPTY; - } - if (this.debug || this.debug_list_atn_decisions) { - console.log("predictATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - ", outerContext=" + outerContext.toString(this.parser.ruleNames)); - } - - var fullCtx = false; - var s0_closure = this.computeStartState(dfa.atnStartState, RuleContext.EMPTY, fullCtx); - - if( dfa.precedenceDfa) { - // If this is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - dfa.s0.configs = s0_closure; // not used for prediction but useful to know start configs anyway - s0_closure = this.applyPrecedenceFilter(s0_closure); - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0); - } else { - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.s0 = s0; - } - } - var alt = this.execATN(dfa, s0, input, index, outerContext); - if (this.debug) { - console.log("DFA after predictATN: " + dfa.toString(this.parser.literalNames)); - } - return alt; - } finally { - this._dfa = null; - this.mergeCache = null; // wack cache after each prediction - input.seek(index); - input.release(m); - } -}; -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): - // if the set is empty, there is no viable alternative for current symbol - // does the state uniquely predict an alternative? - // does the state have a conflict that would prevent us from - // putting it on the work list? 
- -// We also have some key operations to do: - // add an edge from previous DFA state to potentially new DFA state, D, - // upon current symbol but only if adding to work list, which means in all - // cases except no viable alternative (and possibly non-greedy decisions?) - // collecting predicates and adding semantic context to DFA accept states - // adding rule context to context-sensitive DFA accept states - // consuming an input symbol - // reporting a conflict - // reporting an ambiguity - // reporting a context sensitivity - // reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -ParserATNSimulator.prototype.execATN = function(dfa, s0, input, startIndex, outerContext ) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("execATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + input.LT(1).column); - } - var alt; - var previousD = s0; - - if (this.debug) { - console.log("s0 = " + s0); - } - var t = input.LA(1); - while(true) { // while more work - var D = this.getExistingTargetState(previousD, t); - if(D===null) { - D = this.computeTargetState(dfa, previousD, t); - } - if(D===ATNSimulator.ERROR) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision; better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- var e = this.noViableAlt(input, outerContext, previousD.configs, startIndex); - input.seek(startIndex); - alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext); - if(alt!==ATN.INVALID_ALT_NUMBER) { - return alt; - } else { - throw e; - } - } - if(D.requiresFullContext && this.predictionMode !== PredictionMode.SLL) { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - var conflictingAlts = null; - if (D.predicates!==null) { - if (this.debug) { - console.log("DFA state has preds in DFA sim LL failover"); - } - var conflictIndex = input.index; - if(conflictIndex !== startIndex) { - input.seek(startIndex); - } - conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true); - if (conflictingAlts.length===1) { - if(this.debug) { - console.log("Full LL avoided"); - } - return conflictingAlts.minValue(); - } - if (conflictIndex !== startIndex) { - // restore the index so reporting the fallback to full - // context occurs with the index at the correct spot - input.seek(conflictIndex); - } - } - if (this.dfa_debug) { - console.log("ctx sensitive state " + outerContext +" in " + D); - } - var fullCtx = true; - var s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx); - this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index); - alt = this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext); - return alt; - } - if (D.isAcceptState) { - if (D.predicates===null) { - return D.prediction; - } - var stopIndex = input.index; - input.seek(startIndex); - var alts = this.evalSemanticContext(D.predicates, outerContext, true); - if (alts.length===0) { - throw this.noViableAlt(input, outerContext, D.configs, startIndex); - } else if (alts.length===1) { - return alts.minValue(); - } else { - // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. - this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs); - return alts.minValue(); - } - } - previousD = D; - - if (t !== Token.EOF) { - input.consume(); - t = input.LA(1); - } - } -}; -// -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// this method returns {@code null}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code null} if the target state for this edge is not -// already cached -// -ParserATNSimulator.prototype.getExistingTargetState = function(previousD, t) { - var edges = previousD.edges; - if (edges===null) { - return null; - } else { - return edges[t + 1] || null; - } -}; -// -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, this method -// returns {@link //ERROR}. 
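One easily-missed detail in getExistingTargetState above, noted here as a hedged aside:

// previousD.edges[t + 1]: Token.EOF is -1, so each symbol is stored one slot
// up, letting EOF masquerade as index 0 (the lexer simulator's addDFAEdge
// plays the same "make room for -1" trick with MIN_DFA_EDGE).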
-//
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param dfa The DFA
-// @param previousD The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, this method
-// returns {@link //ERROR}.
-//
-ParserATNSimulator.prototype.computeTargetState = function(dfa, previousD, t) {
-    var reach = this.computeReachSet(previousD.configs, t, false);
-    if(reach===null) {
-        this.addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR);
-        return ATNSimulator.ERROR;
-    }
-    // create new target state; we'll add to DFA after it's complete
-    var D = new DFAState(null, reach);
-
-    var predictedAlt = this.getUniqueAlt(reach);
-
-    if (this.debug) {
-        var altSubSets = PredictionMode.getConflictingAltSubsets(reach);
-        console.log("SLL altSubSets=" + Utils.arrayToString(altSubSets) +
-                ", previous=" + previousD.configs +
-                ", configs=" + reach +
-                ", predict=" + predictedAlt +
-                ", allSubsetsConflict=" +
-                PredictionMode.allSubsetsConflict(altSubSets) + ", conflictingAlts=" +
-                this.getConflictingAlts(reach));
-    }
-    if (predictedAlt!==ATN.INVALID_ALT_NUMBER) {
-        // NO CONFLICT, UNIQUELY PREDICTED ALT
-        D.isAcceptState = true;
-        D.configs.uniqueAlt = predictedAlt;
-        D.prediction = predictedAlt;
-    } else if (PredictionMode.hasSLLConflictTerminatingPrediction(this.predictionMode, reach)) {
-        // MORE THAN ONE VIABLE ALTERNATIVE
-        D.configs.conflictingAlts = this.getConflictingAlts(reach);
-        D.requiresFullContext = true;
-        // in SLL-only mode, we will stop at this state and return the minimum alt
-        D.isAcceptState = true;
-        D.prediction = D.configs.conflictingAlts.minValue();
-    }
-    if (D.isAcceptState && D.configs.hasSemanticContext) {
-        this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision));
-        if( D.predicates!==null) {
-            D.prediction = ATN.INVALID_ALT_NUMBER;
-        }
-    }
-    // all adds to dfa are done after we've created full D state
-    D = this.addDFAEdge(dfa, previousD, t, D);
-    return D;
-};
-
-ParserATNSimulator.prototype.predicateDFAState = function(dfaState, decisionState) {
-    // We need to test all predicates, even in DFA states that
-    // uniquely predict alternative.
-    var nalts = decisionState.transitions.length;
-    // Update DFA so reach becomes accept state with (predicate,alt)
-    // pairs if preds found for conflicting alts
-    var altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs);
-    var altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts);
-    if (altToPred!==null) {
-        dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred);
-        dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds
-    } else {
-        // There are preds in configs but they might go away
-        // when OR'd together like {p}? || NONE == NONE. If neither
-        // alt has preds, resolve to min alt
-        dfaState.prediction = altsToCollectPredsFrom.minValue();
-    }
-};
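predicateDFAState thus rewrites an accept state's conflicting alternatives as (predicate, alt) pairs; execATN later evaluates them against the outer context and, if exactly one alternative survives, the conflict dissolves without full-context prediction. A hypothetical sketch of that filtering step (not the runtime's SemanticContext machinery):

    // Pairs as predicateDFAState would produce them; null means "no predicate".
    var predicatedAlts = [
        { alt: 1, pred: function(ctx) { return ctx.version >= 2; } },
        { alt: 2, pred: null }
    ];
    function evalPredicates(pairs, ctx) {
        return pairs
            .filter(function(p) { return p.pred === null || p.pred(ctx); })
            .map(function(p) { return p.alt; });
    }
    console.log(evalPredicates(predicatedAlts, { version: 1 })); // [2] -> "Full LL avoided"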
-
-// comes back with reach.uniqueAlt set to a valid alt
-ParserATNSimulator.prototype.execATNWithFullContext = function(dfa, D, // how far we got before failing over
-                                     s0,
-                                     input,
-                                     startIndex,
-                                     outerContext) {
-    if (this.debug || this.debug_list_atn_decisions) {
-        console.log("execATNWithFullContext "+s0);
-    }
-    var fullCtx = true;
-    var foundExactAmbig = false;
-    var reach = null;
-    var previous = s0;
-    input.seek(startIndex);
-    var t = input.LA(1);
-    var predictedAlt = -1;
-    while (true) { // while more work
-        reach = this.computeReachSet(previous, t, fullCtx);
-        if (reach===null) {
-            // if any configs in previous dipped into outer context, that
-            // means that input up to t actually finished entry rule
-            // at least for LL decision. Full LL doesn't dip into outer
-            // so don't need special case.
-            // We will get an error no matter what so delay until after
-            // decision; better error message. Also, no reachable target
-            // ATN states in SLL implies LL will also get nowhere.
-            // If conflict in states that dip out, choose min since we
-            // will get error no matter what.
-            var e = this.noViableAlt(input, outerContext, previous, startIndex);
-            input.seek(startIndex);
-            var alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext);
-            if(alt!==ATN.INVALID_ALT_NUMBER) {
-                return alt;
-            } else {
-                throw e;
-            }
-        }
-        var altSubSets = PredictionMode.getConflictingAltSubsets(reach);
-        if(this.debug) {
-            console.log("LL altSubSets=" + altSubSets + ", predict=" +
-                    PredictionMode.getUniqueAlt(altSubSets) + ", resolvesToJustOneViableAlt=" +
-                    PredictionMode.resolvesToJustOneViableAlt(altSubSets));
-        }
-        reach.uniqueAlt = this.getUniqueAlt(reach);
-        // unique prediction?
-        if(reach.uniqueAlt!==ATN.INVALID_ALT_NUMBER) {
-            predictedAlt = reach.uniqueAlt;
-            break;
-        } else if (this.predictionMode !== PredictionMode.LL_EXACT_AMBIG_DETECTION) {
-            predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets);
-            if(predictedAlt !== ATN.INVALID_ALT_NUMBER) {
-                break;
-            }
-        } else {
-            // In exact ambiguity mode, we never try to terminate early.
-            // Just keeps scarfing until we know what the conflict is
-            if (PredictionMode.allSubsetsConflict(altSubSets) && PredictionMode.allSubsetsEqual(altSubSets)) {
-                foundExactAmbig = true;
-                predictedAlt = PredictionMode.getSingleViableAlt(altSubSets);
-                break;
-            }
-            // else there are multiple non-conflicting subsets or
-            // we're not sure what the ambiguity is yet.
-            // So, keep going.
-        }
-        previous = reach;
-        if( t !== Token.EOF) {
-            input.consume();
-            t = input.LA(1);
-        }
-    }
-    // If the configuration set uniquely predicts an alternative,
-    // without conflict, then we know that it's a full LL decision
-    // not SLL.
-    if (reach.uniqueAlt !== ATN.INVALID_ALT_NUMBER ) {
-        this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index);
-        return predictedAlt;
-    }
-    // We do not check predicates here because we have checked them
-    // on-the-fly when doing full context prediction.
-
-    //
-    // In non-exact ambiguity detection mode, we might actually be able to
-    // detect an exact ambiguity, but I'm not going to spend the cycles
-    // needed to check. We only emit ambiguity warnings in exact ambiguity
-    // mode.
-    //
-    // For example, we might know that we have conflicting configurations.
-    // But, that does not mean that there is no way forward without a
-    // conflict. It's possible to have nonconflicting alt subsets as in:
-
-    // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
-    // from
-    //
-    //    [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
-    //     (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
-    //
-    // In this case, (17,1,[5 $]) indicates there is some next sequence that
-    // would resolve this without conflict to alternative 1. Any other viable
-    // next sequence, however, is associated with a conflict. We stop
-    // looking for input because no amount of further lookahead will alter
-    // the fact that we should predict alternative 1. We just can't say for
-    // sure that there is an ambiguity without looking further.
-
-    this.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, null, reach);
-
-    return predictedAlt;
-};
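The early exits in that loop all reduce to PredictionMode tests; the central one, resolvesToJustOneViableAlt, succeeds once every conflicting subset agrees on the same minimum alternative. A distilled sketch of that test, reusing the subsets from the comment above (0 stands in for ATN.INVALID_ALT_NUMBER, which is 0 in the runtime):

    function resolvesToJustOneViableAlt(altSubSets) {
        var viable = null;
        for (var i = 0; i < altSubSets.length; i++) {
            var min = Math.min.apply(null, altSubSets[i]);
            if (viable === null) viable = min;
            else if (viable !== min) return 0;  // subsets disagree: no single viable alt
        }
        return viable;
    }
    console.log(resolvesToJustOneViableAlt([[1, 2], [1, 2], [1], [1, 2]])); // 1
    console.log(resolvesToJustOneViableAlt([[1, 2], [2, 3]]));              // 0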
-
-ParserATNSimulator.prototype.computeReachSet = function(closure, t, fullCtx) {
-    if (this.debug) {
-        console.log("in computeReachSet, starting closure: " + closure);
-    }
-    if( this.mergeCache===null) {
-        this.mergeCache = new DoubleDict();
-    }
-    var intermediate = new ATNConfigSet(fullCtx);
-
-    // Configurations already in a rule stop state indicate reaching the end
-    // of the decision rule (local context) or end of the start rule (full
-    // context). Once reached, these configurations are never updated by a
-    // closure operation, so they are handled separately for the performance
-    // advantage of having a smaller intermediate set when calling closure.
-    //
-    // For full-context reach operations, separate handling is required to
-    // ensure that the alternative matching the longest overall sequence is
-    // chosen when multiple such configurations can match the input.
-
-    var skippedStopStates = null;
-
-    // First figure out where we can reach on input t
-    for (var i=0; i