Mirror of https://github.com/IvarK/AntimatterDimensionsSourceCode.git (synced 2024-11-10 06:02:13 +00:00)
Unwrap the automator IIFEs
This commit is contained in:
parent 7712bb5c54
commit 897364950d
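The change applied in every file below follows one pattern: code that built the module's public object inside an immediately invoked function expression (IIFE) is flattened into top-level statements with named exports, since ES module scope already provides the isolation the wrapper used to supply. A minimal sketch of the shape, with illustrative names rather than ones taken from this diff:

// Before: the public surface is assembled and returned by an IIFE.
export const SomeModule = (function() {
  const helper = () => 42;
  const value = helper();
  return { value };
}());

// After: the same bindings live at module scope and are exported directly.
const helper = () => 42;
export const value = helper();
export const SomeModule = { value };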
@@ -1,186 +1,184 @@
import { AutomatorGrammar } from "./parser";
import { AutomatorLexer } from "./lexer";

-(function() {
function walkSuggestion(suggestion, prefix, output) {
  const hasAutocomplete = suggestion.$autocomplete &&
    suggestion.$autocomplete.startsWith(prefix) && suggestion.$autocomplete !== prefix;
  const isUnlocked = suggestion.$unlocked ? suggestion.$unlocked() : true;
  if (hasAutocomplete && isUnlocked) output.add(suggestion.$autocomplete);
  for (const s of suggestion.categoryMatches) {
    walkSuggestion(AutomatorLexer.tokenIds[s], prefix, output);
  }
}

// eslint-disable-next-line no-unused-vars
CodeMirror.registerHelper("lint", "automato", (contents, _, editor) => {
  const doc = editor.getDoc();
  const errors = AutomatorGrammar.compile(contents, true).errors;
  return errors.map(e => ({
    message: e.info,
    severity: "error",
    from: doc.posFromIndex(e.startOffset),
    to: doc.posFromIndex(e.endOffset + 1),
  }));
});

CodeMirror.registerHelper("hint", "anyword", editor => {
  const cursor = editor.getDoc().getCursor();
  let start = cursor.ch;
  const end = cursor.ch;
  const line = editor.getLine(cursor.line);
  while (start && /\w/u.test(line.charAt(start - 1))) --start;
  const lineStart = line.slice(0, start);
  const currentPrefix = line.slice(start, end);
  const lineLex = AutomatorLexer.lexer.tokenize(lineStart);
  if (lineLex.errors.length > 0) return undefined;
  const rawSuggestions = AutomatorGrammar.parser.computeContentAssist("command", lineLex.tokens);
  const suggestions = new Set();
  for (const s of rawSuggestions) {
    if (s.ruleStack[1] === "badCommand") continue;
    walkSuggestion(s.nextTokenType, currentPrefix, suggestions);
  }
  return {
    list: Array.from(suggestions),
    from: CodeMirror.Pos(cursor.line, start),
    to: CodeMirror.Pos(cursor.line, end)
  };
});

const commentRule = { regex: /(\/\/|#).*/u, token: "comment", next: "start" };

// This is a state machine which determines the syntax highlighting for the automator. Top-level props define
// the states, the array entries define the transition rules which are checked in order of appearance, and next
// specifies which state to transition to after consuming the given regex. Without an entry for "next" the state
// machine will remain in the same state and run the transition check after consuming the regex. The "next" prop
// in the line with "sol" is a fallback transition which will be followed if none of the rules are matched.
// Matches to the regexes will color the matched text according to the specified color of cm-[token] in liquibyte.css
// Note: This has no bearing on the actual functionality and behavior of the automator itself and is purely visual.
CodeMirror.defineSimpleMode("automato", {
  // The start state contains the rules that are initially used
  start: [
    commentRule,
    { regex: /studies\s+/ui, token: "keyword", next: "studiesArgs" },
    { regex: /blob\s\s/ui, token: "blob" },
    {
      // eslint-disable-next-line max-len
      regex: /(auto|if|pause|studies|time[ \t]+theorems?|until|wait|while|black[ \t]+hole|stored?[ \t]+game[ \t]+time|notify)\s/ui,
      token: "keyword",
      next: "commandArgs"
    },
    {
      regex: /stop/ui,
      token: "keyword",
      next: "commandDone"
    },
    {
      regex: /start\s|unlock\s/ui,
      token: "keyword",
      next: "startUnlock"
    },
    { regex: /infinity\S+|eternity\S+|reality\S+|pause\S+|restart\S+/ui, token: "error", next: "commandDone" },
    { regex: /infinity|eternity|reality/ui, token: "keyword", next: "prestige" },
    { regex: /pause|restart/ui, token: "keyword", next: "commandDone" },
    { regex: /\}/ui, dedent: true },
    { regex: /\S+\s/ui, token: "error", next: "commandDone" },
  ],
  studiesArgs: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /load(\s+|$)/ui, token: "variable-2", next: "studiesLoad" },
    { regex: /respec/ui, token: "variable-2", next: "commandDone" },
    { regex: /purchase/ui, token: "variable-2", next: "studiesList" },
    { regex: /nowait(\s+|$)/ui, token: "property" },
  ],
  studiesList: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /(antimatter|infinity|time)(?=[\s,|]|$)/ui, token: "number" },
    { regex: /(active|passive|idle)(?=[\s,|]|$)/ui, token: "number" },
    { regex: /(light|dark)(?=[\s,|]|$)/ui, token: "number" },
    { regex: /([1-9][0-9]+)(?=[\s,|-]|$)/ui, token: "number" },
    { regex: /[a-zA-Z_][a-zA-Z_0-9]*/u, token: "variable", next: "commandDone" },
    { regex: /([1-9]|1[0-2])$/ui, token: "number" },
  ],
  studiesLoad: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /id(\s+|$)/ui, token: "variable-2", next: "studiesLoadId" },
    { regex: /name(\s+|$)/ui, token: "variable-2", next: "studiesLoadPreset" },
    { regex: /\S+/ui, token: "error" },
  ],
  studiesLoadId: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /\d/ui, token: "qualifier", next: "commandDone" },
  ],
  studiesLoadPreset: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /(\/(?!\/)|[^\s#/])+/ui, token: "qualifier", next: "commandDone" },
  ],
  prestige: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /nowait(\s|$)/ui, token: "property" },
    { regex: /respec/ui, token: "variable-2" },
  ],
  commandDone: [
    commentRule,
    { sol: true, next: "start" },
    // This seems necessary to have a closing curly brace de-indent automatically in some cases
    { regex: /\}/ui, dedent: true },
    { regex: /\S+/ui, token: "error" },
  ],
  startUnlock: [
    commentRule,
    { sol: true, next: "start" },
    {
      regex: /ec\s?(1[0-2]|[1-9])|dilation/ui,
      token: "variable-2",
      next: "commandDone",
    },
    { regex: /nowait(\s|$)/ui, token: "property" },
  ],
  commandArgs: [
    commentRule,
    { sol: true, next: "start" },
    { regex: /<=|>=|<|>/ui, token: "operator" },
    { regex: /nowait(\s|$)/ui, token: "property" },
    { regex: /".*"/ui, token: "string", next: "commandDone" },
    { regex: /'.*'/ui, token: "string", next: "commandDone" },
    { regex: /(on|off|bh1|bh2|dilation|load|respec)(\s|$)/ui, token: "variable-2" },
    { regex: /(eternity|reality|use)(\s|$)/ui, token: "variable-2" },
    { regex: /(antimatter|infinity|time)(\s|$|(?=,))/ui, token: "variable-2" },
    { regex: /(active|passive|idle)(\s|$|(?=,))/ui, token: "variable-2" },
    { regex: /(light|dark)(\s|$|(?=,))/ui, token: "variable-2" },
    { regex: /x[\t ]+highest(\s|$)/ui, token: "variable-2" },
    { regex: /pending[\t ]+(completions|ip|ep|tp|rm|glyph[\t ]+level)(\s|$)/ui, token: "variable-2" },
    { regex: /total[\t ]+(completions|tt)(\s|$)/ui, token: "variable-2" },
    { regex: /filter[ \t]+score/ui, token: "variable-2" },
    { regex: /ec(1[0-2]|[1-9])[\t ]+completions(\s|$)/ui, token: "variable-2" },
    { regex: /(am|ip|ep|all)(\s|$)/ui, token: "variable-2" },
    {
      regex: /(rm|rg|dt|tp|tt|(banked )?infinities|eternities|realities|rep(licanti)?)(\s|$)/ui,
      token: "variable-2",
    },
    { regex: / sec(onds ?) ?| min(utes ?) ?| hours ?/ui, token: "variable-2" },
    { regex: /([0-9]+:[0-5][0-9]:[0-5][0-9]|[0-5]?[0-9]:[0-5][0-9]|t[1-4])/ui, token: "number" },
    { regex: /-?(0|[1-9]\d*)(\.\d+)?([eE][+-]?\d+)?/ui, token: "number" },
    { regex: /[a-zA-Z_][a-zA-Z_0-9]*/u, token: "variable" },
    { regex: /\{/ui, indent: true, next: "commandDone" },
    // This seems necessary to have a closing curly brace de-indent automatically in some cases
    { regex: /\}/ui, dedent: true },
  ],

  // The meta property contains global information about the mode. It
  // can contain properties like lineComment, which are supported by
  // all modes, and also directives like dontIndentStates, which are
  // specific to simple modes.
  meta: {
    lineComment: "//",
    electricChars: "}",
  }
});
-}());
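For context on how the registrations above are consumed: CodeMirror 5 looks modes and helpers up by name, so an editor opts into this highlighting, linting, and completion through its options. A hedged sketch of that wiring (the option names are standard CodeMirror 5 API with the lint and show-hint addons loaded, but this exact setup is an assumption, not part of the diff):

// Turns a textarea holding an automator script (e.g. "studies respec",
// "wait 10 sec", "infinity nowait") into a highlighted, linted editor.
const editor = CodeMirror.fromTextArea(document.getElementById("automator-script"), {
  mode: "automato",   // the simple mode defined above
  lint: true,         // runs the registered "lint" helper after edits
  extraKeys: {
    // completion flows through the registered "hint" helper
    "Ctrl-Space": cm => cm.showHint({ completeSingle: false }),
  },
});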
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -5,418 +5,412 @@ import { createToken, Lexer } from "chevrotain";

import { DC } from "../constants";

-export const AutomatorLexer = (() => {
const createCategory = name => createToken({ name, pattern: Lexer.NA, longer_alt: Identifier });

// Shorthand for creating tokens and adding them to a list
const tokenLists = {};
// eslint-disable-next-line max-params
const createInCategory = (category, name, pattern, props = {}) => {
  const categories = [category];
  if (props.extraCategories) categories.push(...props.extraCategories);
  const token = createToken({
    name,
    pattern,
    categories,
    longer_alt: Identifier,
  });
  const categoryName = Array.isArray(category) ? category[0].name : category.name;
  if (tokenLists[categoryName] === undefined) tokenLists[categoryName] = [];
  tokenLists[categoryName].push(token);
  const patternWord = pattern.toString().match(/^\/([a-zA-Z0-9]*)\/[a-zA-Z]*$/ui);
  if (patternWord && patternWord[1]) token.$autocomplete = patternWord[1];
  Object.assign(token, props);
  return token;
};

const HSpace = createToken({
  name: "HSpace",
  pattern: /[ \t]+/,
  group: Lexer.SKIPPED
});

const EOL = createToken({
  name: "EOL",
  line_breaks: true,
  pattern: /[ \t\r]*\n\s*/,
  label: "End of line",
});

const StringLiteral = createToken({
  name: "StringLiteral",
  pattern: /".*"/,
});

const StringLiteralSingleQuote = createToken({
  name: "StringLiteralSingleQuote",
  pattern: /'.*'/,
});

const Comment = createToken({
  name: "Comment",
  pattern: /(#|\/\/)[^\n]*/,
});

const NumberLiteral = createToken({
  name: "NumberLiteral",
  pattern: /-?(0|[1-9]\d*)(\.\d+)?([eE][+-]?\d+)?/,
});

const BlackHoleStr = createToken({
  name: "BlackHoleStr",
  pattern: /[Bb][Hh][12]/,
});

const Identifier = createToken({
  name: "Identifier",
  pattern: /[a-zA-Z_][a-zA-Z_0-9]*/,
});

const ComparisonOperator = createToken({
  name: "ComparisonOperator",
  pattern: Lexer.NA,
});

const AutomatorCurrency = createCategory("AutomatorCurrency");
const PrestigeEvent = createCategory("PrestigeEvent");
const StudyPath = createCategory("StudyPath");
const TimeUnit = createCategory("TimeUnit");

createInCategory(ComparisonOperator, "OpGTE", />=/, {
  $autocomplete: ">=",
  $compare: (a, b) => Decimal.gte(a, b),
});
createInCategory(ComparisonOperator, "OpLTE", /<=/, {
  $autocomplete: "<=",
  $compare: (a, b) => Decimal.lte(a, b),
});
createInCategory(ComparisonOperator, "OpGT", />/, {
  $autocomplete: ">",
  $compare: (a, b) => Decimal.gt(a, b),
});
createInCategory(ComparisonOperator, "OpLT", /</, {
  $autocomplete: "<",
  $compare: (a, b) => Decimal.lt(a, b),
});
const OpEQ = createInCategory(ComparisonOperator, "OpEQ", /==/, {
  $compare: (a, b) => Decimal.eq(a, b),
});
// EqualSign is a single = which is defined for both comparisons and define
const EqualSign = createToken({
  name: "EqualSign",
  pattern: /=/,
  categories: ComparisonOperator,
  label: "=",
  longer_alt: OpEQ,
});
EqualSign.$compare = (a, b) => Decimal.eq(a, b);

createInCategory(AutomatorCurrency, "EP", /ep/i, { $getter: () => Currency.eternityPoints.value });
createInCategory(AutomatorCurrency, "IP", /ip/i, { $getter: () => Currency.infinityPoints.value });
createInCategory(AutomatorCurrency, "AM", /am/i, { $getter: () => Currency.antimatter.value });
createInCategory(AutomatorCurrency, "DT", /dt/i, { $getter: () => Currency.dilatedTime.value });
createInCategory(AutomatorCurrency, "TP", /tp/i, { $getter: () => Currency.tachyonParticles.value });
createInCategory(AutomatorCurrency, "RG", /rg/i, { $getter: () => new Decimal(Replicanti.galaxies.total) });
createInCategory(AutomatorCurrency, "RM", /rm/i, { $getter: () => Currency.realityMachines.value });

createInCategory(AutomatorCurrency, "infinities", /infinities/i, { $getter: () => Currency.infinities.value });
createInCategory(AutomatorCurrency, "bankedInfinities", /banked[ \t]+infinities/i, {
  $autocomplete: "banked infinities",
  $getter: () => Currency.infinitiesBanked.value
});
createInCategory(AutomatorCurrency, "eternities", /eternities/i, { $getter: () => Currency.eternities.value });
createInCategory(AutomatorCurrency, "realities", /realities/i, { $getter: () => Currency.realities.value });

createInCategory(AutomatorCurrency, "PendingIP", /pending[ \t]+ip/i, {
  $autocomplete: "pending IP",
  $getter: () => (Player.canCrunch ? gainedInfinityPoints() : DC.D0)
});
createInCategory(AutomatorCurrency, "PendingEP", /pending[ \t]+ep/i, {
  $autocomplete: "pending EP",
  $getter: () => (Player.canEternity ? gainedEternityPoints() : DC.D0)
});
createInCategory(AutomatorCurrency, "PendingTP", /pending[ \t]+tp/i, {
  $autocomplete: "pending TP",
  $getter: () => (player.dilation.active ? getTachyonGain() : DC.D0),
});
createInCategory(AutomatorCurrency, "PendingRM", /pending[ \t]+rm/i, {
  $autocomplete: "pending RM",
  $getter: () => (isRealityAvailable() ? MachineHandler.gainedRealityMachines : DC.D0)
});
createInCategory(AutomatorCurrency, "PendingGlyphLevel", /pending[ \t]+glyph[ \t]+level/i, {
  $autocomplete: "pending Glyph level",
  $getter: () => new Decimal(isRealityAvailable() ? gainedGlyphLevel().actualLevel : 0),
});

createInCategory(AutomatorCurrency, "Rep", /rep(licanti)?/i, {
  $autocomplete: "rep",
  $getter: () => Replicanti.amount,
});
createInCategory(AutomatorCurrency, "TT", /(tt|time theorems?)/i, {
  $autocomplete: "TT",
  $getter: () => Currency.timeTheorems.value,
});
createInCategory(AutomatorCurrency, "TotalTT", /total[ \t]+tt/i, {
  $autocomplete: "total TT",
  $getter: () => player.timestudy.theorem.plus(TimeTheorems.calculateTimeStudiesCost()),
});

createInCategory(AutomatorCurrency, "TotalCompletions", /total[ \t]+completions/i, {
  $autocomplete: "total completions",
  $getter: () => EternityChallenges.completions,
});

createInCategory(AutomatorCurrency, "PendingCompletions", /pending[ \t]+completions/i, {
  $autocomplete: "pending completions",
  $getter: () => {
    // If we are not in an EC, pretend like we have a ton of completions so any check for sufficient
    // completions returns true
    if (!EternityChallenge.isRunning) return Decimal.NUMBER_MAX_VALUE;
    return EternityChallenge.current.gainedCompletionStatus.totalCompletions;
  }
});

createInCategory(AutomatorCurrency, "FilterScore", /filter[ \t]+score/i, {
  $autocomplete: "filter score",
  $getter: () => {
    // If the filter isn't unlocked somehow, return the most negative number in order to ensure it's nonblocking
    if (!EffarigUnlock.glyphFilter.isUnlocked) return -Number.MAX_VALUE;
    const choices = GlyphSelection.glyphList(GlyphSelection.choiceCount, gainedGlyphLevel(),
      { isChoosingGlyph: false });
    const bestGlyph = AutoGlyphProcessor.pick(choices);
    return AutoGlyphProcessor.filterValue(bestGlyph);
  },
  $unlocked: () => EffarigUnlock.glyphFilter.isUnlocked,
});

for (let i = 1; i <= 12; ++i) {
  const id = i;
  createInCategory(AutomatorCurrency, `EC${i}`, new RegExp(`ec${i} completions`, "i"), {
    $autocomplete: `ec${i} completions`,
    // eslint-disable-next-line no-loop-func
    $getter: () => EternityChallenge(id).completions
  });
}

// $prestigeLevel is used by things that wait for a prestige event. Something waiting for
// eternity will be triggered by something waiting for reality, for example.
createInCategory(PrestigeEvent, "Infinity", /infinity/i, {
  extraCategories: [StudyPath],
  $autobuyer: () => Autobuyer.bigCrunch,
  $autobuyerDurationMode: AUTO_CRUNCH_MODE.TIME,
  $autobuyerXHighestMode: AUTO_CRUNCH_MODE.X_HIGHEST,
  $autobuyerCurrencyMode: AUTO_CRUNCH_MODE.AMOUNT,
  $prestigeAvailable: () => Player.canCrunch,
  $prestige: () => bigCrunchResetRequest(true),
  $prestigeLevel: 1,
  $prestigeCurrency: "IP",
  $studyPath: TIME_STUDY_PATH.INFINITY_DIM,
});
createInCategory(PrestigeEvent, "Eternity", /eternity/i, {
  $autobuyer: () => Autobuyer.eternity,
  $autobuyerDurationMode: AUTO_ETERNITY_MODE.TIME,
  $autobuyerXHighestMode: AUTO_ETERNITY_MODE.X_HIGHEST,
  $autobuyerCurrencyMode: AUTO_ETERNITY_MODE.AMOUNT,
  $prestigeAvailable: () => Player.canEternity,
  $prestigeLevel: 2,
  $prestigeCurrency: "EP",
  $prestige: () => eternity(false, true),
  $respec: () => {
    player.respec = true;
  },
});
createInCategory(PrestigeEvent, "Reality", /reality/i, {
  $autobuyer: () => Autobuyer.reality,
  $autobuyerCurrencyMode: AUTO_REALITY_MODE.RM,
  $prestigeAvailable: () => isRealityAvailable(),
  $prestigeLevel: 3,
  $prestigeCurrency: "RM",
  $prestige: () => autoReality(),
  $respec: () => {
    player.reality.respec = true;
  },
});

createInCategory(StudyPath, "Idle", /idle/i, { $studyPath: TIME_STUDY_PATH.IDLE });
createInCategory(StudyPath, "Passive", /passive/i, { $studyPath: TIME_STUDY_PATH.PASSIVE });
createInCategory(StudyPath, "Active", /active/i, { $studyPath: TIME_STUDY_PATH.ACTIVE });
createInCategory(StudyPath, "Antimatter", /antimatter/i, { $studyPath: TIME_STUDY_PATH.ANTIMATTER_DIM });
createInCategory(StudyPath, "Time", /time/i, { $studyPath: TIME_STUDY_PATH.TIME_DIM });
createInCategory(StudyPath, "Light", /light/i, { $studyPath: TIME_STUDY_PATH.LIGHT });
createInCategory(StudyPath, "Dark", /dark/i, { $studyPath: TIME_STUDY_PATH.DARK });

createInCategory(TimeUnit, "Milliseconds", /ms/i, {
  $autocomplete: "ms",
  $scale: 1,
});
createInCategory(TimeUnit, "Seconds", /s(ec(onds?)?)?/i, {
  $autocomplete: "sec",
  $scale: 1000,
});
createInCategory(TimeUnit, "Minutes", /m(in(utes?)?)?/i, {
  $autocomplete: "min",
  $scale: 60 * 1000,
});
createInCategory(TimeUnit, "Hours", /h(ours?)?/i, {
  $autocomplete: "hours",
  $scale: 3600 * 1000,
});

const Keyword = createToken({
  name: "Keyword",
  pattern: Lexer.NA,
  longer_alt: Identifier,
});

const keywordTokens = [];
const createKeyword = (name, pattern, props = {}) => {
  const categories = [Keyword];
  if (props.extraCategories) categories.push(...props.extraCategories);
  const token = createToken({
    name,
    pattern,
    categories,
    longer_alt: Identifier,
  });
  token.$autocomplete = name.toLocaleLowerCase();
  keywordTokens.push(token);
  Object.assign(token, props);
  return token;
};

createKeyword("Auto", /auto/i);
createKeyword("Buy", /buy/i);
// Necessary to hide it from Codemirror's tab auto-completion
createKeyword("Blob", /blob\s\s/i, {
  $unlocked: () => false,
});
createKeyword("If", /if/i);
createKeyword("Load", /load/i);
createKeyword("Notify", /notify/i);
createKeyword("Nowait", /nowait/i);
createKeyword("Off", /off/i);
createKeyword("On", /on/i);
createKeyword("Pause", /pause/i);
// Names are a little special, because they can be named anything (like ec12 or wait)
// So, we consume the label at the same time as we consume the preset. In order to report
// errors, we also match just the word name. And, we have to not match comments.
createKeyword("Name", /name([ \t]+(\/(?!\/)|[^\n#/])*)?/i);
createKeyword("Id", /id\b([ \t]+\d)?/i);
createKeyword("Purchase", /purchase/i);
createKeyword("Respec", /respec/i);
createKeyword("Restart", /restart/i);
createKeyword("Start", /start/i);
createKeyword("Stop", /stop/i);
createKeyword("Studies", /studies/i);
createKeyword("Unlock", /unlock/i);
createKeyword("Until", /until/i);
createKeyword("Use", /use/i);
createKeyword("Wait", /wait/i);
createKeyword("While", /while/i);
createKeyword("BlackHole", /black[ \t]+hole/i, {
  $autocomplete: "black hole",
  $unlocked: () => BlackHole(1).isUnlocked,
});
createKeyword("StoreGameTime", /stored?[ \t]+game[ \t]+time/i, {
  $autocomplete: "store game time",
  $unlocked: () => Enslaved.isUnlocked,
});

createKeyword("Dilation", /dilation/i);
createKeyword("EC", /ec/i);
createKeyword("XHighest", /x[ \t]+highest/i, {
  $autocomplete: "x highest",
});

// We allow ECLiteral to consume lots of digits because that makes error reporting more
// clear (it's nice to say ec123 is an invalid ec)
const ECLiteral = createToken({
  name: "ECLiteral",
  pattern: /ec[1-9][0-9]*/i,
  longer_alt: Identifier,
});

const LCurly = createToken({ name: "LCurly", pattern: /[ \t]*{/ });
const RCurly = createToken({ name: "RCurly", pattern: /[ \t]*}/ });
const Comma = createToken({ name: "Comma", pattern: /,/ });
const Pipe = createToken({ name: "Pipe", pattern: /\|/, label: "|" });
const Dash = createToken({ name: "Dash", pattern: /-/, label: "-" });

// The order here is the order the lexer looks for tokens in.
const automatorTokens = [
  HSpace, StringLiteral, StringLiteralSingleQuote, Comment, EOL,
  ComparisonOperator, ...tokenLists.ComparisonOperator,
  LCurly, RCurly, Comma, EqualSign, Pipe, Dash,
  BlackHoleStr, NumberLiteral,
  AutomatorCurrency, ...tokenLists.AutomatorCurrency,
  ECLiteral,
  Keyword, ...keywordTokens,
  PrestigeEvent, ...tokenLists.PrestigeEvent,
  StudyPath, ...tokenLists.StudyPath,
  TimeUnit, ...tokenLists.TimeUnit,
  Identifier,
];

// Labels only affect error messages and Diagrams.
LCurly.LABEL = "'{'";
RCurly.LABEL = "'}'";
NumberLiteral.LABEL = "Number";
Comma.LABEL = "❟";

const lexer = new Lexer(automatorTokens, {
  positionTracking: "full",
  ensureOptimizations: true
});

// The lexer uses an ID system that's separate from indices into the token array
const tokenIds = [];
for (const token of lexer.lexerDefinition) {
  tokenIds[token.tokenTypeIdx] = token;
}

// We use this while building up the grammar
const tokenMap = automatorTokens.mapToObject(e => e.name, e => e);

const automatorCurrencyNames = tokenLists.AutomatorCurrency.map(i => i.$autocomplete.toUpperCase());

-const standardizeAutomatorValues = function(x) {
+export const standardizeAutomatorValues = function(x) {
  try {
    if (automatorCurrencyNames.includes(x.toUpperCase())) return x.toUpperCase();
  } catch {
    // This only happens if the input is a number or Decimal, in which case we don't attempt to change any formatting
    // and simply return
    return x;
  }
  for (const i of tokenLists.AutomatorCurrency) {
    // Check for a match of the full string.
    if (x.match(i.PATTERN) && x.match(i.PATTERN)[0].length === x.length) {
      return i.$autocomplete.toUpperCase();
    }
  }
  // If we get to this point, we haven't matched a currency name and instead assume it's a defined constant and
  // return it without any format changes since these are case-sensitive
  return x;
};

// In order to disallow individual words within command key words/phrases, we need to ignore certain patterns (mostly
// ones with special regex characters), split the rest of them up across all spaces and tabs, and then flatten the
// final resulting array. Note that this technically duplicates words present in multiple phrases (e.g. "pending")
const ignoredPatterns = ["Identifier", "LCurly", "RCurly"];
-const forbiddenConstantPatterns = lexer.lexerDefinition
+export const forbiddenConstantPatterns = lexer.lexerDefinition
  .filter(p => !ignoredPatterns.includes(p.name))
  .map(p => p.PATTERN.source)
  .flatMap(p => ((p.includes("(") || p.includes(")")) ? p : p.split("[ \\t]+")));

-  return {
-    lexer,
-    tokens: automatorTokens,
-    tokenIds,
-    tokenMap,
-    standardizeAutomatorValues,
-    forbiddenConstantPatterns,
-  };
-})();
-
-export const standardizeAutomatorValues = AutomatorLexer.standardizeAutomatorValues;
-export const forbiddenConstantPatterns = AutomatorLexer.forbiddenConstantPatterns;
+export const AutomatorLexer = {
+  lexer,
+  tokens: automatorTokens,
+  tokenIds,
+  tokenMap,
+  standardizeAutomatorValues,
+  forbiddenConstantPatterns,
+};
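As a stand-alone illustration of the Chevrotain primitives this lexer leans on (category tokens with Lexer.NA, longer_alt disambiguation, skipped groups, and an ordered token list), here is a minimal sketch; the grammar and token names are invented for the example, not taken from the file:

import { createToken, Lexer } from "chevrotain";

// A category token never matches text itself (Lexer.NA); concrete tokens
// join it via `categories`, and grammar rules can consume the category.
const Word = createToken({ name: "Word", pattern: /[a-zA-Z]+/ });
const TimeUnit = createToken({ name: "TimeUnit", pattern: Lexer.NA });
// `longer_alt` lets "secret" lex as one Word instead of Sec + "ret".
const Sec = createToken({ name: "Sec", pattern: /sec/i, categories: TimeUnit, longer_alt: Word });
const Num = createToken({ name: "Num", pattern: /\d+/ });
const Space = createToken({ name: "Space", pattern: /[ \t]+/, group: Lexer.SKIPPED });

// Order matters: earlier entries win ties, mirroring automatorTokens above.
const lexer = new Lexer([Space, Sec, Num, Word]);
const { tokens, errors } = lexer.tokenize("wait 10 sec");
// tokens -> Word("wait"), Num("10"), Sec("sec"); errors -> []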
@@ -3,139 +3,137 @@ import { EOF, Parser } from "chevrotain";
import { AutomatorCommands } from "./automator-commands";
import { AutomatorLexer } from "./lexer";

-export const AutomatorGrammar = (function() {
const T = AutomatorLexer.tokenMap;

// ----------------- parser -----------------
class AutomatorParser extends Parser {
  constructor() {
    super(AutomatorLexer.tokens, {
      recoveryEnabled: true,
      outputCst: true,
      nodeLocationTracking: "full",
    });

    // eslint-disable-next-line consistent-this
    const $ = this;

    $.RULE("script", () => $.SUBRULE($.block));

    $.RULE("block", () => $.MANY_SEP({
      SEP: T.EOL,
      DEF: () => $.OPTION(() => $.SUBRULE($.command)),
    }));

    // This is a bit ugly looking. Chevrotain uses Function.toString() to do crazy
    // optimizations. That clashes with our desire to build our list of commands dynamically.
    // We are creating a function body like this one:
    // $.RULE("command", () => {
    //   $.OR(
    //     $.c1 || ($.c1 = [
    //       { ALT: () => $.SUBRULE($.badCommand) },
    //       { ALT: () => $.SUBRULE($.auto) },
    //       { ALT: () => $.SUBRULE($.define) },
    //       { ALT: () => $.SUBRULE($.ifBlock) },

    const commandAlts = [
      "$.SUBRULE($.badCommand)",
      "$.CONSUME(EOF)",
    ];

    for (const cmd of AutomatorCommands) {
      $.RULE(cmd.id, cmd.rule($));
      commandAlts.push(`$.SUBRULE($.${cmd.id})`);
    }

    const commandOr = window.Function("$", "EOF", `
      return () => $.OR($.c1 || ($.c1 = [
      ${commandAlts.map(e => `{ ALT: () => ${e} },`).join("\n")}]));
    `);

    $.RULE("command", commandOr($, EOF));

    $.RULE("badCommand", () => $.AT_LEAST_ONE(() => $.SUBRULE($.badCommandToken)),
      { resyncEnabled: false, }
    );

    $.RULE("badCommandToken", () => $.OR([
      { ALT: () => $.CONSUME(T.Identifier) },
      { ALT: () => $.CONSUME(T.NumberLiteral) },
      { ALT: () => $.CONSUME(T.ComparisonOperator) },
    ]), { resyncEnabled: false, });

    $.RULE("comparison", () => {
      $.SUBRULE($.compareValue);
      $.CONSUME(T.ComparisonOperator);
      $.SUBRULE2($.compareValue);
    });

    $.RULE("compareValue", () => $.OR([
      { ALT: () => $.CONSUME(T.NumberLiteral) },
      { ALT: () => $.CONSUME(T.Identifier) },
      { ALT: () => $.CONSUME(T.AutomatorCurrency) },
    ]));

    $.RULE("duration", () => {
      $.CONSUME(T.NumberLiteral);
      $.CONSUME(T.TimeUnit);
    });

    $.RULE("eternityChallenge", () => $.OR([
      {
        ALT: () => {
          $.CONSUME(T.EC);
          $.CONSUME(T.NumberLiteral);
        }
      },
      { ALT: () => $.CONSUME(T.ECLiteral) }
    ]));

    $.RULE("studyList", () => {
      $.AT_LEAST_ONE(() => $.SUBRULE($.studyListEntry));
      // Support the |3 export format for EC number
      $.OPTION(() => {
        $.CONSUME(T.Pipe);
        $.CONSUME1(T.NumberLiteral, { LABEL: "ECNumber" });
      });
    }, { resyncEnabled: false });

    $.RULE("studyListEntry", () => {
      $.OR([
        { ALT: () => $.SUBRULE($.studyRange) },
        { ALT: () => $.CONSUME(T.NumberLiteral) },
        { ALT: () => $.CONSUME(T.StudyPath) },
      ]);
      $.OPTION(() => $.CONSUME(T.Comma));
    });

    $.RULE("studyRange", () => {
      $.CONSUME(T.NumberLiteral, { LABEL: "firstStudy" });
      $.CONSUME(T.Dash);
      $.CONSUME1(T.NumberLiteral, { LABEL: "lastStudy" });
    });

    $.RULE("xHighest", () => {
      $.CONSUME(T.NumberLiteral);
      $.CONSUME(T.XHighest);
    });

    $.RULE("currencyAmount", () => {
      $.CONSUME(T.NumberLiteral);
      $.CONSUME(T.AutomatorCurrency);
    });

    // Very important to call this after all the rules have been set up;
    // otherwise the parser may not work correctly as it will lack information
    // derived from the self analysis.
    $.performSelfAnalysis();
  }
}

const parser = new AutomatorParser();

-  return {
-    parser,
-    // This field is filled in by automator-validate.js
-    validate: null,
-  };
-}());
+export const AutomatorGrammar = {
+  parser,
+  // This field is filled in by automator-validate.js
+  validate: null,
+};
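The parser follows the standard Chevrotain recipe visible above: declare every rule in the constructor, then call performSelfAnalysis() exactly once at the end. A minimal self-contained sketch of that recipe (an illustrative grammar, not the automator's; newer Chevrotain spells the base class CstParser where this older code extends Parser):

import { createToken, Lexer, CstParser } from "chevrotain";

const Num = createToken({ name: "Num", pattern: /\d+/ });
const Plus = createToken({ name: "Plus", pattern: /\+/ });
const allTokens = [Num, Plus];

class SumParser extends CstParser {
  constructor() {
    super(allTokens);
    const $ = this;
    // sum := Num (Plus Num)*
    $.RULE("sum", () => {
      $.CONSUME(Num);
      $.MANY(() => {
        $.CONSUME(Plus);
        $.CONSUME2(Num);   // repeated token types need distinct indices
      });
    });
    // Must come after all RULE calls, as in AutomatorParser above.
    this.performSelfAnalysis();
  }
}

const parser = new SumParser();
parser.input = new Lexer(allTokens).tokenize("1+2+3").tokens;
const cst = parser.sum();   // CST root node; parser.errors collects failures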