improve performance through makePredicate() (#3048)
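The change is mechanical throughout lib/parse.js: every predicate built by makePredicate() is now consulted with a property read, FOO[word], instead of a function call, FOO(word). The diff does not include lib/utils.js, so the snippet below is only a rough sketch of the idea under that assumption, not the committed implementation: makePredicate() can be pictured as returning a null-prototype lookup table rather than a generated matcher function.

// Illustrative sketch only -- the real lib/utils.js code in this commit may differ.
// Old style: makePredicate() returned a function, so membership tests were calls.
// New style: return a bare lookup table, so membership tests are property reads.
function makePredicate(words) {
    if (!Array.isArray(words)) words = words.split(" ");
    var table = Object.create(null); // no inherited keys such as "toString"
    words.forEach(function(word) {
        table[word] = true;
    });
    return table;
}

var KEYWORDS = makePredicate("break case catch continue default delete do else");
KEYWORDS["case"]; // true
KEYWORDS["foo"];  // undefined, i.e. falsy

A plain property read on a small object is generally cheaper for JavaScript engines than calling a generated matcher function, which is the performance gain the commit title refers to.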
lib/parse.js (42 lines changed)
@@ -165,7 +165,7 @@ function is_unicode_connector_punctuation(ch) {
 };
 
 function is_identifier(name) {
-    return !RESERVED_WORDS(name) && /^[a-z_$][a-z0-9_$]*$/i.test(name);
+    return !RESERVED_WORDS[name] && /^[a-z_$][a-z0-9_$]*$/i.test(name);
 };
 
 function is_identifier_start(code) {
@@ -245,7 +245,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
         var ch = S.text.charAt(S.pos++);
         if (signal_eof && !ch)
             throw EX_EOF;
-        if (NEWLINE_CHARS(ch)) {
+        if (NEWLINE_CHARS[ch]) {
             S.newline_before = S.newline_before || !in_string;
             ++S.line;
             S.col = 0;
@@ -272,7 +272,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
         var text = S.text;
         for (var i = S.pos, n = S.text.length; i < n; ++i) {
             var ch = text[i];
-            if (NEWLINE_CHARS(ch))
+            if (NEWLINE_CHARS[ch])
                 return i;
         }
         return -1;
@@ -292,9 +292,9 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
 
     var prev_was_dot = false;
     function token(type, value, is_comment) {
-        S.regex_allowed = ((type == "operator" && !UNARY_POSTFIX(value)) ||
-                           (type == "keyword" && KEYWORDS_BEFORE_EXPRESSION(value)) ||
-                           (type == "punc" && PUNC_BEFORE_EXPRESSION(value)));
+        S.regex_allowed = ((type == "operator" && !UNARY_POSTFIX[value]) ||
+                           (type == "keyword" && KEYWORDS_BEFORE_EXPRESSION[value]) ||
+                           (type == "punc" && PUNC_BEFORE_EXPRESSION[value]));
         if (type == "punc" && value == ".") {
             prev_was_dot = true;
         } else if (!is_comment) {
@@ -324,7 +324,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
     };
 
     function skip_whitespace() {
-        while (WHITESPACE_CHARS(peek()))
+        while (WHITESPACE_CHARS[peek()])
             next();
     };
 
@@ -424,7 +424,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
         for (;;) {
             var ch = next(true, true);
             if (ch == "\\") ch = read_escaped_char(true);
-            else if (NEWLINE_CHARS(ch)) parse_error("Unterminated string constant");
+            else if (NEWLINE_CHARS[ch]) parse_error("Unterminated string constant");
             else if (ch == quote) break;
             ret += ch;
         }
@@ -476,7 +476,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
                 backslash = false;
             }
         }
-        if (KEYWORDS(name) && escaped) {
+        if (KEYWORDS[name] && escaped) {
             hex = name.charCodeAt(0).toString(16).toUpperCase();
             name = "\\u" + "0000".substr(hex.length) + hex + name.slice(1);
         }
@@ -485,7 +485,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
 
     var read_regexp = with_eof_error("Unterminated regular expression", function(source) {
         var prev_backslash = false, ch, in_class = false;
-        while ((ch = next(true))) if (NEWLINE_CHARS(ch)) {
+        while ((ch = next(true))) if (NEWLINE_CHARS[ch]) {
             parse_error("Unexpected line terminator");
         } else if (prev_backslash) {
             source += "\\" + ch;
@@ -517,7 +517,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
         function grow(op) {
             if (!peek()) return op;
             var bigger = op + peek();
-            if (OPERATORS(bigger)) {
+            if (OPERATORS[bigger]) {
                 next();
                 return grow(bigger);
             } else {
@@ -550,9 +550,9 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
     function read_word() {
         var word = read_name();
         if (prev_was_dot) return token("name", word);
-        return KEYWORDS_ATOM(word) ? token("atom", word)
-            : !KEYWORDS(word) ? token("name", word)
-            : OPERATORS(word) ? token("operator", word)
+        return KEYWORDS_ATOM[word] ? token("atom", word)
+            : !KEYWORDS[word] ? token("name", word)
+            : OPERATORS[word] ? token("operator", word)
             : token("keyword", word);
     };
 
@@ -603,8 +603,8 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
               }
             }
             if (is_digit(code)) return read_num();
-            if (PUNC_CHARS(ch)) return token("punc", next());
-            if (OPERATOR_CHARS(ch)) return read_operator();
+            if (PUNC_CHARS[ch]) return token("punc", next());
+            if (OPERATOR_CHARS[ch]) return read_operator();
             if (code == 92 || is_identifier_start(code)) return read_word();
             break;
         }
@@ -1321,7 +1321,7 @@ function parse($TEXT, options) {
             func.end = prev();
             return subscripts(func, allow_calls);
         }
-        if (ATOMIC_START_TOKEN(S.token.type)) {
+        if (ATOMIC_START_TOKEN[S.token.type]) {
             return subscripts(as_atom_node(), allow_calls);
         }
         unexpected();
@@ -1406,7 +1406,7 @@ function parse($TEXT, options) {
         var tmp = S.token;
         switch (tmp.type) {
           case "operator":
-            if (!KEYWORDS(tmp.value)) unexpected();
+            if (!KEYWORDS[tmp.value]) unexpected();
           case "num":
           case "string":
           case "name":
@@ -1504,7 +1504,7 @@ function parse($TEXT, options) {
 
     var maybe_unary = function(allow_calls) {
         var start = S.token;
-        if (is("operator") && UNARY_PREFIX(start.value)) {
+        if (is("operator") && UNARY_PREFIX[start.value]) {
             next();
             handle_regexp();
             var ex = make_unary(AST_UnaryPrefix, start, maybe_unary(allow_calls));
@@ -1513,7 +1513,7 @@ function parse($TEXT, options) {
             return ex;
         }
         var val = expr_atom(allow_calls);
-        while (is("operator") && UNARY_POSTFIX(S.token.value) && !has_newline_before(S.token)) {
+        while (is("operator") && UNARY_POSTFIX[S.token.value] && !has_newline_before(S.token)) {
            val = make_unary(AST_UnaryPostfix, S.token, val);
            val.start = start;
            val.end = S.token;
@@ -1585,7 +1585,7 @@ function parse($TEXT, options) {
     var maybe_assign = function(no_in) {
         var start = S.token;
         var left = maybe_conditional(no_in), val = S.token.value;
-        if (is("operator") && ASSIGNMENT(val)) {
+        if (is("operator") && ASSIGNMENT[val]) {
             if (is_assignable(left)) {
                 next();
                 return new AST_Assign({
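To make the motivation concrete, a hypothetical micro-benchmark is sketched below. It is not part of the commit: the closure only approximates the old generated matcher function, the word list is arbitrary, and the absolute numbers will vary by engine.

// Hypothetical micro-benchmark (not from the commit); run with node.
function callStylePredicate(words) { // stands in for the old function-returning makePredicate()
    var set = {};
    words.split(" ").forEach(function(w) { set[w] = true; });
    return function(word) { return set[word] === true; };
}
function lookupStylePredicate(words) { // stands in for the new object-returning makePredicate()
    var set = Object.create(null);
    words.split(" ").forEach(function(w) { set[w] = true; });
    return set;
}

var FN  = callStylePredicate("break case catch continue default delete do else");
var OBJ = lookupStylePredicate("break case catch continue default delete do else");
var hits = 0;

console.time("FOO(word)");
for (var i = 0; i < 1e7; i++) if (FN("delete")) hits++;
console.timeEnd("FOO(word)");

console.time("FOO[word]");
for (var j = 0; j < 1e7; j++) if (OBJ["delete"]) hits++;
console.timeEnd("FOO[word]");

console.log(hits); // keeps the loops from being optimized away entirely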