Support HTML5 comment syntax (enabled by default!)
See http://javascript.spec.whatwg.org/#comment-syntax, https://github.com/mishoo/UglifyJS/issues/503 and https://github.com/marijnh/acorn/issues/62.
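
For reference, the spec defines two HTML-compatible comment forms that behave like "//" line comments; this commit tokenizes them as "comment3" and "comment4". A minimal illustration (not part of the commit):

    <!-- everything after "<!--" is skipped to the end of the line  ("comment3")
    var a = 1;
    --> skipped too, but only because a newline precedes the marker ("comment4")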
lib/output.js
@@ -399,7 +399,7 @@ function OutputStream(options) {
             });
         }
         comments.forEach(function(c){
-            if (c.type == "comment1") {
+            if (/comment[134]/.test(c.type)) {
                 output.print("//" + c.value + "\n");
                 output.indent();
             }
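
The loosened /comment[134]/ test makes the printer emit both HTML5 comment kinds the same way as ordinary line comments, so "<!--"/"-->" never reappear verbatim in the output; block comments ("comment2") keep their own path. A quick classification check (standalone sketch, not from the diff):

    /comment[134]/.test("comment1"); // true  -> "// ..." line comment
    /comment[134]/.test("comment2"); // false -> "/* ... */" handled elsewhere
    /comment[134]/.test("comment3"); // true  -> "<!-- ..." re-emitted as "// ..."
    /comment[134]/.test("comment4"); // true  -> "--> ..."  re-emitted as "// ..."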
lib/parse.js
@@ -210,7 +210,7 @@ function is_token(token, type, val) {

 var EX_EOF = {};

-function tokenizer($TEXT, filename) {
+function tokenizer($TEXT, filename, html5_comments) {

     var S = {
         text            : $TEXT.replace(/\r\n?|[\n\u2028\u2029]/g, "\n").replace(/\uFEFF/g, ''),
@@ -242,6 +242,14 @@ function tokenizer($TEXT, filename) {
         return ch;
     };

+    function forward(i) {
+        while (i-- > 0) next();
+    };
+
+    function looking_at(str) {
+        return S.text.substr(S.pos, str.length) == str;
+    };
+
     function find(what, signal_eof) {
         var pos = S.text.indexOf(what, S.pos);
         if (signal_eof && pos == -1) throw EX_EOF;
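
Nothing calls the two new helpers yet at this point in the file; they exist for the html5_comments branch added further down. looking_at() is a pure peek that never moves S.pos, while forward() advances through next() so that S.line, S.col and S.newline_before stay in sync. The contract, in short (assumes the tokenizer's S state):

    looking_at("<!--"); // boolean; S.pos is left untouched
    forward(4);         // same effect as four next() calls; unlike "S.pos += 4"
                        // it keeps the line/column bookkeeping correct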
@@ -381,8 +389,8 @@ function tokenizer($TEXT, filename) {
         return token("string", ret);
     });

-    function read_line_comment() {
-        next();
+    function skip_line_comment(type) {
+        var regex_allowed = S.regex_allowed;
         var i = find("\n"), ret;
         if (i == -1) {
             ret = S.text.substr(S.pos);
@@ -391,11 +399,13 @@ function tokenizer($TEXT, filename) {
             ret = S.text.substring(S.pos, i);
             S.pos = i;
         }
-        return token("comment1", ret, true);
+        S.comments_before.push(token(type, ret, true));
+        S.regex_allowed = regex_allowed;
+        return next_token();
     };

-    var read_multiline_comment = with_eof_error("Unterminated multiline comment", function(){
-        next();
+    var skip_multiline_comment = with_eof_error("Unterminated multiline comment", function(){
+        var regex_allowed = S.regex_allowed;
         var i = find("*/", true);
         var text = S.text.substring(S.pos, i);
         var a = text.split("\n"), n = a.length;
@@ -405,8 +415,11 @@ function tokenizer($TEXT, filename) {
         if (n > 1) S.col = a[n - 1].length;
         else S.col += a[n - 1].length;
         S.col += 2;
-        S.newline_before = S.newline_before || text.indexOf("\n") >= 0;
-        return token("comment2", text, true);
+        var nlb = S.newline_before = S.newline_before || text.indexOf("\n") >= 0;
+        S.comments_before.push(token("comment2", text, true));
+        S.regex_allowed = regex_allowed;
+        S.newline_before = nlb;
+        return next_token();
     });

     function read_name() {
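
Both skip_* helpers follow the same shape: snapshot S.regex_allowed (token() recomputes that flag as a side effect, but a comment must not change whether a following "/" starts a regexp or a division), stash the comment on S.comments_before, restore the flag, and tail-call next_token() so the parser only ever sees real tokens. Condensed, the shared pattern is:

    var regex_allowed = S.regex_allowed;            // token() below clobbers this flag
    S.comments_before.push(token(type, ret, true)); // attach comment to the next real token
    S.regex_allowed = regex_allowed;                // comments are transparent to "/" handling
    return next_token();                            // resume scanning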
@@ -470,16 +483,13 @@ function tokenizer($TEXT, filename) {

     function handle_slash() {
         next();
-        var regex_allowed = S.regex_allowed;
         switch (peek()) {
           case "/":
-            S.comments_before.push(read_line_comment());
-            S.regex_allowed = regex_allowed;
-            return next_token();
+            next();
+            return skip_line_comment("comment1");
           case "*":
-            S.comments_before.push(read_multiline_comment());
-            S.regex_allowed = regex_allowed;
-            return next_token();
+            next();
+            return skip_multiline_comment();
         }
         return S.regex_allowed ? read_regexp("") : read_operator("/");
     };
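
With that bookkeeping absorbed into the skip_* helpers, handle_slash() reduces to pure dispatch, and the HTML5 entry points below reuse the same machinery. Side by side (a summary, not part of the diff):

    // "//"   ............... next();     skip_line_comment("comment1")
    // "/*"   ............... next();     skip_multiline_comment()
    // "<!--" ............... forward(4); skip_line_comment("comment3")
    // "-->" (line start) ... forward(3); skip_line_comment("comment4")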
@@ -516,6 +526,16 @@ function tokenizer($TEXT, filename) {
             return read_regexp(force_regexp);
         skip_whitespace();
         start_token();
+        if (html5_comments) {
+            if (looking_at("<!--")) {
+                forward(4);
+                return skip_line_comment("comment3");
+            }
+            if (looking_at("-->") && S.newline_before) {
+                forward(3);
+                return skip_line_comment("comment4");
+            }
+        }
         var ch = peek();
         if (!ch) return token("eof");
         var code = ch.charCodeAt(0);
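
Note the asymmetry: "<!--" is honored anywhere, while "-->" only starts a comment when S.newline_before is set, mirroring the spec's rule that the closing marker must follow a line terminator. For example (hypothetical check, assuming the usual UglifyJS exports):

    UglifyJS.parse("a <!-- b\n--> c");
    // parses as just the statement `a;`:
    // "<!-- b" is skipped as comment3, "--> c" as comment4
    // (the latter only because a newline precedes the marker)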
@@ -594,11 +614,15 @@ function parse($TEXT, options) {
         strict         : false,
         filename       : null,
         toplevel       : null,
-        expression     : false
+        expression     : false,
+        html5_comments : true,
     });

     var S = {
-        input         : typeof $TEXT == "string" ? tokenizer($TEXT, options.filename) : $TEXT,
+        input         : (typeof $TEXT == "string"
+                         ? tokenizer($TEXT, options.filename,
+                                     options.html5_comments)
+                         : $TEXT),
         token         : null,
         prev          : null,
         peeked        : null,
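
Since html5_comments defaults to true, existing callers pick up the lenient behaviour with no changes; the option exists to opt back out. A usage sketch (assuming the standard UglifyJS API surface):

    // default: HTML comment markers are tolerated and kept as comments
    var ast = UglifyJS.parse("<!-- hide from ancient browsers\nvar a = 1;\n");

    // opt out: "<!--" in statement position becomes a parse error again
    UglifyJS.parse(src, { html5_comments: false }); // `src` is a hypothetical input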