Initialization
node_modules/htmlparser2/src/FeedHandler.spec.ts (generated, vendored, normal file, +37 lines)
@@ -0,0 +1,37 @@
import fs from "node:fs/promises";
import { describe, it, expect } from "vitest";
import { parseFeed } from "./index.js";

const documents = new URL("__fixtures__/Documents/", import.meta.url);

describe("parseFeed", () => {
    it("(rssFeed)", async () =>
        expect(
            parseFeed(
                await fs.readFile(
                    new URL("RSS_Example.xml", documents),
                    "utf8",
                ),
            ),
        ).toMatchSnapshot());

    it("(atomFeed)", async () =>
        expect(
            parseFeed(
                await fs.readFile(
                    new URL("Atom_Example.xml", documents),
                    "utf8",
                ),
            ),
        ).toMatchSnapshot());

    it("(rdfFeed)", async () =>
        expect(
            parseFeed(
                await fs.readFile(
                    new URL("RDF_Example.xml", documents),
                    "utf8",
                ),
            ),
        ).toMatchSnapshot());
});
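The fixture tests above exercise `parseFeed`, htmlparser2's entry point for RSS/Atom/RDF input. As a rough sketch of the same call outside the test harness (the inline feed string is invented purely for illustration, and the import assumes the published package rather than this vendored copy):

import { parseFeed } from "htmlparser2";

// A tiny RSS document, made up for this example.
const rss = `<rss version="2.0"><channel>
  <title>Example</title>
  <item><title>First post</title><link>https://example.com/1</link></item>
</channel></rss>`;

// parseFeed returns a Feed object, or null if no feed element was found.
// xmlMode is the recommended setting for feed documents.
const feed = parseFeed(rss, { xmlMode: true });
console.log(feed?.title, feed?.items.length);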
node_modules/htmlparser2/src/Parser.events.spec.ts (generated, vendored, normal file, +228 lines)
@@ -0,0 +1,228 @@
|
||||
import { describe, it, expect, vi } from "vitest";
|
||||
import { Parser, type ParserOptions } from "./Parser.js";
|
||||
import * as helper from "./__fixtures__/testHelper.js";
|
||||
|
||||
/**
|
||||
* Write to the parser twice, once as bytes, once as a single blob. Then check
|
||||
* that we received the expected events.
|
||||
*
|
||||
* @internal
|
||||
* @param input Data to write.
|
||||
* @param options Parser options.
|
||||
* @returns Promise that resolves if the test passes.
|
||||
*/
|
||||
function runTest(input: string, options?: ParserOptions) {
|
||||
let firstResult: unknown[] | undefined;
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
const handler = helper.getEventCollector((error, actual) => {
|
||||
if (error) {
|
||||
return reject(error);
|
||||
}
|
||||
|
||||
if (firstResult) {
|
||||
expect(actual).toEqual(firstResult);
|
||||
resolve();
|
||||
} else {
|
||||
firstResult = actual;
|
||||
expect(actual).toMatchSnapshot();
|
||||
}
|
||||
});
|
||||
|
||||
const parser = new Parser(handler, options);
|
||||
// First, try to run the test via chunks
|
||||
for (let index = 0; index < input.length; index++) {
|
||||
parser.write(input.charAt(index));
|
||||
}
|
||||
parser.end();
|
||||
// Then, parse everything
|
||||
parser.parseComplete(input);
|
||||
});
|
||||
}
|
||||
|
||||
describe("Events", () => {
|
||||
it("simple", () => runTest("<h1 class=test>adsf</h1>"));
|
||||
|
||||
it("Template script tags", () =>
|
||||
runTest(
|
||||
'<p><script type="text/template"><h1>Heading1</h1></script></p>',
|
||||
));
|
||||
|
||||
it("Lowercase tags", () =>
|
||||
runTest("<H1 class=test>adsf</H1>", { lowerCaseTags: true }));
|
||||
|
||||
it("CDATA", () =>
|
||||
runTest("<tag><![CDATA[ asdf ><asdf></adsf><> fo]]></tag><![CD>", {
|
||||
xmlMode: true,
|
||||
}));
|
||||
|
||||
it("CDATA (inside special)", () =>
|
||||
runTest(
|
||||
"<script>/*<![CDATA[*/ asdf ><asdf></adsf><> fo/*]]>*/</script>",
|
||||
));
|
||||
|
||||
it("leading lt", () => runTest(">a>"));
|
||||
|
||||
it("end slash: void element ending with />", () =>
|
||||
runTest("<hr / ><p>Hold the line."));
|
||||
|
||||
it("end slash: void element ending with >", () =>
|
||||
runTest("<hr ><p>Hold the line."));
|
||||
|
||||
it("end slash: void element ending with >, xmlMode=true", () =>
|
||||
runTest("<hr ><p>Hold the line.", { xmlMode: true }));
|
||||
|
||||
it("end slash: non-void element ending with />", () =>
|
||||
runTest("<xx / ><p>Hold the line."));
|
||||
|
||||
it("end slash: non-void element ending with />, xmlMode=true", () =>
|
||||
runTest("<xx / ><p>Hold the line.", { xmlMode: true }));
|
||||
|
||||
it("end slash: non-void element ending with />, recognizeSelfClosing=true", () =>
|
||||
runTest("<xx / ><p>Hold the line.", { recognizeSelfClosing: true }));
|
||||
|
||||
it("end slash: as part of attrib value of void element", () =>
|
||||
runTest("<img src=gif.com/123/><p>Hold the line."));
|
||||
|
||||
it("end slash: as part of attrib value of non-void element", () =>
|
||||
runTest("<a href=http://test.com/>Foo</a><p>Hold the line."));
|
||||
|
||||
it("Implicit close tags", () =>
|
||||
runTest(
|
||||
"<ol><li class=test><div><table style=width:100%><tr><th>TH<td colspan=2><h3>Heading</h3><tr><td><div>Div</div><td><div>Div2</div></table></div><li><div><h3>Heading 2</h3></div></li></ol><p>Para<h4>Heading 4</h4><p><ul><li>Hi<li>bye</ul>",
|
||||
));
|
||||
|
||||
it("attributes (no white space, no value, no quotes)", () =>
|
||||
runTest(
|
||||
'<button class="test0"title="test1" disabled value=test2>adsf</button>',
|
||||
));
|
||||
|
||||
it("crazy attribute", () => runTest("<p < = '' FAIL>stuff</p><a"));
|
||||
|
||||
it("Scripts creating other scripts", () =>
|
||||
runTest("<p><script>var str = '<script></'+'script>';</script></p>"));
|
||||
|
||||
it("Long comment ending", () =>
|
||||
runTest("<meta id='before'><!-- text ---><meta id='after'>"));
|
||||
|
||||
it("Long CDATA ending", () =>
|
||||
runTest("<before /><tag><![CDATA[ text ]]]></tag><after />", {
|
||||
xmlMode: true,
|
||||
}));
|
||||
|
||||
it("Implicit open p and br tags", () =>
|
||||
runTest("<div>Hallo</p>World</br></ignore></div></p></br>"));
|
||||
|
||||
it("lt followed by whitespace", () => runTest("a < b"));
|
||||
|
||||
it("double attribute", () => runTest("<h1 class=test class=boo></h1>"));
|
||||
|
||||
it("numeric entities", () =>
|
||||
runTest("abcdfg&#x;h"));
|
||||
|
||||
it("legacy entities", () => runTest("&elíe&eer;s<er&sum"));
|
||||
|
||||
it("named entities", () =>
|
||||
runTest("&el<er∳foo&bar"));
|
||||
|
||||
it("xml entities", () =>
|
||||
runTest("&>&<üabcde", {
|
||||
xmlMode: true,
|
||||
}));
|
||||
|
||||
it("entity in attribute", () =>
|
||||
runTest(
|
||||
"<a href='http://example.com/pa#x61ge?param=value¶m2¶m3=<val&; & &'>",
|
||||
));
|
||||
|
||||
it("double brackets", () =>
|
||||
runTest("<<princess-purpose>>testing</princess-purpose>"));
|
||||
|
||||
it("legacy entities fail", () => runTest("M&M"));
|
||||
|
||||
it("Special special tags", () =>
|
||||
runTest(
|
||||
"<tItLe><b>foo</b><title></TiTlE><sitle><b></b></sitle><ttyle><b></b></ttyle><sCriPT></scripter</soo</sCript><STyLE></styler</STylE><sCiPt><stylee><scriptee><soo>",
|
||||
));
|
||||
|
||||
it("Empty tag name", () => runTest("< ></ >"));
|
||||
|
||||
it("Not quite closed", () => runTest("<foo /bar></foo bar>"));
|
||||
|
||||
it("Entities in attributes", () =>
|
||||
runTest("<foo bar=& baz=\"&\" boo='&' noo=>"));
|
||||
|
||||
it("CDATA in HTML", () => runTest("<![CDATA[ foo ]]>"));
|
||||
|
||||
it("Comment edge-cases", () => runTest("<!-foo><!-- --- --><!--foo"));
|
||||
|
||||
it("CDATA edge-cases", () =>
|
||||
runTest("<![CDATA><![CDATA[[]]sdaf]]><![CDATA[foo", {
|
||||
recognizeCDATA: true,
|
||||
}));
|
||||
|
||||
it("Comment false ending", () => runTest("<!-- a-b-> -->"));
|
||||
|
||||
it("Scripts ending with <", () => runTest("<script><</script>"));
|
||||
|
||||
it("CDATA more edge-cases", () =>
|
||||
runTest("<![CDATA[foo]bar]>baz]]>", { recognizeCDATA: true }));
|
||||
|
||||
it("tag names are not ASCII alpha", () => runTest("<12>text</12>"));
|
||||
|
||||
it("open-implies-close case of (non-br) void close tag in non-XML mode", () =>
|
||||
runTest("<select><input></select>", { lowerCaseAttributeNames: true }));
|
||||
|
||||
it("entity in attribute (#276)", () =>
|
||||
runTest(
|
||||
'<img src="?&image_uri=1&ℑ=2&image=3"/>?&image_uri=1&ℑ=2&image=3',
|
||||
));
|
||||
|
||||
it("entity in title (#592)", () => runTest("<title>the "title""));
|
||||
|
||||
it("entity in title - decodeEntities=false (#592)", () =>
|
||||
runTest("<title>the "title"", { decodeEntities: false }));
|
||||
|
||||
it("</title> in <script> (#745)", () =>
|
||||
runTest("<script>'</title>'</script>"));
|
||||
|
||||
it("XML tags", () => runTest("<:foo><_bar>", { xmlMode: true }));
|
||||
|
||||
it("Trailing legacy entity", () => runTest("⨱×bar"));
|
||||
|
||||
it("Trailing numeric entity", () => runTest("55"));
|
||||
|
||||
it("Multi-byte entity", () => runTest("≧̸"));
|
||||
|
||||
it("Start & end indices from domhandler", () =>
|
||||
runTest(
|
||||
"<!DOCTYPE html> <html> <title>The Title</title> <body class='foo'>Hello world <p></p></body> <!-- the comment --> </html> ",
|
||||
));
|
||||
|
||||
it("Self-closing indices (#941)", () =>
|
||||
runTest("<xml><a/><b/></xml>", { xmlMode: true }));
|
||||
|
||||
it("Entity after <", () => runTest("<&"));
|
||||
|
||||
it("Attribute in XML (see #1350)", () =>
|
||||
runTest(
|
||||
'<Page\n title="Hello world"\n actionBarVisible="false"/>',
|
||||
{ xmlMode: true },
|
||||
));
|
||||
});
|
||||
|
||||
describe("Helper", () => {
|
||||
it("should handle errors", () => {
|
||||
const eventCallback = vi.fn();
|
||||
const parser = new Parser(helper.getEventCollector(eventCallback));
|
||||
|
||||
parser.end();
|
||||
parser.write("foo");
|
||||
|
||||
expect(eventCallback).toHaveBeenCalledTimes(2);
|
||||
expect(eventCallback).toHaveBeenNthCalledWith(1, null, []);
|
||||
expect(eventCallback).toHaveBeenLastCalledWith(
|
||||
new Error(".write() after done!"),
|
||||
);
|
||||
});
|
||||
});
|
||||
node_modules/htmlparser2/src/Parser.spec.ts (generated, vendored, normal file, +164 lines)
@@ -0,0 +1,164 @@
|
||||
import { describe, it, expect, vi } from "vitest";
|
||||
import { Parser, Tokenizer } from "./index.js";
|
||||
import type { Handler } from "./Parser.js";
|
||||
|
||||
describe("API", () => {
|
||||
it("should work without callbacks", () => {
|
||||
const cbs: Partial<Handler> = { onerror: vi.fn() };
|
||||
const p = new Parser(cbs, {
|
||||
xmlMode: true,
|
||||
lowerCaseAttributeNames: true,
|
||||
});
|
||||
|
||||
p.end("<a foo><bar></a><!-- --><![CDATA[]]]><?foo?><!bar><boo/>boohay");
|
||||
p.write("foo");
|
||||
|
||||
// Check for an error
|
||||
p.end();
|
||||
p.write("foo");
|
||||
expect(cbs.onerror).toHaveBeenLastCalledWith(
|
||||
new Error(".write() after done!"),
|
||||
);
|
||||
p.end();
|
||||
expect(cbs.onerror).toHaveBeenLastCalledWith(
|
||||
new Error(".end() after done!"),
|
||||
);
|
||||
|
||||
// Should ignore the error if there is no callback
|
||||
delete cbs.onerror;
|
||||
p.write("foo");
|
||||
|
||||
p.reset();
|
||||
|
||||
// Remove method
|
||||
cbs.onopentag = vi.fn();
|
||||
p.write("<a foo");
|
||||
delete cbs.onopentag;
|
||||
p.write(">");
|
||||
|
||||
// Pause/resume
|
||||
const onText = vi.fn();
|
||||
cbs.ontext = onText;
|
||||
p.pause();
|
||||
p.write("foo");
|
||||
expect(onText).not.toHaveBeenCalled();
|
||||
p.resume();
|
||||
expect(onText).toHaveBeenLastCalledWith("foo");
|
||||
p.pause();
|
||||
expect(onText).toHaveBeenCalledTimes(1);
|
||||
p.resume();
|
||||
expect(onText).toHaveBeenCalledTimes(1);
|
||||
p.pause();
|
||||
p.end("bar");
|
||||
expect(onText).toHaveBeenCalledTimes(1);
|
||||
p.resume();
|
||||
expect(onText).toHaveBeenCalledTimes(2);
|
||||
expect(onText).toHaveBeenLastCalledWith("bar");
|
||||
});
|
||||
|
||||
it("should back out of numeric entities (#125)", () => {
|
||||
const onend = vi.fn();
|
||||
let text = "";
|
||||
const p = new Parser({
|
||||
ontext(data) {
|
||||
text += data;
|
||||
},
|
||||
onend,
|
||||
});
|
||||
|
||||
p.end("id=770&#anchor");
|
||||
|
||||
expect(onend).toHaveBeenCalledTimes(1);
|
||||
expect(text).toBe("id=770&#anchor");
|
||||
|
||||
p.reset();
|
||||
text = "";
|
||||
|
||||
p.end("0&#xn");
|
||||
|
||||
expect(onend).toHaveBeenCalledTimes(2);
|
||||
expect(text).toBe("0&#xn");
|
||||
});
|
||||
|
||||
it("should not have the start index be greater than the end index", () => {
|
||||
const onopentag = vi.fn();
|
||||
const onclosetag = vi.fn();
|
||||
|
||||
const p = new Parser({
|
||||
onopentag(tag) {
|
||||
expect(p.startIndex).toBeLessThanOrEqual(p.endIndex);
|
||||
onopentag(tag, p.startIndex, p.endIndex);
|
||||
},
|
||||
onclosetag(tag) {
|
||||
expect(p.startIndex).toBeLessThanOrEqual(p.endIndex);
|
||||
onclosetag(tag, p.endIndex);
|
||||
},
|
||||
});
|
||||
|
||||
p.write("<p>");
|
||||
|
||||
expect(onopentag).toHaveBeenLastCalledWith("p", 0, 2);
|
||||
expect(onclosetag).not.toHaveBeenCalled();
|
||||
|
||||
p.write("Foo");
|
||||
|
||||
p.write("<hr>");
|
||||
|
||||
expect(onopentag).toHaveBeenLastCalledWith("hr", 6, 9);
|
||||
expect(onclosetag).toHaveBeenCalledTimes(2);
|
||||
expect(onclosetag).toHaveBeenNthCalledWith(1, "p", 9);
|
||||
expect(onclosetag).toHaveBeenNthCalledWith(2, "hr", 9);
|
||||
});
|
||||
|
||||
it("should update the position when a single tag is spread across multiple chunks", () => {
|
||||
let called = false;
|
||||
const p = new Parser({
|
||||
onopentag() {
|
||||
called = true;
|
||||
expect(p.startIndex).toBe(0);
|
||||
expect(p.endIndex).toBe(12);
|
||||
},
|
||||
});
|
||||
|
||||
p.write("<div ");
|
||||
p.write("foo=bar>");
|
||||
|
||||
expect(called).toBe(true);
|
||||
});
|
||||
|
||||
it("should have the correct position for implied opening tags", () => {
|
||||
let called = false;
|
||||
const p = new Parser({
|
||||
onopentag() {
|
||||
called = true;
|
||||
expect(p.startIndex).toBe(0);
|
||||
expect(p.endIndex).toBe(3);
|
||||
},
|
||||
});
|
||||
|
||||
p.write("</p>");
|
||||
expect(called).toBe(true);
|
||||
});
|
||||
|
||||
it("should parse <__proto__> (#387)", () => {
|
||||
const p = new Parser(null);
|
||||
|
||||
// Should not throw
|
||||
p.parseChunk("<__proto__>");
|
||||
});
|
||||
|
||||
it("should support custom tokenizer", () => {
|
||||
class CustomTokenizer extends Tokenizer {}
|
||||
|
||||
const p = new Parser(
|
||||
{
|
||||
onparserinit(parser: Parser) {
|
||||
// @ts-expect-error Accessing private tokenizer here
|
||||
expect(parser.tokenizer).toBeInstanceOf(CustomTokenizer);
|
||||
},
|
||||
},
|
||||
{ Tokenizer: CustomTokenizer },
|
||||
);
|
||||
p.done();
|
||||
});
|
||||
});
|
||||
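The API tests above poke `pause()` and `resume()` synchronously; in real use they act as backpressure hooks while streaming input into the parser. A hedged sketch under that assumption (the chunk source and the artificial delay are hypothetical, and the import assumes the published package):

import { Parser } from "htmlparser2";

const parser = new Parser({
    ontext(text) {
        console.log("text:", text);
        // Simulate a slow consumer: stop emitting events until resume().
        parser.pause();
        setTimeout(() => parser.resume(), 10);
    },
});

// Chunks written while paused are buffered and replayed on resume().
for (const chunk of ["<p>Hel", "lo</p>"]) {
    parser.write(chunk);
}
parser.end();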
node_modules/htmlparser2/src/Parser.ts (generated, vendored, normal file, +663 lines)
@@ -0,0 +1,663 @@
|
||||
import Tokenizer, { type Callbacks, QuoteType } from "./Tokenizer.js";
|
||||
import { fromCodePoint } from "entities/decode";
|
||||
|
||||
const formTags = new Set([
|
||||
"input",
|
||||
"option",
|
||||
"optgroup",
|
||||
"select",
|
||||
"button",
|
||||
"datalist",
|
||||
"textarea",
|
||||
]);
|
||||
const pTag = new Set(["p"]);
|
||||
const tableSectionTags = new Set(["thead", "tbody"]);
|
||||
const ddtTags = new Set(["dd", "dt"]);
|
||||
const rtpTags = new Set(["rt", "rp"]);
|
||||
|
||||
const openImpliesClose = new Map<string, Set<string>>([
|
||||
["tr", new Set(["tr", "th", "td"])],
|
||||
["th", new Set(["th"])],
|
||||
["td", new Set(["thead", "th", "td"])],
|
||||
["body", new Set(["head", "link", "script"])],
|
||||
["li", new Set(["li"])],
|
||||
["p", pTag],
|
||||
["h1", pTag],
|
||||
["h2", pTag],
|
||||
["h3", pTag],
|
||||
["h4", pTag],
|
||||
["h5", pTag],
|
||||
["h6", pTag],
|
||||
["select", formTags],
|
||||
["input", formTags],
|
||||
["output", formTags],
|
||||
["button", formTags],
|
||||
["datalist", formTags],
|
||||
["textarea", formTags],
|
||||
["option", new Set(["option"])],
|
||||
["optgroup", new Set(["optgroup", "option"])],
|
||||
["dd", ddtTags],
|
||||
["dt", ddtTags],
|
||||
["address", pTag],
|
||||
["article", pTag],
|
||||
["aside", pTag],
|
||||
["blockquote", pTag],
|
||||
["details", pTag],
|
||||
["div", pTag],
|
||||
["dl", pTag],
|
||||
["fieldset", pTag],
|
||||
["figcaption", pTag],
|
||||
["figure", pTag],
|
||||
["footer", pTag],
|
||||
["form", pTag],
|
||||
["header", pTag],
|
||||
["hr", pTag],
|
||||
["main", pTag],
|
||||
["nav", pTag],
|
||||
["ol", pTag],
|
||||
["pre", pTag],
|
||||
["section", pTag],
|
||||
["table", pTag],
|
||||
["ul", pTag],
|
||||
["rt", rtpTags],
|
||||
["rp", rtpTags],
|
||||
["tbody", tableSectionTags],
|
||||
["tfoot", tableSectionTags],
|
||||
]);
|
||||
|
||||
const voidElements = new Set([
|
||||
"area",
|
||||
"base",
|
||||
"basefont",
|
||||
"br",
|
||||
"col",
|
||||
"command",
|
||||
"embed",
|
||||
"frame",
|
||||
"hr",
|
||||
"img",
|
||||
"input",
|
||||
"isindex",
|
||||
"keygen",
|
||||
"link",
|
||||
"meta",
|
||||
"param",
|
||||
"source",
|
||||
"track",
|
||||
"wbr",
|
||||
]);
|
||||
|
||||
const foreignContextElements = new Set(["math", "svg"]);
|
||||
|
||||
const htmlIntegrationElements = new Set([
|
||||
"mi",
|
||||
"mo",
|
||||
"mn",
|
||||
"ms",
|
||||
"mtext",
|
||||
"annotation-xml",
|
||||
"foreignobject",
|
||||
"desc",
|
||||
"title",
|
||||
]);
|
||||
|
||||
export interface ParserOptions {
|
||||
/**
|
||||
* Indicates whether special tags (`<script>`, `<style>`, and `<title>`) should get special treatment
|
||||
* and if "empty" tags (eg. `<br>`) can have children. If `false`, the content of special tags
|
||||
* will be text only. For feeds and other XML content (documents that don't consist of HTML),
|
||||
* set this to `true`.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
xmlMode?: boolean;
|
||||
|
||||
/**
|
||||
* Decode entities within the document.
|
||||
*
|
||||
* @default true
|
||||
*/
|
||||
decodeEntities?: boolean;
|
||||
|
||||
/**
|
||||
* If set to true, all tags will be lowercased.
|
||||
*
|
||||
* @default !xmlMode
|
||||
*/
|
||||
lowerCaseTags?: boolean;
|
||||
|
||||
/**
|
||||
* If set to `true`, all attribute names will be lowercased. This has noticeable impact on speed.
|
||||
*
|
||||
* @default !xmlMode
|
||||
*/
|
||||
lowerCaseAttributeNames?: boolean;
|
||||
|
||||
/**
|
||||
* If set to true, CDATA sections will be recognized as text even if the xmlMode option is not enabled.
|
||||
* NOTE: If xmlMode is set to `true` then CDATA sections will always be recognized as text.
|
||||
*
|
||||
* @default xmlMode
|
||||
*/
|
||||
recognizeCDATA?: boolean;
|
||||
|
||||
/**
|
||||
* If set to `true`, self-closing tags will trigger the onclosetag event even if xmlMode is not set to `true`.
|
||||
* NOTE: If xmlMode is set to `true` then self-closing tags will always be recognized.
|
||||
*
|
||||
* @default xmlMode
|
||||
*/
|
||||
recognizeSelfClosing?: boolean;
|
||||
|
||||
/**
|
||||
* Allows the default tokenizer to be overwritten.
|
||||
*/
|
||||
Tokenizer?: typeof Tokenizer;
|
||||
}
|
||||
|
||||
export interface Handler {
|
||||
onparserinit(parser: Parser): void;
|
||||
|
||||
/**
|
||||
* Resets the handler back to starting state
|
||||
*/
|
||||
onreset(): void;
|
||||
|
||||
/**
|
||||
* Signals the handler that parsing is done
|
||||
*/
|
||||
onend(): void;
|
||||
onerror(error: Error): void;
|
||||
onclosetag(name: string, isImplied: boolean): void;
|
||||
onopentagname(name: string): void;
|
||||
/**
|
||||
*
|
||||
* @param name Name of the attribute
|
||||
* @param value Value of the attribute.
|
||||
* @param quote Quotes used around the attribute. `null` if the attribute has no quotes around the value, `undefined` if the attribute has no value.
|
||||
*/
|
||||
onattribute(
|
||||
name: string,
|
||||
value: string,
|
||||
quote?: string | undefined | null,
|
||||
): void;
|
||||
onopentag(
|
||||
name: string,
|
||||
attribs: { [s: string]: string },
|
||||
isImplied: boolean,
|
||||
): void;
|
||||
ontext(data: string): void;
|
||||
oncomment(data: string): void;
|
||||
oncdatastart(): void;
|
||||
oncdataend(): void;
|
||||
oncommentend(): void;
|
||||
onprocessinginstruction(name: string, data: string): void;
|
||||
}
|
||||
|
||||
const reNameEnd = /\s|\//;
|
||||
|
||||
export class Parser implements Callbacks {
|
||||
/** The start index of the last event. */
|
||||
public startIndex = 0;
|
||||
/** The end index of the last event. */
|
||||
public endIndex = 0;
|
||||
/**
|
||||
* Store the start index of the current open tag,
|
||||
* so we can update the start index for attributes.
|
||||
*/
|
||||
private openTagStart = 0;
|
||||
|
||||
private tagname = "";
|
||||
private attribname = "";
|
||||
private attribvalue = "";
|
||||
private attribs: null | { [key: string]: string } = null;
|
||||
private readonly stack: string[] = [];
|
||||
/** Determines whether self-closing tags are recognized. */
|
||||
private readonly foreignContext: boolean[];
|
||||
private readonly cbs: Partial<Handler>;
|
||||
private readonly lowerCaseTagNames: boolean;
|
||||
private readonly lowerCaseAttributeNames: boolean;
|
||||
private readonly recognizeSelfClosing: boolean;
|
||||
/** We are parsing HTML. Inverse of the `xmlMode` option. */
|
||||
private readonly htmlMode: boolean;
|
||||
private readonly tokenizer: Tokenizer;
|
||||
|
||||
private readonly buffers: string[] = [];
|
||||
private bufferOffset = 0;
|
||||
/** The index of the last written buffer. Used when resuming after a `pause()`. */
|
||||
private writeIndex = 0;
|
||||
/** Indicates whether the parser has finished running / `.end` has been called. */
|
||||
private ended = false;
|
||||
|
||||
constructor(
|
||||
cbs?: Partial<Handler> | null,
|
||||
private readonly options: ParserOptions = {},
|
||||
) {
|
||||
this.cbs = cbs ?? {};
|
||||
this.htmlMode = !this.options.xmlMode;
|
||||
this.lowerCaseTagNames = options.lowerCaseTags ?? this.htmlMode;
|
||||
this.lowerCaseAttributeNames =
|
||||
options.lowerCaseAttributeNames ?? this.htmlMode;
|
||||
this.recognizeSelfClosing =
|
||||
options.recognizeSelfClosing ?? !this.htmlMode;
|
||||
this.tokenizer = new (options.Tokenizer ?? Tokenizer)(
|
||||
this.options,
|
||||
this,
|
||||
);
|
||||
this.foreignContext = [!this.htmlMode];
|
||||
this.cbs.onparserinit?.(this);
|
||||
}
|
||||
|
||||
// Tokenizer event handlers
|
||||
|
||||
/** @internal */
|
||||
ontext(start: number, endIndex: number): void {
|
||||
const data = this.getSlice(start, endIndex);
|
||||
this.endIndex = endIndex - 1;
|
||||
this.cbs.ontext?.(data);
|
||||
this.startIndex = endIndex;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
ontextentity(cp: number, endIndex: number): void {
|
||||
this.endIndex = endIndex - 1;
|
||||
this.cbs.ontext?.(fromCodePoint(cp));
|
||||
this.startIndex = endIndex;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the current tag is a void element. Override this if you want
|
||||
* to specify your own additional void elements.
|
||||
*/
|
||||
protected isVoidElement(name: string): boolean {
|
||||
return this.htmlMode && voidElements.has(name);
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onopentagname(start: number, endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
|
||||
let name = this.getSlice(start, endIndex);
|
||||
|
||||
if (this.lowerCaseTagNames) {
|
||||
name = name.toLowerCase();
|
||||
}
|
||||
|
||||
this.emitOpenTag(name);
|
||||
}
|
||||
|
||||
private emitOpenTag(name: string) {
|
||||
this.openTagStart = this.startIndex;
|
||||
this.tagname = name;
|
||||
|
||||
const impliesClose = this.htmlMode && openImpliesClose.get(name);
|
||||
|
||||
if (impliesClose) {
|
||||
while (this.stack.length > 0 && impliesClose.has(this.stack[0])) {
|
||||
const element = this.stack.shift()!;
|
||||
this.cbs.onclosetag?.(element, true);
|
||||
}
|
||||
}
|
||||
if (!this.isVoidElement(name)) {
|
||||
this.stack.unshift(name);
|
||||
|
||||
if (this.htmlMode) {
|
||||
if (foreignContextElements.has(name)) {
|
||||
this.foreignContext.unshift(true);
|
||||
} else if (htmlIntegrationElements.has(name)) {
|
||||
this.foreignContext.unshift(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.cbs.onopentagname?.(name);
|
||||
if (this.cbs.onopentag) this.attribs = {};
|
||||
}
|
||||
|
||||
private endOpenTag(isImplied: boolean) {
|
||||
this.startIndex = this.openTagStart;
|
||||
|
||||
if (this.attribs) {
|
||||
this.cbs.onopentag?.(this.tagname, this.attribs, isImplied);
|
||||
this.attribs = null;
|
||||
}
|
||||
if (this.cbs.onclosetag && this.isVoidElement(this.tagname)) {
|
||||
this.cbs.onclosetag(this.tagname, true);
|
||||
}
|
||||
|
||||
this.tagname = "";
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onopentagend(endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
this.endOpenTag(false);
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onclosetag(start: number, endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
|
||||
let name = this.getSlice(start, endIndex);
|
||||
|
||||
if (this.lowerCaseTagNames) {
|
||||
name = name.toLowerCase();
|
||||
}
|
||||
|
||||
if (
|
||||
this.htmlMode &&
|
||||
(foreignContextElements.has(name) ||
|
||||
htmlIntegrationElements.has(name))
|
||||
) {
|
||||
this.foreignContext.shift();
|
||||
}
|
||||
|
||||
if (!this.isVoidElement(name)) {
|
||||
const pos = this.stack.indexOf(name);
|
||||
if (pos !== -1) {
|
||||
for (let index = 0; index <= pos; index++) {
|
||||
const element = this.stack.shift()!;
|
||||
// We know the stack has sufficient elements.
|
||||
this.cbs.onclosetag?.(element, index !== pos);
|
||||
}
|
||||
} else if (this.htmlMode && name === "p") {
|
||||
// Implicit open before close
|
||||
this.emitOpenTag("p");
|
||||
this.closeCurrentTag(true);
|
||||
}
|
||||
} else if (this.htmlMode && name === "br") {
|
||||
// We can't use `emitOpenTag` for implicit open, as `br` would be implicitly closed.
|
||||
this.cbs.onopentagname?.("br");
|
||||
this.cbs.onopentag?.("br", {}, true);
|
||||
this.cbs.onclosetag?.("br", false);
|
||||
}
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onselfclosingtag(endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
if (this.recognizeSelfClosing || this.foreignContext[0]) {
|
||||
this.closeCurrentTag(false);
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
} else {
|
||||
// Ignore the fact that the tag is self-closing.
|
||||
this.onopentagend(endIndex);
|
||||
}
|
||||
}
|
||||
|
||||
private closeCurrentTag(isOpenImplied: boolean) {
|
||||
const name = this.tagname;
|
||||
this.endOpenTag(isOpenImplied);
|
||||
|
||||
// Self-closing tags will be on the top of the stack
|
||||
if (this.stack[0] === name) {
|
||||
// If the opening tag isn't implied, the closing tag has to be implied.
|
||||
this.cbs.onclosetag?.(name, !isOpenImplied);
|
||||
this.stack.shift();
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onattribname(start: number, endIndex: number): void {
|
||||
this.startIndex = start;
|
||||
const name = this.getSlice(start, endIndex);
|
||||
|
||||
this.attribname = this.lowerCaseAttributeNames
|
||||
? name.toLowerCase()
|
||||
: name;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onattribdata(start: number, endIndex: number): void {
|
||||
this.attribvalue += this.getSlice(start, endIndex);
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onattribentity(cp: number): void {
|
||||
this.attribvalue += fromCodePoint(cp);
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onattribend(quote: QuoteType, endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
|
||||
this.cbs.onattribute?.(
|
||||
this.attribname,
|
||||
this.attribvalue,
|
||||
quote === QuoteType.Double
|
||||
? '"'
|
||||
: quote === QuoteType.Single
|
||||
? "'"
|
||||
: quote === QuoteType.NoValue
|
||||
? undefined
|
||||
: null,
|
||||
);
|
||||
|
||||
if (
|
||||
this.attribs &&
|
||||
!Object.prototype.hasOwnProperty.call(this.attribs, this.attribname)
|
||||
) {
|
||||
this.attribs[this.attribname] = this.attribvalue;
|
||||
}
|
||||
this.attribvalue = "";
|
||||
}
|
||||
|
||||
private getInstructionName(value: string) {
|
||||
const index = value.search(reNameEnd);
|
||||
let name = index < 0 ? value : value.substr(0, index);
|
||||
|
||||
if (this.lowerCaseTagNames) {
|
||||
name = name.toLowerCase();
|
||||
}
|
||||
|
||||
return name;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
ondeclaration(start: number, endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
const value = this.getSlice(start, endIndex);
|
||||
|
||||
if (this.cbs.onprocessinginstruction) {
|
||||
const name = this.getInstructionName(value);
|
||||
this.cbs.onprocessinginstruction(`!${name}`, `!${value}`);
|
||||
}
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onprocessinginstruction(start: number, endIndex: number): void {
|
||||
this.endIndex = endIndex;
|
||||
const value = this.getSlice(start, endIndex);
|
||||
|
||||
if (this.cbs.onprocessinginstruction) {
|
||||
const name = this.getInstructionName(value);
|
||||
this.cbs.onprocessinginstruction(`?${name}`, `?${value}`);
|
||||
}
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
oncomment(start: number, endIndex: number, offset: number): void {
|
||||
this.endIndex = endIndex;
|
||||
|
||||
this.cbs.oncomment?.(this.getSlice(start, endIndex - offset));
|
||||
this.cbs.oncommentend?.();
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
oncdata(start: number, endIndex: number, offset: number): void {
|
||||
this.endIndex = endIndex;
|
||||
const value = this.getSlice(start, endIndex - offset);
|
||||
|
||||
if (!this.htmlMode || this.options.recognizeCDATA) {
|
||||
this.cbs.oncdatastart?.();
|
||||
this.cbs.ontext?.(value);
|
||||
this.cbs.oncdataend?.();
|
||||
} else {
|
||||
this.cbs.oncomment?.(`[CDATA[${value}]]`);
|
||||
this.cbs.oncommentend?.();
|
||||
}
|
||||
|
||||
// Set `startIndex` for next node
|
||||
this.startIndex = endIndex + 1;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
onend(): void {
|
||||
if (this.cbs.onclosetag) {
|
||||
// Set the end index for all remaining tags
|
||||
this.endIndex = this.startIndex;
|
||||
for (let index = 0; index < this.stack.length; index++) {
|
||||
this.cbs.onclosetag(this.stack[index], true);
|
||||
}
|
||||
}
|
||||
this.cbs.onend?.();
|
||||
}
|
||||
|
||||
/**
|
||||
* Resets the parser to a blank state, ready to parse a new HTML document
|
||||
*/
|
||||
public reset(): void {
|
||||
this.cbs.onreset?.();
|
||||
this.tokenizer.reset();
|
||||
this.tagname = "";
|
||||
this.attribname = "";
|
||||
this.attribs = null;
|
||||
this.stack.length = 0;
|
||||
this.startIndex = 0;
|
||||
this.endIndex = 0;
|
||||
this.cbs.onparserinit?.(this);
|
||||
this.buffers.length = 0;
|
||||
this.foreignContext.length = 0;
|
||||
this.foreignContext.unshift(!this.htmlMode);
|
||||
this.bufferOffset = 0;
|
||||
this.writeIndex = 0;
|
||||
this.ended = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resets the parser, then parses a complete document and
|
||||
* pushes it to the handler.
|
||||
*
|
||||
* @param data Document to parse.
|
||||
*/
|
||||
public parseComplete(data: string): void {
|
||||
this.reset();
|
||||
this.end(data);
|
||||
}
|
||||
|
||||
private getSlice(start: number, end: number) {
|
||||
while (start - this.bufferOffset >= this.buffers[0].length) {
|
||||
this.shiftBuffer();
|
||||
}
|
||||
|
||||
let slice = this.buffers[0].slice(
|
||||
start - this.bufferOffset,
|
||||
end - this.bufferOffset,
|
||||
);
|
||||
|
||||
while (end - this.bufferOffset > this.buffers[0].length) {
|
||||
this.shiftBuffer();
|
||||
slice += this.buffers[0].slice(0, end - this.bufferOffset);
|
||||
}
|
||||
|
||||
return slice;
|
||||
}
|
||||
|
||||
private shiftBuffer(): void {
|
||||
this.bufferOffset += this.buffers[0].length;
|
||||
this.writeIndex--;
|
||||
this.buffers.shift();
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a chunk of data and calls the corresponding callbacks.
|
||||
*
|
||||
* @param chunk Chunk to parse.
|
||||
*/
|
||||
public write(chunk: string): void {
|
||||
if (this.ended) {
|
||||
this.cbs.onerror?.(new Error(".write() after done!"));
|
||||
return;
|
||||
}
|
||||
|
||||
this.buffers.push(chunk);
|
||||
if (this.tokenizer.running) {
|
||||
this.tokenizer.write(chunk);
|
||||
this.writeIndex++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses the end of the buffer and clears the stack, calls onend.
|
||||
*
|
||||
* @param chunk Optional final chunk to parse.
|
||||
*/
|
||||
public end(chunk?: string): void {
|
||||
if (this.ended) {
|
||||
this.cbs.onerror?.(new Error(".end() after done!"));
|
||||
return;
|
||||
}
|
||||
|
||||
if (chunk) this.write(chunk);
|
||||
this.ended = true;
|
||||
this.tokenizer.end();
|
||||
}
|
||||
|
||||
/**
|
||||
* Pauses parsing. The parser won't emit events until `resume` is called.
|
||||
*/
|
||||
public pause(): void {
|
||||
this.tokenizer.pause();
|
||||
}
|
||||
|
||||
/**
|
||||
* Resumes parsing after `pause` was called.
|
||||
*/
|
||||
public resume(): void {
|
||||
this.tokenizer.resume();
|
||||
|
||||
while (
|
||||
this.tokenizer.running &&
|
||||
this.writeIndex < this.buffers.length
|
||||
) {
|
||||
this.tokenizer.write(this.buffers[this.writeIndex++]);
|
||||
}
|
||||
|
||||
if (this.ended) this.tokenizer.end();
|
||||
}
|
||||
|
||||
/**
|
||||
* Alias of `write`, for backwards compatibility.
|
||||
*
|
||||
* @param chunk Chunk to parse.
|
||||
* @deprecated
|
||||
*/
|
||||
public parseChunk(chunk: string): void {
|
||||
this.write(chunk);
|
||||
}
|
||||
/**
|
||||
* Alias of `end`, for backwards compatibility.
|
||||
*
|
||||
* @param chunk Optional final chunk to parse.
|
||||
* @deprecated
|
||||
*/
|
||||
public done(chunk?: string): void {
|
||||
this.end(chunk);
|
||||
}
|
||||
}
|
||||
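Taken together, the handler callbacks and the `openImpliesClose` table above mean the parser emits implied close events for tag soup. A small sketch of what that looks like from the outside (importing from the published package; the expected event list in the comment follows from the logic above rather than from a recorded run):

import { Parser } from "htmlparser2";

const events: string[] = [];
const parser = new Parser({
    onopentag(name, _attribs, isImplied) {
        events.push(`open ${name}${isImplied ? " (implied)" : ""}`);
    },
    onclosetag(name, isImplied) {
        events.push(`close ${name}${isImplied ? " (implied)" : ""}`);
    },
});

parser.parseComplete("<ul><li>one<li>two</ul>");
// Expected: open ul, open li, close li (implied), open li, close li (implied), close ul
console.log(events);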
node_modules/htmlparser2/src/Tokenizer.spec.ts (generated, vendored, normal file, +162 lines)
@@ -0,0 +1,162 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import { Tokenizer } from "./index.js";
|
||||
import type { Callbacks } from "./Tokenizer.js";
|
||||
|
||||
function tokenize(data: string, options = {}) {
|
||||
const log: unknown[][] = [];
|
||||
const tokenizer = new Tokenizer(
|
||||
options,
|
||||
new Proxy(
|
||||
{},
|
||||
{
|
||||
get(_, property) {
|
||||
return (...values: unknown[]) =>
|
||||
log.push([property, ...values]);
|
||||
},
|
||||
},
|
||||
) as Callbacks,
|
||||
);
|
||||
|
||||
tokenizer.write(data);
|
||||
tokenizer.end();
|
||||
|
||||
return log;
|
||||
}
|
||||
|
||||
describe("Tokenizer", () => {
|
||||
describe("should support self-closing special tags", () => {
|
||||
it("for self-closing script tag", () => {
|
||||
expect(tokenize("<script /><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for self-closing style tag", () => {
|
||||
expect(tokenize("<style /><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for self-closing title tag", () => {
|
||||
expect(tokenize("<title /><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for self-closing textarea tag", () => {
|
||||
expect(tokenize("<textarea /><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for self-closing xmp tag", () => {
|
||||
expect(tokenize("<xmp /><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
|
||||
describe("should support standard special tags", () => {
|
||||
it("for normal script tag", () => {
|
||||
expect(tokenize("<script></script><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for normal style tag", () => {
|
||||
expect(tokenize("<style></style><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for normal sitle tag", () => {
|
||||
expect(tokenize("<title></title><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
it("for normal textarea tag", () => {
|
||||
expect(
|
||||
tokenize("<textarea></textarea><div></div>"),
|
||||
).toMatchSnapshot();
|
||||
});
|
||||
it("for normal xmp tag", () => {
|
||||
expect(tokenize("<xmp></xmp><div></div>")).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
|
||||
describe("should treat html inside special tags as text", () => {
|
||||
it("for div inside script tag", () => {
|
||||
expect(tokenize("<script><div></div></script>")).toMatchSnapshot();
|
||||
});
|
||||
it("for div inside style tag", () => {
|
||||
expect(tokenize("<style><div></div></style>")).toMatchSnapshot();
|
||||
});
|
||||
it("for div inside title tag", () => {
|
||||
expect(tokenize("<title><div></div></title>")).toMatchSnapshot();
|
||||
});
|
||||
it("for div inside textarea tag", () => {
|
||||
expect(
|
||||
tokenize("<textarea><div></div></textarea>"),
|
||||
).toMatchSnapshot();
|
||||
});
|
||||
it("for div inside xmp tag", () => {
|
||||
expect(tokenize("<xmp><div></div></xmp>")).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
|
||||
describe("should correctly mark attributes", () => {
|
||||
it("for no value attribute", () => {
|
||||
expect(tokenize("<div aaaaaaa >")).toMatchSnapshot();
|
||||
});
|
||||
it("for no quotes attribute", () => {
|
||||
expect(tokenize("<div aaa=aaa >")).toMatchSnapshot();
|
||||
});
|
||||
it("for single quotes attribute", () => {
|
||||
expect(tokenize("<div aaa='a' >")).toMatchSnapshot();
|
||||
});
|
||||
it("for double quotes attribute", () => {
|
||||
expect(tokenize('<div aaa="a" >')).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
|
||||
describe("should not break after special tag followed by an entity", () => {
|
||||
it("for normal special tag", () => {
|
||||
expect(tokenize("<style>a{}</style>'<br/>")).toMatchSnapshot();
|
||||
});
|
||||
it("for self-closing special tag", () => {
|
||||
expect(tokenize("<style />'<br/>")).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
|
||||
describe("should handle entities", () => {
|
||||
it("for XML entities", () =>
|
||||
expect(
|
||||
tokenize("&>&<üabcde", {
|
||||
xmlMode: true,
|
||||
}),
|
||||
).toMatchSnapshot());
|
||||
|
||||
it("for entities in attributes (#276)", () =>
|
||||
expect(
|
||||
tokenize(
|
||||
'<img src="?&image_uri=1&ℑ=2&image=3"/>?&image_uri=1&ℑ=2&image=3',
|
||||
),
|
||||
).toMatchSnapshot());
|
||||
|
||||
it("for trailing legacy entity", () =>
|
||||
expect(tokenize("⨱×bar")).toMatchSnapshot());
|
||||
|
||||
it("for multi-byte entities", () =>
|
||||
expect(tokenize("≧̸")).toMatchSnapshot());
|
||||
});
|
||||
|
||||
it("should not lose data when pausing", () => {
|
||||
const log: unknown[][] = [];
|
||||
const tokenizer = new Tokenizer(
|
||||
{},
|
||||
new Proxy(
|
||||
{},
|
||||
{
|
||||
get(_, property) {
|
||||
return (...values: unknown[]) => {
|
||||
if (property === "ontext") {
|
||||
tokenizer.pause();
|
||||
}
|
||||
log.push([property, ...values]);
|
||||
};
|
||||
},
|
||||
},
|
||||
) as Callbacks,
|
||||
);
|
||||
|
||||
tokenizer.write("&am");
|
||||
tokenizer.write("p; it up!");
|
||||
tokenizer.resume();
|
||||
tokenizer.resume();
|
||||
|
||||
// Tokenizer shouldn't be paused
|
||||
expect(tokenizer).toHaveProperty("running", true);
|
||||
|
||||
tokenizer.end();
|
||||
|
||||
expect(log).toMatchSnapshot();
|
||||
});
|
||||
});
|
||||
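The `tokenize` helper above catches every tokenizer callback through a `Proxy`, and the same trick works for ad-hoc debugging of a `Parser` handler. A hedged sketch of that idea applied to the parser (not part of this commit; the import assumes the published package):

import { Parser } from "htmlparser2";

const log: unknown[][] = [];
// Every handler method the parser looks up resolves to a logging function.
const handler = new Proxy(
    {},
    {
        get: (_target, property) =>
            (...args: unknown[]) =>
                log.push([property, ...args]),
    },
);

new Parser(handler).parseComplete("<p class=x>hi</p>");
// log now holds entries such as ["onopentagname", "p"],
// ["onattribute", "class", "x", null] and ["ontext", "hi"].
console.log(log);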
node_modules/htmlparser2/src/Tokenizer.ts (generated, vendored, normal file, +870 lines)
@@ -0,0 +1,870 @@
|
||||
import {
|
||||
EntityDecoder,
|
||||
DecodingMode,
|
||||
htmlDecodeTree,
|
||||
xmlDecodeTree,
|
||||
} from "entities/decode";
|
||||
|
||||
const enum CharCodes {
|
||||
Tab = 0x9, // "\t"
|
||||
NewLine = 0xa, // "\n"
|
||||
FormFeed = 0xc, // "\f"
|
||||
CarriageReturn = 0xd, // "\r"
|
||||
Space = 0x20, // " "
|
||||
ExclamationMark = 0x21, // "!"
|
||||
Number = 0x23, // "#"
|
||||
Amp = 0x26, // "&"
|
||||
SingleQuote = 0x27, // "'"
|
||||
DoubleQuote = 0x22, // '"'
|
||||
Dash = 0x2d, // "-"
|
||||
Slash = 0x2f, // "/"
|
||||
Zero = 0x30, // "0"
|
||||
Nine = 0x39, // "9"
|
||||
Semi = 0x3b, // ";"
|
||||
Lt = 0x3c, // "<"
|
||||
Eq = 0x3d, // "="
|
||||
Gt = 0x3e, // ">"
|
||||
Questionmark = 0x3f, // "?"
|
||||
UpperA = 0x41, // "A"
|
||||
LowerA = 0x61, // "a"
|
||||
UpperF = 0x46, // "F"
|
||||
LowerF = 0x66, // "f"
|
||||
UpperZ = 0x5a, // "Z"
|
||||
LowerZ = 0x7a, // "z"
|
||||
LowerX = 0x78, // "x"
|
||||
OpeningSquareBracket = 0x5b, // "["
|
||||
}
|
||||
|
||||
/** All the states the tokenizer can be in. */
|
||||
const enum State {
|
||||
Text = 1,
|
||||
BeforeTagName, // After <
|
||||
InTagName,
|
||||
InSelfClosingTag,
|
||||
BeforeClosingTagName,
|
||||
InClosingTagName,
|
||||
AfterClosingTagName,
|
||||
|
||||
// Attributes
|
||||
BeforeAttributeName,
|
||||
InAttributeName,
|
||||
AfterAttributeName,
|
||||
BeforeAttributeValue,
|
||||
InAttributeValueDq, // "
|
||||
InAttributeValueSq, // '
|
||||
InAttributeValueNq,
|
||||
|
||||
// Declarations
|
||||
BeforeDeclaration, // !
|
||||
InDeclaration,
|
||||
|
||||
// Processing instructions
|
||||
InProcessingInstruction, // ?
|
||||
|
||||
// Comments & CDATA
|
||||
BeforeComment,
|
||||
CDATASequence,
|
||||
InSpecialComment,
|
||||
InCommentLike,
|
||||
|
||||
// Special tags
|
||||
BeforeSpecialS, // Decide if we deal with `<script` or `<style`
|
||||
BeforeSpecialT, // Decide if we deal with `<title` or `<textarea`
|
||||
SpecialStartSequence,
|
||||
InSpecialTag,
|
||||
|
||||
InEntity,
|
||||
}
|
||||
|
||||
function isWhitespace(c: number): boolean {
|
||||
return (
|
||||
c === CharCodes.Space ||
|
||||
c === CharCodes.NewLine ||
|
||||
c === CharCodes.Tab ||
|
||||
c === CharCodes.FormFeed ||
|
||||
c === CharCodes.CarriageReturn
|
||||
);
|
||||
}
|
||||
|
||||
function isEndOfTagSection(c: number): boolean {
|
||||
return c === CharCodes.Slash || c === CharCodes.Gt || isWhitespace(c);
|
||||
}
|
||||
|
||||
function isASCIIAlpha(c: number): boolean {
|
||||
return (
|
||||
(c >= CharCodes.LowerA && c <= CharCodes.LowerZ) ||
|
||||
(c >= CharCodes.UpperA && c <= CharCodes.UpperZ)
|
||||
);
|
||||
}
|
||||
|
||||
export enum QuoteType {
|
||||
NoValue = 0,
|
||||
Unquoted = 1,
|
||||
Single = 2,
|
||||
Double = 3,
|
||||
}
|
||||
|
||||
export interface Callbacks {
|
||||
onattribdata(start: number, endIndex: number): void;
|
||||
onattribentity(codepoint: number): void;
|
||||
onattribend(quote: QuoteType, endIndex: number): void;
|
||||
onattribname(start: number, endIndex: number): void;
|
||||
oncdata(start: number, endIndex: number, endOffset: number): void;
|
||||
onclosetag(start: number, endIndex: number): void;
|
||||
oncomment(start: number, endIndex: number, endOffset: number): void;
|
||||
ondeclaration(start: number, endIndex: number): void;
|
||||
onend(): void;
|
||||
onopentagend(endIndex: number): void;
|
||||
onopentagname(start: number, endIndex: number): void;
|
||||
onprocessinginstruction(start: number, endIndex: number): void;
|
||||
onselfclosingtag(endIndex: number): void;
|
||||
ontext(start: number, endIndex: number): void;
|
||||
ontextentity(codepoint: number, endIndex: number): void;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sequences used to match longer strings.
|
||||
*
|
||||
* We don't have `Script`, `Style`, or `Title` here. Instead, we re-use the *End
|
||||
* sequences with an increased offset.
|
||||
*/
|
||||
const Sequences = {
|
||||
Cdata: new Uint8Array([0x43, 0x44, 0x41, 0x54, 0x41, 0x5b]), // CDATA[
|
||||
CdataEnd: new Uint8Array([0x5d, 0x5d, 0x3e]), // ]]>
|
||||
CommentEnd: new Uint8Array([0x2d, 0x2d, 0x3e]), // `-->`
|
||||
ScriptEnd: new Uint8Array([0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74]), // `</script`
|
||||
StyleEnd: new Uint8Array([0x3c, 0x2f, 0x73, 0x74, 0x79, 0x6c, 0x65]), // `</style`
|
||||
TitleEnd: new Uint8Array([0x3c, 0x2f, 0x74, 0x69, 0x74, 0x6c, 0x65]), // `</title`
|
||||
TextareaEnd: new Uint8Array([
|
||||
0x3c, 0x2f, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61,
|
||||
]), // `</textarea`
|
||||
XmpEnd: new Uint8Array([0x3c, 0x2f, 0x78, 0x6d, 0x70]), // `</xmp`
|
||||
};
|
||||
|
||||
export default class Tokenizer {
|
||||
/** The current state the tokenizer is in. */
|
||||
private state = State.Text;
|
||||
/** The read buffer. */
|
||||
private buffer = "";
|
||||
/** The beginning of the section that is currently being read. */
|
||||
private sectionStart = 0;
|
||||
/** The index within the buffer that we are currently looking at. */
|
||||
private index = 0;
|
||||
/** The start of the last entity. */
|
||||
private entityStart = 0;
|
||||
/** Some behavior, eg. when decoding entities, is done while we are in another state. This keeps track of the other state type. */
|
||||
private baseState = State.Text;
|
||||
/** For special parsing behavior inside of script and style tags. */
|
||||
private isSpecial = false;
|
||||
/** Indicates whether the tokenizer has been paused. */
|
||||
public running = true;
|
||||
/** The offset of the current buffer. */
|
||||
private offset = 0;
|
||||
|
||||
private readonly xmlMode: boolean;
|
||||
private readonly decodeEntities: boolean;
|
||||
private readonly entityDecoder: EntityDecoder;
|
||||
|
||||
constructor(
|
||||
{
|
||||
xmlMode = false,
|
||||
decodeEntities = true,
|
||||
}: { xmlMode?: boolean; decodeEntities?: boolean },
|
||||
private readonly cbs: Callbacks,
|
||||
) {
|
||||
this.xmlMode = xmlMode;
|
||||
this.decodeEntities = decodeEntities;
|
||||
this.entityDecoder = new EntityDecoder(
|
||||
xmlMode ? xmlDecodeTree : htmlDecodeTree,
|
||||
(cp, consumed) => this.emitCodePoint(cp, consumed),
|
||||
);
|
||||
}
|
||||
|
||||
public reset(): void {
|
||||
this.state = State.Text;
|
||||
this.buffer = "";
|
||||
this.sectionStart = 0;
|
||||
this.index = 0;
|
||||
this.baseState = State.Text;
|
||||
this.currentSequence = undefined!;
|
||||
this.running = true;
|
||||
this.offset = 0;
|
||||
}
|
||||
|
||||
public write(chunk: string): void {
|
||||
this.offset += this.buffer.length;
|
||||
this.buffer = chunk;
|
||||
this.parse();
|
||||
}
|
||||
|
||||
public end(): void {
|
||||
if (this.running) this.finish();
|
||||
}
|
||||
|
||||
public pause(): void {
|
||||
this.running = false;
|
||||
}
|
||||
|
||||
public resume(): void {
|
||||
this.running = true;
|
||||
if (this.index < this.buffer.length + this.offset) {
|
||||
this.parse();
|
||||
}
|
||||
}
|
||||
|
||||
private stateText(c: number): void {
|
||||
if (
|
||||
c === CharCodes.Lt ||
|
||||
(!this.decodeEntities && this.fastForwardTo(CharCodes.Lt))
|
||||
) {
|
||||
if (this.index > this.sectionStart) {
|
||||
this.cbs.ontext(this.sectionStart, this.index);
|
||||
}
|
||||
this.state = State.BeforeTagName;
|
||||
this.sectionStart = this.index;
|
||||
} else if (this.decodeEntities && c === CharCodes.Amp) {
|
||||
this.startEntity();
|
||||
}
|
||||
}
|
||||
|
||||
private currentSequence: Uint8Array = undefined!;
|
||||
private sequenceIndex = 0;
|
||||
private stateSpecialStartSequence(c: number): void {
|
||||
const isEnd = this.sequenceIndex === this.currentSequence.length;
|
||||
const isMatch = isEnd
|
||||
? // If we are at the end of the sequence, make sure the tag name has ended
|
||||
isEndOfTagSection(c)
|
||||
: // Otherwise, do a case-insensitive comparison
|
||||
(c | 0x20) === this.currentSequence[this.sequenceIndex];
|
||||
|
||||
if (!isMatch) {
|
||||
this.isSpecial = false;
|
||||
} else if (!isEnd) {
|
||||
this.sequenceIndex++;
|
||||
return;
|
||||
}
|
||||
|
||||
this.sequenceIndex = 0;
|
||||
this.state = State.InTagName;
|
||||
this.stateInTagName(c);
|
||||
}
|
||||
|
||||
/** Look for an end tag. For <title> tags, also decode entities. */
|
||||
private stateInSpecialTag(c: number): void {
|
||||
if (this.sequenceIndex === this.currentSequence.length) {
|
||||
if (c === CharCodes.Gt || isWhitespace(c)) {
|
||||
const endOfText = this.index - this.currentSequence.length;
|
||||
|
||||
if (this.sectionStart < endOfText) {
|
||||
// Spoof the index so that reported locations match up.
|
||||
const actualIndex = this.index;
|
||||
this.index = endOfText;
|
||||
this.cbs.ontext(this.sectionStart, endOfText);
|
||||
this.index = actualIndex;
|
||||
}
|
||||
|
||||
this.isSpecial = false;
|
||||
this.sectionStart = endOfText + 2; // Skip over the `</`
|
||||
this.stateInClosingTagName(c);
|
||||
return; // We are done; skip the rest of the function.
|
||||
}
|
||||
|
||||
this.sequenceIndex = 0;
|
||||
}
|
||||
|
||||
if ((c | 0x20) === this.currentSequence[this.sequenceIndex]) {
|
||||
this.sequenceIndex += 1;
|
||||
} else if (this.sequenceIndex === 0) {
|
||||
if (this.currentSequence === Sequences.TitleEnd) {
|
||||
// We have to parse entities in <title> tags.
|
||||
if (this.decodeEntities && c === CharCodes.Amp) {
|
||||
this.startEntity();
|
||||
}
|
||||
} else if (this.fastForwardTo(CharCodes.Lt)) {
|
||||
// Outside of <title> tags, we can fast-forward.
|
||||
this.sequenceIndex = 1;
|
||||
}
|
||||
} else {
|
||||
// If we see a `<`, set the sequence index to 1; useful for eg. `<</script>`.
|
||||
this.sequenceIndex = Number(c === CharCodes.Lt);
|
||||
}
|
||||
}
|
||||
|
||||
private stateCDATASequence(c: number): void {
|
||||
if (c === Sequences.Cdata[this.sequenceIndex]) {
|
||||
if (++this.sequenceIndex === Sequences.Cdata.length) {
|
||||
this.state = State.InCommentLike;
|
||||
this.currentSequence = Sequences.CdataEnd;
|
||||
this.sequenceIndex = 0;
|
||||
this.sectionStart = this.index + 1;
|
||||
}
|
||||
} else {
|
||||
this.sequenceIndex = 0;
|
||||
this.state = State.InDeclaration;
|
||||
this.stateInDeclaration(c); // Reconsume the character
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* When we wait for one specific character, we can speed things up
|
||||
* by skipping through the buffer until we find it.
|
||||
*
|
||||
* @returns Whether the character was found.
|
||||
*/
|
||||
private fastForwardTo(c: number): boolean {
|
||||
while (++this.index < this.buffer.length + this.offset) {
|
||||
if (this.buffer.charCodeAt(this.index - this.offset) === c) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We increment the index at the end of the `parse` loop,
|
||||
* so set it to `buffer.length - 1` here.
|
||||
*
|
||||
* TODO: Refactor `parse` to increment index before calling states.
|
||||
*/
|
||||
this.index = this.buffer.length + this.offset - 1;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Comments and CDATA end with `-->` and `]]>`.
|
||||
*
|
||||
* Their common qualities are:
|
||||
* - Their end sequences have a distinct character they start with.
|
||||
* - That character is then repeated, so we have to check multiple repeats.
|
||||
* - All characters but the start character of the sequence can be skipped.
|
||||
*/
|
||||
private stateInCommentLike(c: number): void {
|
||||
if (c === this.currentSequence[this.sequenceIndex]) {
|
||||
if (++this.sequenceIndex === this.currentSequence.length) {
|
||||
if (this.currentSequence === Sequences.CdataEnd) {
|
||||
this.cbs.oncdata(this.sectionStart, this.index, 2);
|
||||
} else {
|
||||
this.cbs.oncomment(this.sectionStart, this.index, 2);
|
||||
}
|
||||
|
||||
this.sequenceIndex = 0;
|
||||
this.sectionStart = this.index + 1;
|
||||
this.state = State.Text;
|
||||
}
|
||||
} else if (this.sequenceIndex === 0) {
|
||||
// Fast-forward to the first character of the sequence
|
||||
if (this.fastForwardTo(this.currentSequence[0])) {
|
||||
this.sequenceIndex = 1;
|
||||
}
|
||||
} else if (c !== this.currentSequence[this.sequenceIndex - 1]) {
|
||||
// Allow long sequences, eg. --->, ]]]>
|
||||
this.sequenceIndex = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* HTML only allows ASCII alpha characters (a-z and A-Z) at the beginning of a tag name.
|
||||
*
|
||||
* XML allows a lot more characters here (@see https://www.w3.org/TR/REC-xml/#NT-NameStartChar).
|
||||
* We allow anything that wouldn't end the tag.
|
||||
*/
|
||||
private isTagStartChar(c: number) {
|
||||
return this.xmlMode ? !isEndOfTagSection(c) : isASCIIAlpha(c);
|
||||
}
|
||||
|
||||
private startSpecial(sequence: Uint8Array, offset: number) {
|
||||
this.isSpecial = true;
|
||||
this.currentSequence = sequence;
|
||||
this.sequenceIndex = offset;
|
||||
this.state = State.SpecialStartSequence;
|
||||
}
|
||||
|
||||
private stateBeforeTagName(c: number): void {
|
||||
if (c === CharCodes.ExclamationMark) {
|
||||
this.state = State.BeforeDeclaration;
|
||||
this.sectionStart = this.index + 1;
|
||||
} else if (c === CharCodes.Questionmark) {
|
||||
this.state = State.InProcessingInstruction;
|
||||
this.sectionStart = this.index + 1;
|
||||
} else if (this.isTagStartChar(c)) {
|
||||
const lower = c | 0x20;
|
||||
this.sectionStart = this.index;
|
||||
if (this.xmlMode) {
|
||||
this.state = State.InTagName;
|
||||
} else if (lower === Sequences.ScriptEnd[2]) {
|
||||
this.state = State.BeforeSpecialS;
|
||||
} else if (
|
||||
lower === Sequences.TitleEnd[2] ||
|
||||
lower === Sequences.XmpEnd[2]
|
||||
) {
|
||||
this.state = State.BeforeSpecialT;
|
||||
} else {
|
||||
this.state = State.InTagName;
|
||||
}
|
||||
} else if (c === CharCodes.Slash) {
|
||||
this.state = State.BeforeClosingTagName;
|
||||
} else {
|
||||
this.state = State.Text;
|
||||
this.stateText(c);
|
||||
}
|
||||
}
|
||||
private stateInTagName(c: number): void {
|
||||
if (isEndOfTagSection(c)) {
|
||||
this.cbs.onopentagname(this.sectionStart, this.index);
|
||||
this.sectionStart = -1;
|
||||
this.state = State.BeforeAttributeName;
|
||||
this.stateBeforeAttributeName(c);
|
||||
}
|
||||
}
|
||||
private stateBeforeClosingTagName(c: number): void {
|
||||
if (isWhitespace(c)) {
|
||||
// Ignore
|
||||
} else if (c === CharCodes.Gt) {
|
||||
this.state = State.Text;
|
||||
} else {
|
||||
this.state = this.isTagStartChar(c)
|
||||
? State.InClosingTagName
|
||||
: State.InSpecialComment;
|
||||
this.sectionStart = this.index;
|
||||
}
|
||||
}
|
||||
private stateInClosingTagName(c: number): void {
|
||||
if (c === CharCodes.Gt || isWhitespace(c)) {
|
||||
this.cbs.onclosetag(this.sectionStart, this.index);
|
||||
this.sectionStart = -1;
|
||||
this.state = State.AfterClosingTagName;
|
||||
this.stateAfterClosingTagName(c);
|
||||
}
|
||||
}
|
||||
private stateAfterClosingTagName(c: number): void {
|
||||
// Skip everything until ">"
|
||||
if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) {
|
||||
this.state = State.Text;
|
||||
this.sectionStart = this.index + 1;
|
||||
}
|
||||
}
|
||||
private stateBeforeAttributeName(c: number): void {
|
||||
if (c === CharCodes.Gt) {
|
||||
this.cbs.onopentagend(this.index);
|
||||
if (this.isSpecial) {
|
||||
this.state = State.InSpecialTag;
|
||||
this.sequenceIndex = 0;
|
||||
} else {
|
||||
this.state = State.Text;
|
||||
}
|
||||
this.sectionStart = this.index + 1;
|
||||
} else if (c === CharCodes.Slash) {
|
||||
this.state = State.InSelfClosingTag;
|
||||
} else if (!isWhitespace(c)) {
|
||||
this.state = State.InAttributeName;
|
||||
this.sectionStart = this.index;
|
||||
}
|
||||
}
|
||||
private stateInSelfClosingTag(c: number): void {
|
||||
if (c === CharCodes.Gt) {
|
||||
this.cbs.onselfclosingtag(this.index);
|
||||
this.state = State.Text;
|
||||
this.sectionStart = this.index + 1;
|
||||
this.isSpecial = false; // Reset special state, in case of self-closing special tags
|
||||
} else if (!isWhitespace(c)) {
|
||||
this.state = State.BeforeAttributeName;
|
||||
this.stateBeforeAttributeName(c);
|
||||
}
|
||||
}
|
||||
private stateInAttributeName(c: number): void {
|
||||
if (c === CharCodes.Eq || isEndOfTagSection(c)) {
|
||||
this.cbs.onattribname(this.sectionStart, this.index);
|
||||
this.sectionStart = this.index;
|
||||
this.state = State.AfterAttributeName;
|
||||
this.stateAfterAttributeName(c);
|
||||
}
|
||||
}
|
||||
private stateAfterAttributeName(c: number): void {
|
||||
if (c === CharCodes.Eq) {
|
||||
this.state = State.BeforeAttributeValue;
|
||||
} else if (c === CharCodes.Slash || c === CharCodes.Gt) {
|
||||
this.cbs.onattribend(QuoteType.NoValue, this.sectionStart);
|
||||
this.sectionStart = -1;
|
||||
this.state = State.BeforeAttributeName;
|
||||
this.stateBeforeAttributeName(c);
|
||||
} else if (!isWhitespace(c)) {
|
||||
this.cbs.onattribend(QuoteType.NoValue, this.sectionStart);
|
||||
this.state = State.InAttributeName;
|
||||
this.sectionStart = this.index;
|
||||
}
|
||||
}
|
||||
private stateBeforeAttributeValue(c: number): void {
|
||||
if (c === CharCodes.DoubleQuote) {
|
||||
this.state = State.InAttributeValueDq;
|
||||
this.sectionStart = this.index + 1;
|
||||
} else if (c === CharCodes.SingleQuote) {
|
||||
this.state = State.InAttributeValueSq;
|
||||
this.sectionStart = this.index + 1;
|
||||
} else if (!isWhitespace(c)) {
|
||||
this.sectionStart = this.index;
|
||||
this.state = State.InAttributeValueNq;
|
||||
this.stateInAttributeValueNoQuotes(c); // Reconsume token
|
||||
}
|
||||
}
|
||||
private handleInAttributeValue(c: number, quote: number) {
|
||||
if (
|
||||
c === quote ||
|
||||
(!this.decodeEntities && this.fastForwardTo(quote))
|
||||
) {
|
||||
this.cbs.onattribdata(this.sectionStart, this.index);
|
||||
this.sectionStart = -1;
|
||||
this.cbs.onattribend(
|
||||
quote === CharCodes.DoubleQuote
|
||||
? QuoteType.Double
|
||||
: QuoteType.Single,
|
||||
this.index + 1,
|
||||
);
|
||||
this.state = State.BeforeAttributeName;
|
||||
} else if (this.decodeEntities && c === CharCodes.Amp) {
|
||||
this.startEntity();
|
||||
}
|
||||
}
|
||||
private stateInAttributeValueDoubleQuotes(c: number): void {
|
||||
this.handleInAttributeValue(c, CharCodes.DoubleQuote);
|
||||
}
|
||||
private stateInAttributeValueSingleQuotes(c: number): void {
|
||||
this.handleInAttributeValue(c, CharCodes.SingleQuote);
|
||||
}
|
||||
private stateInAttributeValueNoQuotes(c: number): void {
|
||||
if (isWhitespace(c) || c === CharCodes.Gt) {
|
||||
this.cbs.onattribdata(this.sectionStart, this.index);
|
||||
this.sectionStart = -1;
|
||||
this.cbs.onattribend(QuoteType.Unquoted, this.index);
|
||||
this.state = State.BeforeAttributeName;
|
||||
this.stateBeforeAttributeName(c);
|
||||
} else if (this.decodeEntities && c === CharCodes.Amp) {
|
||||
this.startEntity();
|
||||
}
|
||||
}
|
||||
private stateBeforeDeclaration(c: number): void {
|
||||
if (c === CharCodes.OpeningSquareBracket) {
|
||||
this.state = State.CDATASequence;
|
||||
this.sequenceIndex = 0;
|
||||
} else {
|
||||
this.state =
|
||||
c === CharCodes.Dash
|
||||
? State.BeforeComment
|
||||
: State.InDeclaration;
|
||||
}
|
||||
}
|
||||
private stateInDeclaration(c: number): void {
|
||||
if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) {
|
||||
this.cbs.ondeclaration(this.sectionStart, this.index);
|
||||
this.state = State.Text;
|
||||
this.sectionStart = this.index + 1;
|
||||
}
|
||||
}
|
||||
private stateInProcessingInstruction(c: number): void {
|
||||
if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) {
|
||||
this.cbs.onprocessinginstruction(this.sectionStart, this.index);
|
||||
this.state = State.Text;
|
||||
this.sectionStart = this.index + 1;
|
||||
}
|
||||
}
|
||||
private stateBeforeComment(c: number): void {
|
||||
if (c === CharCodes.Dash) {
|
||||
this.state = State.InCommentLike;
|
||||
this.currentSequence = Sequences.CommentEnd;
|
||||
// Allow short comments (e.g. <!-->)
|
||||
this.sequenceIndex = 2;
|
||||
this.sectionStart = this.index + 1;
|
||||
} else {
|
||||
this.state = State.InDeclaration;
|
||||
}
|
||||
}
|
||||
private stateInSpecialComment(c: number): void {
|
||||
if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) {
|
||||
this.cbs.oncomment(this.sectionStart, this.index, 0);
|
||||
this.state = State.Text;
|
||||
this.sectionStart = this.index + 1;
|
||||
}
|
||||
}
|
||||
private stateBeforeSpecialS(c: number): void {
|
||||
const lower = c | 0x20;
|
||||
if (lower === Sequences.ScriptEnd[3]) {
|
||||
this.startSpecial(Sequences.ScriptEnd, 4);
|
||||
} else if (lower === Sequences.StyleEnd[3]) {
|
||||
this.startSpecial(Sequences.StyleEnd, 4);
|
||||
} else {
|
||||
this.state = State.InTagName;
|
||||
this.stateInTagName(c); // Consume the token again
|
||||
}
|
||||
}
|
||||
|
||||
private stateBeforeSpecialT(c: number): void {
|
||||
const lower = c | 0x20;
|
||||
switch (lower) {
|
||||
case Sequences.TitleEnd[3]: {
|
||||
this.startSpecial(Sequences.TitleEnd, 4);
|
||||
|
||||
break;
|
||||
}
|
||||
case Sequences.TextareaEnd[3]: {
|
||||
this.startSpecial(Sequences.TextareaEnd, 4);
|
||||
|
||||
break;
|
||||
}
|
||||
case Sequences.XmpEnd[3]: {
|
||||
this.startSpecial(Sequences.XmpEnd, 4);
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
this.state = State.InTagName;
|
||||
this.stateInTagName(c); // Consume the token again
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private startEntity() {
|
||||
this.baseState = this.state;
|
||||
this.state = State.InEntity;
|
||||
this.entityStart = this.index;
|
||||
this.entityDecoder.startEntity(
|
||||
this.xmlMode
|
||||
? DecodingMode.Strict
|
||||
: this.baseState === State.Text ||
|
||||
this.baseState === State.InSpecialTag
|
||||
? DecodingMode.Legacy
|
||||
: DecodingMode.Attribute,
|
||||
);
|
||||
}
|
||||
|
||||
private stateInEntity(): void {
|
||||
const length = this.entityDecoder.write(
|
||||
this.buffer,
|
||||
this.index - this.offset,
|
||||
);
|
||||
|
||||
// If `length` is non-negative, we are done with the entity.
|
||||
if (length >= 0) {
|
||||
this.state = this.baseState;
|
||||
|
||||
if (length === 0) {
|
||||
this.index = this.entityStart;
|
||||
}
|
||||
} else {
|
||||
// Mark buffer as consumed.
|
||||
this.index = this.offset + this.buffer.length - 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove data that has already been consumed from the buffer.
|
||||
*/
|
||||
private cleanup() {
|
||||
// If we are inside of text or attributes, emit what we already have.
|
||||
if (this.running && this.sectionStart !== this.index) {
|
||||
if (
|
||||
this.state === State.Text ||
|
||||
(this.state === State.InSpecialTag && this.sequenceIndex === 0)
|
||||
) {
|
||||
this.cbs.ontext(this.sectionStart, this.index);
|
||||
this.sectionStart = this.index;
|
||||
} else if (
|
||||
this.state === State.InAttributeValueDq ||
|
||||
this.state === State.InAttributeValueSq ||
|
||||
this.state === State.InAttributeValueNq
|
||||
) {
|
||||
this.cbs.onattribdata(this.sectionStart, this.index);
|
||||
this.sectionStart = this.index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private shouldContinue() {
|
||||
return this.index < this.buffer.length + this.offset && this.running;
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterates through the buffer, calling the function corresponding to the current state.
|
||||
*
|
||||
* States that are more likely to be hit are higher up, as a performance improvement.
|
||||
*/
|
||||
private parse() {
|
||||
while (this.shouldContinue()) {
|
||||
const c = this.buffer.charCodeAt(this.index - this.offset);
|
||||
switch (this.state) {
|
||||
case State.Text: {
|
||||
this.stateText(c);
|
||||
break;
|
||||
}
|
||||
case State.SpecialStartSequence: {
|
||||
this.stateSpecialStartSequence(c);
|
||||
break;
|
||||
}
|
||||
case State.InSpecialTag: {
|
||||
this.stateInSpecialTag(c);
|
||||
break;
|
||||
}
|
||||
case State.CDATASequence: {
|
||||
this.stateCDATASequence(c);
|
||||
break;
|
||||
}
|
||||
case State.InAttributeValueDq: {
|
||||
this.stateInAttributeValueDoubleQuotes(c);
|
||||
break;
|
||||
}
|
||||
case State.InAttributeName: {
|
||||
this.stateInAttributeName(c);
|
||||
break;
|
||||
}
|
||||
case State.InCommentLike: {
|
||||
this.stateInCommentLike(c);
|
||||
break;
|
||||
}
|
||||
case State.InSpecialComment: {
|
||||
this.stateInSpecialComment(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeAttributeName: {
|
||||
this.stateBeforeAttributeName(c);
|
||||
break;
|
||||
}
|
||||
case State.InTagName: {
|
||||
this.stateInTagName(c);
|
||||
break;
|
||||
}
|
||||
case State.InClosingTagName: {
|
||||
this.stateInClosingTagName(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeTagName: {
|
||||
this.stateBeforeTagName(c);
|
||||
break;
|
||||
}
|
||||
case State.AfterAttributeName: {
|
||||
this.stateAfterAttributeName(c);
|
||||
break;
|
||||
}
|
||||
case State.InAttributeValueSq: {
|
||||
this.stateInAttributeValueSingleQuotes(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeAttributeValue: {
|
||||
this.stateBeforeAttributeValue(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeClosingTagName: {
|
||||
this.stateBeforeClosingTagName(c);
|
||||
break;
|
||||
}
|
||||
case State.AfterClosingTagName: {
|
||||
this.stateAfterClosingTagName(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeSpecialS: {
|
||||
this.stateBeforeSpecialS(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeSpecialT: {
|
||||
this.stateBeforeSpecialT(c);
|
||||
break;
|
||||
}
|
||||
case State.InAttributeValueNq: {
|
||||
this.stateInAttributeValueNoQuotes(c);
|
||||
break;
|
||||
}
|
||||
case State.InSelfClosingTag: {
|
||||
this.stateInSelfClosingTag(c);
|
||||
break;
|
||||
}
|
||||
case State.InDeclaration: {
|
||||
this.stateInDeclaration(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeDeclaration: {
|
||||
this.stateBeforeDeclaration(c);
|
||||
break;
|
||||
}
|
||||
case State.BeforeComment: {
|
||||
this.stateBeforeComment(c);
|
||||
break;
|
||||
}
|
||||
case State.InProcessingInstruction: {
|
||||
this.stateInProcessingInstruction(c);
|
||||
break;
|
||||
}
|
||||
case State.InEntity: {
|
||||
this.stateInEntity();
|
||||
break;
|
||||
}
|
||||
}
|
||||
this.index++;
|
||||
}
|
||||
this.cleanup();
|
||||
}
|
||||
|
||||
private finish() {
|
||||
if (this.state === State.InEntity) {
|
||||
this.entityDecoder.end();
|
||||
this.state = this.baseState;
|
||||
}
|
||||
|
||||
this.handleTrailingData();
|
||||
|
||||
this.cbs.onend();
|
||||
}
|
||||
|
||||
/** Handle any trailing data. */
|
||||
private handleTrailingData() {
|
||||
const endIndex = this.buffer.length + this.offset;
|
||||
|
||||
// If there is no remaining data, we are done.
|
||||
if (this.sectionStart >= endIndex) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.state === State.InCommentLike) {
|
||||
if (this.currentSequence === Sequences.CdataEnd) {
|
||||
this.cbs.oncdata(this.sectionStart, endIndex, 0);
|
||||
} else {
|
||||
this.cbs.oncomment(this.sectionStart, endIndex, 0);
|
||||
}
|
||||
} else if (
|
||||
this.state === State.InTagName ||
|
||||
this.state === State.BeforeAttributeName ||
|
||||
this.state === State.BeforeAttributeValue ||
|
||||
this.state === State.AfterAttributeName ||
|
||||
this.state === State.InAttributeName ||
|
||||
this.state === State.InAttributeValueSq ||
|
||||
this.state === State.InAttributeValueDq ||
|
||||
this.state === State.InAttributeValueNq ||
|
||||
this.state === State.InClosingTagName
|
||||
) {
|
||||
/*
|
||||
* If we are currently in an opening or closing tag, not calling the
|
||||
* respective callback signals that the tag should be ignored.
|
||||
*/
|
||||
} else {
|
||||
this.cbs.ontext(this.sectionStart, endIndex);
|
||||
}
|
||||
}
|
||||
|
||||
private emitCodePoint(cp: number, consumed: number): void {
|
||||
if (
|
||||
this.baseState !== State.Text &&
|
||||
this.baseState !== State.InSpecialTag
|
||||
) {
|
||||
if (this.sectionStart < this.entityStart) {
|
||||
this.cbs.onattribdata(this.sectionStart, this.entityStart);
|
||||
}
|
||||
this.sectionStart = this.entityStart + consumed;
|
||||
this.index = this.sectionStart - 1;
|
||||
|
||||
this.cbs.onattribentity(cp);
|
||||
} else {
|
||||
if (this.sectionStart < this.entityStart) {
|
||||
this.cbs.ontext(this.sectionStart, this.entityStart);
|
||||
}
|
||||
this.sectionStart = this.entityStart + consumed;
|
||||
this.index = this.sectionStart - 1;
|
||||
|
||||
this.cbs.ontextentity(cp, this.sectionStart);
|
||||
}
|
||||
}
|
||||
}
|
||||
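A note on the callbacks used throughout the Tokenizer above: the class never allocates substrings itself. Callbacks such as `ontext(start, endIndex)` or `onopentagname(start, endIndex)` receive half-open index ranges into the data that was written, and the consumer (normally `Parser`) does the slicing. The sketch below shows that pattern directly, assuming the class keeps the `(options, callbacks)` constructor shape and the `write`/`end` methods that `Parser` relies on; only the callbacks the sketch needs do real work.

import Tokenizer, { type Callbacks } from "./Tokenizer.js";

const input = "<p id=a>text</p>";
const pieces: string[] = [];

const callbacks: Callbacks = {
    ontext(start, endIndex) {
        pieces.push(input.slice(start, endIndex));
    },
    ontextentity(codepoint) {
        pieces.push(String.fromCodePoint(codepoint));
    },
    onopentagname(start, endIndex) {
        pieces.push(`<${input.slice(start, endIndex)}>`);
    },
    onclosetag(start, endIndex) {
        pieces.push(`</${input.slice(start, endIndex)}>`);
    },
    // The remaining members are required by the interface but not needed here.
    onattribdata() {},
    onattribentity() {},
    onattribend() {},
    onattribname() {},
    oncdata() {},
    oncomment() {},
    ondeclaration() {},
    onend() {},
    onopentagend() {},
    onprocessinginstruction() {},
    onselfclosingtag() {},
};

const tokenizer = new Tokenizer({}, callbacks);
tokenizer.write(input);
tokenizer.end();
// pieces is now ["<p>", "text", "</p>"]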
82
node_modules/htmlparser2/src/WritableStream.spec.ts
generated
vendored
Normal file
82
node_modules/htmlparser2/src/WritableStream.spec.ts
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
import { createReadStream } from "node:fs";
|
||||
import * as fs from "node:fs/promises";
|
||||
import * as stream from "node:stream";
|
||||
import { describe, it, expect, vi } from "vitest";
|
||||
import type { Handler, ParserOptions } from "./Parser.js";
|
||||
import { WritableStream } from "./WritableStream.js";
|
||||
import * as helper from "./__fixtures__/testHelper.js";
|
||||
|
||||
describe("WritableStream", () => {
|
||||
it("should decode fragmented unicode characters", () => {
|
||||
const ontext = vi.fn();
|
||||
const stream = new WritableStream({ ontext });
|
||||
|
||||
stream.write(Buffer.from([0xe2, 0x82]));
|
||||
stream.write(Buffer.from([0xac]));
|
||||
stream.write("");
|
||||
stream.end();
|
||||
|
||||
expect(ontext).toHaveBeenCalledWith("€");
|
||||
});
|
||||
|
||||
it("Basic html", () => testStream("Basic.html"));
|
||||
it("Attributes", () => testStream("Attributes.html"));
|
||||
it("SVG", () => testStream("Svg.html"));
|
||||
it("RSS feed", () => testStream("RSS_Example.xml", { xmlMode: true }));
|
||||
it("Atom feed", () => testStream("Atom_Example.xml", { xmlMode: true }));
|
||||
it("RDF feed", () => testStream("RDF_Example.xml", { xmlMode: true }));
|
||||
});
|
||||
|
||||
function getPromiseEventCollector(): [
|
||||
handler: Partial<Handler>,
|
||||
promise: Promise<unknown>,
|
||||
] {
|
||||
let handler: Partial<Handler> | undefined;
|
||||
const promise = new Promise<unknown>((resolve, reject) => {
|
||||
handler = helper.getEventCollector((error, events) => {
|
||||
if (error) {
|
||||
reject(error);
|
||||
} else {
|
||||
resolve(events);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
return [handler!, promise];
|
||||
}
|
||||
|
||||
// TODO[engine:node@>=16]: Use promise version of `stream.finished` instead.
|
||||
function finished(input: Parameters<typeof stream.finished>[0]): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
stream.finished(input, (error) => (error ? reject(error) : resolve()));
|
||||
});
|
||||
}
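// Editorial sketch of the TODO above, not part of the upstream test file:
// on Node.js >= 16 the manual wrapper can be replaced with the promisified
// `finished` that ships in "node:stream/promises".
import { finished as finishedPromise } from "node:stream/promises";

function finishedViaPromises(
    input: Parameters<typeof stream.finished>[0],
): Promise<void> {
    return finishedPromise(input);
}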
|
||||
|
||||
async function testStream(
|
||||
file: string,
|
||||
options?: ParserOptions,
|
||||
): Promise<void> {
|
||||
const filePath = new URL(`__fixtures__/Documents/${file}`, import.meta.url);
|
||||
|
||||
const [streamHandler, eventsPromise] = getPromiseEventCollector();
|
||||
|
||||
const fsStream = createReadStream(filePath).pipe(
|
||||
new WritableStream(streamHandler, options),
|
||||
);
|
||||
|
||||
await finished(fsStream);
|
||||
|
||||
const events = await eventsPromise;
|
||||
|
||||
expect(events).toMatchSnapshot();
|
||||
|
||||
const [singlePassHandler, singlePassPromise] = getPromiseEventCollector();
|
||||
|
||||
const singlePassStream = new WritableStream(singlePassHandler, options).end(
|
||||
await fs.readFile(filePath),
|
||||
);
|
||||
|
||||
await finished(singlePassStream);
|
||||
|
||||
expect(await singlePassPromise).toStrictEqual(events);
|
||||
}
|
||||
43
node_modules/htmlparser2/src/WritableStream.ts
generated
vendored
Normal file
43
node_modules/htmlparser2/src/WritableStream.ts
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
import { Parser, type Handler, type ParserOptions } from "./Parser.js";
|
||||
/*
|
||||
* NOTE: If either of these two imports produces a type error,
|
||||
* please update your @types/node dependency!
|
||||
*/
|
||||
import { Writable } from "node:stream";
|
||||
import { StringDecoder } from "node:string_decoder";
|
||||
|
||||
// Following the example in https://nodejs.org/api/stream.html#stream_decoding_buffers_in_a_writable_stream
|
||||
function isBuffer(_chunk: string | Buffer, encoding: string): _chunk is Buffer {
|
||||
return encoding === "buffer";
|
||||
}
|
||||
|
||||
/**
|
||||
* WritableStream makes the `Parser` interface available as a NodeJS stream.
|
||||
*
|
||||
* @see Parser
|
||||
*/
|
||||
export class WritableStream extends Writable {
|
||||
private readonly _parser: Parser;
|
||||
private readonly _decoder = new StringDecoder();
|
||||
|
||||
constructor(cbs: Partial<Handler>, options?: ParserOptions) {
|
||||
super({ decodeStrings: false });
|
||||
this._parser = new Parser(cbs, options);
|
||||
}
|
||||
|
||||
override _write(
|
||||
chunk: string | Buffer,
|
||||
encoding: string,
|
||||
callback: () => void,
|
||||
): void {
|
||||
this._parser.write(
|
||||
isBuffer(chunk, encoding) ? this._decoder.write(chunk) : chunk,
|
||||
);
|
||||
callback();
|
||||
}
|
||||
|
||||
override _final(callback: () => void): void {
|
||||
this._parser.end(this._decoder.end());
|
||||
callback();
|
||||
}
|
||||
}
|
||||
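Because the class above extends `Writable`, it can terminate any Node stream pipeline; the internal `StringDecoder` keeps multi-byte characters intact across chunk boundaries. A minimal usage sketch, written as it would look from a sibling module under `src/` (the file name and the chosen handler callbacks are illustrative):

import { createReadStream } from "node:fs";
import { WritableStream } from "./WritableStream.js";

const parserStream = new WritableStream({
    onopentag(name) {
        console.log("open", name);
    },
    ontext(text) {
        console.log("text", text);
    },
    onend() {
        console.log("done");
    },
});

// Stream a document from disk straight into the parser.
createReadStream("example.html").pipe(parserStream);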
27
node_modules/htmlparser2/src/__fixtures__/Documents/Atom_Example.xml
generated
vendored
Normal file
27
node_modules/htmlparser2/src/__fixtures__/Documents/Atom_Example.xml
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- http://en.wikipedia.org/wiki/Atom_%28standard%29 -->
|
||||
<feed xmlns="http://www.w3.org/2005/Atom">
|
||||
<title>Example Feed</title>
|
||||
<subtitle>A subtitle.</subtitle>
|
||||
<link href="http://example.org/feed/" rel="self" />
|
||||
<link href="http://example.org/" />
|
||||
<id>urn:uuid:60a76c80-d399-11d9-b91C-0003939e0af6</id>
|
||||
<updated>2003-12-13T18:30:02Z</updated>
|
||||
<author>
|
||||
<name>John Doe</name>
|
||||
<email>johndoe@example.com</email>
|
||||
</author>
|
||||
|
||||
<entry>
|
||||
<title>Atom-Powered Robots Run Amok</title>
|
||||
<link href="http://example.org/2003/12/13/atom03" />
|
||||
<link rel="alternate" type="text/html" href="http://example.org/2003/12/13/atom03.html"/>
|
||||
<link rel="edit" href="http://example.org/2003/12/13/atom03/edit"/>
|
||||
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
|
||||
<updated>2003-12-13T18:30:02Z</updated>
|
||||
<content type="html"><p>Some content.</p></content>
|
||||
</entry>
|
||||
|
||||
<entry/>
|
||||
|
||||
</feed>
|
||||
16
node_modules/htmlparser2/src/__fixtures__/Documents/Attributes.html
generated
vendored
Normal file
16
node_modules/htmlparser2/src/__fixtures__/Documents/Attributes.html
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Attributes test</title>
|
||||
</head>
|
||||
<body>
|
||||
<!-- Normal attributes -->
|
||||
<button id="test0" class="value0" title="value1">class="value0" title="value1"</button>
|
||||
|
||||
<!-- Attributes with no quotes or value -->
|
||||
<button id="test1" class=value2 disabled>class=value2 disabled</button>
|
||||
|
||||
<!-- Attributes with no space between them. Not valid, but accepted by the browser -->
|
||||
<button id="test2" class="value4"title="value5">class="value4"title="value5"</button>
|
||||
</body>
|
||||
</html>
|
||||
1
node_modules/htmlparser2/src/__fixtures__/Documents/Basic.html
generated
vendored
Normal file
1
node_modules/htmlparser2/src/__fixtures__/Documents/Basic.html
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<!DOCTYPE html><html><title>The Title</title><body>Hello world</body></html>
|
||||
63
node_modules/htmlparser2/src/__fixtures__/Documents/RDF_Example.xml
generated
vendored
Normal file
63
node_modules/htmlparser2/src/__fixtures__/Documents/RDF_Example.xml
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://purl.org/rss/1.0/" xmlns:ev="http://purl.org/rss/1.0/modules/event/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:admin="http://webns.net/mvcb/">
|
||||
<channel rdf:about="https://github.com/fb55/htmlparser2/">
|
||||
<title>A title to parse and remember</title>
|
||||
<link>https://github.com/fb55/htmlparser2/</link>
|
||||
<description/>
|
||||
<dc:language>en-us</dc:language>
|
||||
<dc:rights>Copyright 2015 the authors</dc:rights>
|
||||
<dc:publisher>webmaster@thisisafakedoma.in</dc:publisher>
|
||||
<dc:creator>webmaster@thisisafakedoma.in</dc:creator>
|
||||
<dc:source>https://github.com/fb55/htmlparser2/</dc:source>
|
||||
<dc:title>A title to parse and remember</dc:title>
|
||||
<dc:type>Collection</dc:type>
|
||||
<syn:updateBase>2011-11-04T09:39:10-07:00</syn:updateBase>
|
||||
<syn:updateFrequency>4</syn:updateFrequency>
|
||||
<syn:updatePeriod>hourly</syn:updatePeriod>
|
||||
<items>
|
||||
<rdf:Seq>
|
||||
<rdf:li rdf:resource="http://somefakesite/path/to/something.html"/>
|
||||
</rdf:Seq>
|
||||
</items>
|
||||
</channel>
|
||||
<item rdf:about="http://somefakesite/path/to/something.html">
|
||||
<title><![CDATA[ Fast HTML Parsing ]]></title>
|
||||
<link>
|
||||
http://somefakesite/path/to/something.html
|
||||
</link>
|
||||
<description><![CDATA[
|
||||
Great test content<br>A link: <a href="http://github.com">Github</a>
|
||||
]]></description>
|
||||
<dc:date>2011-11-04T09:35:17-07:00</dc:date>
|
||||
<dc:language>en-us</dc:language>
|
||||
<dc:rights>Copyright 2015 the authors</dc:rights>
|
||||
<dc:source>
|
||||
http://somefakesite/path/to/something.html
|
||||
</dc:source>
|
||||
<dc:title><![CDATA[ Fast HTML Parsing ]]></dc:title>
|
||||
<dc:type>text</dc:type>
|
||||
<dcterms:issued>2011-11-04T09:35:17-07:00</dcterms:issued>
|
||||
</item>
|
||||
<item rdf:about="http://somefakesite/path/to/something-else.html">
|
||||
<title><![CDATA[
|
||||
This space intentionally left blank
|
||||
]]></title>
|
||||
<link>
|
||||
http://somefakesite/path/to/something-else.html
|
||||
</link>
|
||||
<description><![CDATA[
|
||||
The early bird gets the worm
|
||||
]]></description>
|
||||
<dc:date>2011-11-04T09:34:54-07:00</dc:date>
|
||||
<dc:language>en-us</dc:language>
|
||||
<dc:rights>Copyright 2015 the authors</dc:rights>
|
||||
<dc:source>
|
||||
http://somefakesite/path/to/something-else.html
|
||||
</dc:source>
|
||||
<dc:title><![CDATA[
|
||||
This space intentionally left blank
|
||||
]]></dc:title>
|
||||
<dc:type>text</dc:type>
|
||||
<dcterms:issued>2011-11-04T09:34:54-07:00</dcterms:issued>
|
||||
</item>
|
||||
</rdf:RDF>
|
||||
49
node_modules/htmlparser2/src/__fixtures__/Documents/RSS_Example.xml
generated
vendored
Normal file
49
node_modules/htmlparser2/src/__fixtures__/Documents/RSS_Example.xml
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
<?xml version="1.0"?>
|
||||
<!-- http://cyber.law.harvard.edu/rss/examples/rss2sample.xml -->
|
||||
<rss version="2.0">
|
||||
<channel>
|
||||
<title>Liftoff News</title>
|
||||
<link>http://liftoff.msfc.nasa.gov/</link>
|
||||
<description>Liftoff to Space Exploration.</description>
|
||||
<language>en-us</language>
|
||||
<pubDate>Tue, 10 Jun 2003 04:00:00 GMT</pubDate>
|
||||
|
||||
<lastBuildDate>Tue, 10 Jun 2003 09:41:01 GMT</lastBuildDate>
|
||||
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
|
||||
<generator>Weblog Editor 2.0</generator>
|
||||
<managingEditor>editor@example.com</managingEditor>
|
||||
<webMaster>webmaster@example.com</webMaster>
|
||||
<item>
|
||||
|
||||
<title>Star City</title>
|
||||
<link>http://liftoff.msfc.nasa.gov/news/2003/news-starcity.asp</link>
|
||||
<description>How do Americans get ready to work with Russians aboard the International Space Station? They take a crash course in culture, language and protocol at Russia's <a href="http://howe.iki.rssi.ru/GCTC/gctc_e.htm">Star City</a>.</description>
|
||||
<pubDate>Tue, 03 Jun 2003 09:39:21 GMT</pubDate>
|
||||
<guid>http://liftoff.msfc.nasa.gov/2003/06/03.html#item573</guid>
|
||||
|
||||
</item>
|
||||
<item>
|
||||
<description>Sky watchers in Europe, Asia, and parts of Alaska and Canada will experience a <a href="http://science.nasa.gov/headlines/y2003/30may_solareclipse.htm">partial eclipse of the Sun</a> on Saturday, May 31st.</description>
|
||||
<pubDate>Fri, 30 May 2003 11:06:42 GMT</pubDate>
|
||||
<guid>http://liftoff.msfc.nasa.gov/2003/05/30.html#item572</guid>
|
||||
|
||||
</item>
|
||||
<item>
|
||||
<title>The Engine That Does More</title>
|
||||
<link>http://liftoff.msfc.nasa.gov/news/2003/news-VASIMR.asp</link>
|
||||
<description>Before man travels to Mars, NASA hopes to design new engines that will let us fly through the Solar System more quickly. The proposed VASIMR engine would do that.</description>
|
||||
<pubDate>Tue, 27 May 2003 08:37:32 GMT</pubDate>
|
||||
<guid>http://liftoff.msfc.nasa.gov/2003/05/27.html#item571</guid>
|
||||
|
||||
</item>
|
||||
<item>
|
||||
<title>Astronauts' Dirty Laundry</title>
|
||||
<link>http://liftoff.msfc.nasa.gov/news/2003/news-laundry.asp</link>
|
||||
<description>Compared to earlier spacecraft, the International Space Station has many luxuries, but laundry facilities are not one of them. Instead, astronauts have other options.</description>
|
||||
<pubDate>Tue, 20 May 2003 08:56:02 GMT</pubDate>
|
||||
<guid>http://liftoff.msfc.nasa.gov/2003/05/20.html#item570</guid>
|
||||
|
||||
<media:content height="200" medium="image" url="https://picsum.photos/200" width="200"/>
|
||||
</item>
|
||||
</channel>
|
||||
</rss>
|
||||
19
node_modules/htmlparser2/src/__fixtures__/Documents/Svg.html
generated
vendored
Normal file
19
node_modules/htmlparser2/src/__fixtures__/Documents/Svg.html
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<title>SVG test</title>
|
||||
</head>
|
||||
<body>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<title>Test</title>
|
||||
<animate />
|
||||
<polygon />
|
||||
<g>
|
||||
<path>
|
||||
<title>x</title>
|
||||
<animate />
|
||||
</path>
|
||||
</g>
|
||||
</svg>
|
||||
</body>
|
||||
</html>
|
||||
89
node_modules/htmlparser2/src/__fixtures__/testHelper.ts
generated
vendored
Normal file
89
node_modules/htmlparser2/src/__fixtures__/testHelper.ts
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
import type { Parser, Handler } from "../Parser.js";
|
||||
|
||||
interface Event {
|
||||
$event: string;
|
||||
data: unknown[];
|
||||
startIndex: number;
|
||||
endIndex: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a handler that calls the supplied callback with simplified events on
|
||||
* completion.
|
||||
*
|
||||
* @internal
|
||||
* @param callback Function to call with all events.
|
||||
*/
|
||||
export function getEventCollector(
|
||||
callback: (error: Error | null, events?: Event[]) => void,
|
||||
): Partial<Handler> {
|
||||
const events: Event[] = [];
|
||||
let parser: Parser;
|
||||
|
||||
function handle(event: string, data: unknown[]): void {
|
||||
switch (event) {
|
||||
case "onerror": {
|
||||
callback(data[0] as Error);
|
||||
|
||||
break;
|
||||
}
|
||||
case "onend": {
|
||||
callback(null, events);
|
||||
|
||||
break;
|
||||
}
|
||||
case "onreset": {
|
||||
events.length = 0;
|
||||
|
||||
break;
|
||||
}
|
||||
case "onparserinit": {
|
||||
parser = data[0] as Parser;
|
||||
|
||||
// Don't collect event
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
// eslint-disable-next-line unicorn/prefer-at
|
||||
const last = events[events.length - 1];
|
||||
|
||||
// Combine text nodes
|
||||
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
|
||||
if (event === "ontext" && last && last.$event === "text") {
|
||||
(last.data[0] as string) += data[0];
|
||||
last.endIndex = parser.endIndex;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// Remove `undefined`s from attribute responses, as they cannot be represented in JSON.
|
||||
if (event === "onattribute" && data[2] === undefined) {
|
||||
data.pop();
|
||||
}
|
||||
|
||||
if (!(parser.startIndex <= parser.endIndex)) {
|
||||
throw new Error(
|
||||
`Invalid start/end index ${parser.startIndex} > ${parser.endIndex}`,
|
||||
);
|
||||
}
|
||||
|
||||
events.push({
|
||||
$event: event.slice(2),
|
||||
startIndex: parser.startIndex,
|
||||
endIndex: parser.endIndex,
|
||||
data,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new Proxy(
|
||||
{},
|
||||
{
|
||||
get:
|
||||
(_, event: string) =>
|
||||
(...data: unknown[]) =>
|
||||
handle(event, data),
|
||||
},
|
||||
);
|
||||
}
|
||||
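The Proxy above lets the collector stand in for any subset of `Handler` without enumerating its methods. A short sketch of driving it outside the snapshot tests, written as it would look from a sibling file under `src/` (the input string is illustrative):

import { Parser } from "./Parser.js";
import * as helper from "./__fixtures__/testHelper.js";

const parser = new Parser(
    helper.getEventCollector((error, events) => {
        if (error) throw error;
        // Each collected entry carries `$event`, `startIndex`, `endIndex` and `data`.
        console.log(events?.map((event) => event.$event));
    }),
);

parser.end("<ul><li>one</li></ul>");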
103
node_modules/htmlparser2/src/__snapshots__/FeedHandler.spec.ts.snap
generated
vendored
Normal file
103
node_modules/htmlparser2/src/__snapshots__/FeedHandler.spec.ts.snap
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
|
||||
|
||||
exports[`parseFeed > (atomFeed) 1`] = `
|
||||
{
|
||||
"author": "johndoe@example.com",
|
||||
"description": "A subtitle.",
|
||||
"id": "urn:uuid:60a76c80-d399-11d9-b91C-0003939e0af6",
|
||||
"items": [
|
||||
{
|
||||
"description": "Some content.",
|
||||
"id": "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
|
||||
"link": "http://example.org/2003/12/13/atom03",
|
||||
"media": [],
|
||||
"pubDate": 2003-12-13T18:30:02.000Z,
|
||||
"title": "Atom-Powered Robots Run Amok",
|
||||
},
|
||||
{
|
||||
"media": [],
|
||||
},
|
||||
],
|
||||
"link": "http://example.org/feed/",
|
||||
"title": "Example Feed",
|
||||
"type": "atom",
|
||||
"updated": 2003-12-13T18:30:02.000Z,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`parseFeed > (rdfFeed) 1`] = `
|
||||
{
|
||||
"id": "",
|
||||
"items": [
|
||||
{
|
||||
"description": "Great test content<br>A link: <a href="http://github.com">Github</a>",
|
||||
"link": "http://somefakesite/path/to/something.html",
|
||||
"media": [],
|
||||
"pubDate": 2011-11-04T16:35:17.000Z,
|
||||
"title": "Fast HTML Parsing",
|
||||
},
|
||||
{
|
||||
"description": "The early bird gets the worm",
|
||||
"link": "http://somefakesite/path/to/something-else.html",
|
||||
"media": [],
|
||||
"pubDate": 2011-11-04T16:34:54.000Z,
|
||||
"title": "This space intentionally left blank",
|
||||
},
|
||||
],
|
||||
"link": "https://github.com/fb55/htmlparser2/",
|
||||
"title": "A title to parse and remember",
|
||||
"type": "rdf",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`parseFeed > (rssFeed) 1`] = `
|
||||
{
|
||||
"author": "editor@example.com",
|
||||
"description": "Liftoff to Space Exploration.",
|
||||
"id": "",
|
||||
"items": [
|
||||
{
|
||||
"description": "How do Americans get ready to work with Russians aboard the International Space Station? They take a crash course in culture, language and protocol at Russia's <a href="http://howe.iki.rssi.ru/GCTC/gctc_e.htm">Star City</a>.",
|
||||
"id": "http://liftoff.msfc.nasa.gov/2003/06/03.html#item573",
|
||||
"link": "http://liftoff.msfc.nasa.gov/news/2003/news-starcity.asp",
|
||||
"media": [],
|
||||
"pubDate": 2003-06-03T09:39:21.000Z,
|
||||
"title": "Star City",
|
||||
},
|
||||
{
|
||||
"description": "Sky watchers in Europe, Asia, and parts of Alaska and Canada will experience a <a href="http://science.nasa.gov/headlines/y2003/30may_solareclipse.htm">partial eclipse of the Sun</a> on Saturday, May 31st.",
|
||||
"id": "http://liftoff.msfc.nasa.gov/2003/05/30.html#item572",
|
||||
"media": [],
|
||||
"pubDate": 2003-05-30T11:06:42.000Z,
|
||||
},
|
||||
{
|
||||
"description": "Before man travels to Mars, NASA hopes to design new engines that will let us fly through the Solar System more quickly. The proposed VASIMR engine would do that.",
|
||||
"id": "http://liftoff.msfc.nasa.gov/2003/05/27.html#item571",
|
||||
"link": "http://liftoff.msfc.nasa.gov/news/2003/news-VASIMR.asp",
|
||||
"media": [],
|
||||
"pubDate": 2003-05-27T08:37:32.000Z,
|
||||
"title": "The Engine That Does More",
|
||||
},
|
||||
{
|
||||
"description": "Compared to earlier spacecraft, the International Space Station has many luxuries, but laundry facilities are not one of them. Instead, astronauts have other options.",
|
||||
"id": "http://liftoff.msfc.nasa.gov/2003/05/20.html#item570",
|
||||
"link": "http://liftoff.msfc.nasa.gov/news/2003/news-laundry.asp",
|
||||
"media": [
|
||||
{
|
||||
"height": 200,
|
||||
"isDefault": false,
|
||||
"medium": "image",
|
||||
"url": "https://picsum.photos/200",
|
||||
"width": 200,
|
||||
},
|
||||
],
|
||||
"pubDate": 2003-05-20T08:56:02.000Z,
|
||||
"title": "Astronauts' Dirty Laundry",
|
||||
},
|
||||
],
|
||||
"link": "http://liftoff.msfc.nasa.gov/",
|
||||
"title": "Liftoff News",
|
||||
"type": "rss",
|
||||
"updated": 2003-06-10T09:41:01.000Z,
|
||||
}
|
||||
`;
|
||||
3749
node_modules/htmlparser2/src/__snapshots__/Parser.events.spec.ts.snap
generated
vendored
Normal file
3749
node_modules/htmlparser2/src/__snapshots__/Parser.events.spec.ts.snap
generated
vendored
Normal file
File diff suppressed because it is too large
834
node_modules/htmlparser2/src/__snapshots__/Tokenizer.spec.ts.snap
generated
vendored
Normal file
834
node_modules/htmlparser2/src/__snapshots__/Tokenizer.spec.ts.snap
generated
vendored
Normal file
@@ -0,0 +1,834 @@
|
||||
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
|
||||
|
||||
exports[`Tokenizer > should correctly mark attributes > for double quotes attribute 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onattribname",
|
||||
5,
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onattribdata",
|
||||
10,
|
||||
11,
|
||||
],
|
||||
[
|
||||
"onattribend",
|
||||
3,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should correctly mark attributes > for no quotes attribute 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onattribname",
|
||||
5,
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onattribdata",
|
||||
9,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onattribend",
|
||||
1,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should correctly mark attributes > for no value attribute 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onattribname",
|
||||
5,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onattribend",
|
||||
0,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should correctly mark attributes > for single quotes attribute 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onattribname",
|
||||
5,
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onattribdata",
|
||||
10,
|
||||
11,
|
||||
],
|
||||
[
|
||||
"onattribend",
|
||||
2,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should handle entities > for XML entities 1`] = `
|
||||
[
|
||||
[
|
||||
"ontextentity",
|
||||
38,
|
||||
5,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
62,
|
||||
9,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
9,
|
||||
13,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
60,
|
||||
17,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
17,
|
||||
23,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
97,
|
||||
29,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
29,
|
||||
34,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
99,
|
||||
39,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
39,
|
||||
49,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should handle entities > for entities in attributes (#276) 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onattribname",
|
||||
5,
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onattribdata",
|
||||
10,
|
||||
24,
|
||||
],
|
||||
[
|
||||
"onattribentity",
|
||||
8465,
|
||||
],
|
||||
[
|
||||
"onattribdata",
|
||||
31,
|
||||
41,
|
||||
],
|
||||
[
|
||||
"onattribend",
|
||||
3,
|
||||
42,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
43,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
44,
|
||||
58,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
8465,
|
||||
65,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
65,
|
||||
75,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should handle entities > for multi-byte entities 1`] = `
|
||||
[
|
||||
[
|
||||
"ontextentity",
|
||||
8807,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
824,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should handle entities > for trailing legacy entity 1`] = `
|
||||
[
|
||||
[
|
||||
"ontextentity",
|
||||
10801,
|
||||
10,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
215,
|
||||
16,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
16,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should not break after special tag followed by an entity > for normal special tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
7,
|
||||
10,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
12,
|
||||
17,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
39,
|
||||
24,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
25,
|
||||
27,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
28,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should not break after special tag followed by an entity > for self-closing special tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
8,
|
||||
],
|
||||
[
|
||||
"ontextentity",
|
||||
39,
|
||||
15,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
16,
|
||||
18,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should not lose data when pausing 1`] = `
|
||||
[
|
||||
[
|
||||
"ontextentity",
|
||||
38,
|
||||
5,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
5,
|
||||
12,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support self-closing special tags > for self-closing script tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
7,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
9,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
11,
|
||||
14,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
14,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
17,
|
||||
20,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support self-closing special tags > for self-closing style tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
10,
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
16,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support self-closing special tags > for self-closing textarea tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
9,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
11,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
13,
|
||||
16,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
16,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
19,
|
||||
22,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support self-closing special tags > for self-closing title tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
8,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
10,
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
13,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
16,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support self-closing special tags > for self-closing xmp tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onselfclosingtag",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
8,
|
||||
11,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
11,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
14,
|
||||
17,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support standard special tags > for normal script tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
7,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
7,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
10,
|
||||
16,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
18,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
24,
|
||||
27,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support standard special tags > for normal sitle tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
9,
|
||||
14,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
16,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
22,
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support standard special tags > for normal style tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
9,
|
||||
14,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
16,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
22,
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support standard special tags > for normal textarea tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
9,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
9,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
12,
|
||||
20,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
22,
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
28,
|
||||
31,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should support standard special tags > for normal xmp tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
7,
|
||||
10,
|
||||
],
|
||||
[
|
||||
"onopentagname",
|
||||
12,
|
||||
15,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
15,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
18,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should treat html inside special tags as text > for div inside script tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
7,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
7,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
8,
|
||||
19,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
21,
|
||||
27,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should treat html inside special tags as text > for div inside style tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
7,
|
||||
18,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
20,
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should treat html inside special tags as text > for div inside textarea tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
9,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
9,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
10,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
23,
|
||||
31,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should treat html inside special tags as text > for div inside title tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
6,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
6,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
7,
|
||||
18,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
20,
|
||||
25,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Tokenizer > should treat html inside special tags as text > for div inside xmp tag 1`] = `
|
||||
[
|
||||
[
|
||||
"onopentagname",
|
||||
1,
|
||||
4,
|
||||
],
|
||||
[
|
||||
"onopentagend",
|
||||
4,
|
||||
],
|
||||
[
|
||||
"ontext",
|
||||
5,
|
||||
16,
|
||||
],
|
||||
[
|
||||
"onclosetag",
|
||||
18,
|
||||
21,
|
||||
],
|
||||
[
|
||||
"onend",
|
||||
],
|
||||
]
|
||||
`;
|
||||
5809
node_modules/htmlparser2/src/__snapshots__/WritableStream.spec.ts.snap
generated
vendored
Normal file
5809
node_modules/htmlparser2/src/__snapshots__/WritableStream.spec.ts.snap
generated
vendored
Normal file
File diff suppressed because it is too large
87
node_modules/htmlparser2/src/__snapshots__/index.spec.ts.snap
generated
vendored
Normal file
87
node_modules/htmlparser2/src/__snapshots__/index.spec.ts.snap
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
|
||||
|
||||
exports[`Index > createDocumentStream 1`] = `
|
||||
Document {
|
||||
"children": [
|
||||
&This is text,
|
||||
<!-- and comments -->,
|
||||
<tags />,
|
||||
],
|
||||
"endIndex": null,
|
||||
"next": null,
|
||||
"parent": null,
|
||||
"prev": null,
|
||||
"startIndex": null,
|
||||
"type": "root",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`Index > createDomStream 1`] = `
|
||||
[
|
||||
&This is text,
|
||||
<!-- and comments -->,
|
||||
<tags />,
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Index > parseDOM 1`] = `
|
||||
[
|
||||
<a
|
||||
foo=""
|
||||
>
|
||||
<b>
|
||||
<c>
|
||||
ProcessingInstruction {
|
||||
"data": "?foo",
|
||||
"endIndex": null,
|
||||
"name": "?foo",
|
||||
"next": Yay!,
|
||||
"parent": <c>
|
||||
[Circular]
|
||||
Yay!
|
||||
</c>,
|
||||
"prev": null,
|
||||
"startIndex": null,
|
||||
"type": "directive",
|
||||
}
|
||||
Yay!
|
||||
</c>
|
||||
</b>
|
||||
</a>,
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`Index > parseDocument 1`] = `
|
||||
Document {
|
||||
"children": [
|
||||
<a
|
||||
foo=""
|
||||
>
|
||||
<b>
|
||||
<c>
|
||||
ProcessingInstruction {
|
||||
"data": "?foo",
|
||||
"endIndex": null,
|
||||
"name": "?foo",
|
||||
"next": Yay!,
|
||||
"parent": <c>
|
||||
[Circular]
|
||||
Yay!
|
||||
</c>,
|
||||
"prev": null,
|
||||
"startIndex": null,
|
||||
"type": "directive",
|
||||
}
|
||||
Yay!
|
||||
</c>
|
||||
</b>
|
||||
</a>,
|
||||
],
|
||||
"endIndex": null,
|
||||
"next": null,
|
||||
"parent": null,
|
||||
"prev": null,
|
||||
"startIndex": null,
|
||||
"type": "root",
|
||||
}
|
||||
`;
|
||||
79
node_modules/htmlparser2/src/index.spec.ts
generated
vendored
Normal file
79
node_modules/htmlparser2/src/index.spec.ts
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
parseDocument,
|
||||
parseDOM,
|
||||
createDocumentStream,
|
||||
createDomStream,
|
||||
DomHandler,
|
||||
DefaultHandler,
|
||||
type Parser,
|
||||
} from "./index.js";
|
||||
import { Element } from "domhandler";
|
||||
|
||||
// Add an `attributes` prop to the Element for now, to make it possible for Jest to render DOM nodes.
|
||||
Object.defineProperty(Element.prototype, "attributes", {
|
||||
get() {
|
||||
return Object.keys(this.attribs).map((name) => ({
|
||||
name,
|
||||
value: this.attribs[name],
|
||||
}));
|
||||
},
|
||||
configurable: true,
|
||||
enumerable: false,
|
||||
});
|
||||
|
||||
describe("Index", () => {
|
||||
it("parseDocument", () => {
|
||||
const dom = parseDocument("<a foo><b><c><?foo>Yay!");
|
||||
expect(dom).toMatchSnapshot();
|
||||
});
|
||||
|
||||
it("parseDOM", () => {
|
||||
const dom = parseDOM("<a foo><b><c><?foo>Yay!");
|
||||
expect(dom).toMatchSnapshot();
|
||||
});
|
||||
|
||||
it("createDocumentStream", () => {
|
||||
let documentStream!: Parser;
|
||||
|
||||
const documentPromise = new Promise(
|
||||
(resolve, reject) =>
|
||||
(documentStream = createDocumentStream((error, dom) =>
|
||||
error ? reject(error) : resolve(dom),
|
||||
)),
|
||||
);
|
||||
|
||||
for (const c of "&This is text<!-- and comments --><tags>") {
|
||||
documentStream.write(c);
|
||||
}
|
||||
|
||||
documentStream.end();
|
||||
|
||||
return expect(documentPromise).resolves.toMatchSnapshot();
|
||||
});
|
||||
|
||||
it("createDomStream", () => {
|
||||
let domStream!: Parser;
|
||||
|
||||
const domPromise = new Promise(
|
||||
(resolve, reject) =>
|
||||
(domStream = createDomStream((error, dom) =>
|
||||
error ? reject(error) : resolve(dom),
|
||||
)),
|
||||
);
|
||||
|
||||
for (const c of "&This is text<!-- and comments --><tags>") {
|
||||
domStream.write(c);
|
||||
}
|
||||
|
||||
domStream.end();
|
||||
|
||||
return expect(domPromise).resolves.toMatchSnapshot();
|
||||
});
|
||||
|
||||
describe("API", () => {
|
||||
it("should export the appropriate APIs", () => {
|
||||
expect(DomHandler).toEqual(DefaultHandler);
|
||||
});
|
||||
});
|
||||
});
|
||||
115
node_modules/htmlparser2/src/index.ts
generated
vendored
Normal file
115
node_modules/htmlparser2/src/index.ts
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
import { Parser, type ParserOptions } from "./Parser.js";
|
||||
export type { Handler, ParserOptions } from "./Parser.js";
|
||||
export { Parser } from "./Parser.js";
|
||||
|
||||
import {
|
||||
DomHandler,
|
||||
type DomHandlerOptions,
|
||||
type ChildNode,
|
||||
type Element,
|
||||
type Document,
|
||||
} from "domhandler";
|
||||
|
||||
export {
|
||||
DomHandler,
|
||||
// Old name for DomHandler
|
||||
DomHandler as DefaultHandler,
|
||||
type DomHandlerOptions,
|
||||
} from "domhandler";
|
||||
|
||||
export type Options = ParserOptions & DomHandlerOptions;
|
||||
|
||||
// Helper methods
|
||||
|
||||
/**
|
||||
* Parses the data, returns the resulting document.
|
||||
*
|
||||
* @param data The data that should be parsed.
|
||||
* @param options Optional options for the parser and DOM handler.
|
||||
*/
|
||||
export function parseDocument(data: string, options?: Options): Document {
|
||||
const handler = new DomHandler(undefined, options);
|
||||
new Parser(handler, options).end(data);
|
||||
return handler.root;
|
||||
}
|
||||
/**
|
||||
* Parses data, returns an array of the root nodes.
|
||||
*
|
||||
* Note that the root nodes still have a `Document` node as their parent.
|
||||
* Use `parseDocument` to get the `Document` node instead.
|
||||
*
|
||||
* @param data The data that should be parsed.
|
||||
* @param options Optional options for the parser and DOM handler.
|
||||
* @deprecated Use `parseDocument` instead.
|
||||
*/
|
||||
export function parseDOM(data: string, options?: Options): ChildNode[] {
|
||||
return parseDocument(data, options).children;
|
||||
}
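/*
 * A usage sketch, not from the upstream source: `parseDocument` returns the
 * `Document` root, while the deprecated `parseDOM` returns that document's
 * children — which, as noted above, still point back to it via `parent`.
 */
function exampleParseUsage(): void {
    const document = parseDocument("<p>Hello<br>world</p>");
    console.log(document.children.length); // 1 — the <p> element
    const [paragraph] = parseDOM("<p>Hello<br>world</p>");
    console.log(paragraph?.parent?.type); // "root" — the implicit Document node
}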
|
||||
/**
|
||||
* Creates a parser instance, with an attached DOM handler.
|
||||
*
|
||||
* @param callback A callback that will be called once parsing has been completed, with the resulting document.
|
||||
* @param options Optional options for the parser and DOM handler.
|
||||
* @param elementCallback An optional callback that will be called every time a tag has been completed inside of the DOM.
|
||||
*/
|
||||
export function createDocumentStream(
|
||||
callback: (error: Error | null, document: Document) => void,
|
||||
options?: Options,
|
||||
elementCallback?: (element: Element) => void,
|
||||
): Parser {
|
||||
const handler: DomHandler = new DomHandler(
|
||||
(error: Error | null) => callback(error, handler.root),
|
||||
options,
|
||||
elementCallback,
|
||||
);
|
||||
return new Parser(handler, options);
|
||||
}
|
||||
/**
|
||||
* Creates a parser instance, with an attached DOM handler.
|
||||
*
|
||||
* @param callback A callback that will be called once parsing has been completed, with an array of root nodes.
|
||||
* @param options Optional options for the parser and DOM handler.
|
||||
* @param elementCallback An optional callback that will be called every time a tag has been completed inside of the DOM.
|
||||
* @deprecated Use `createDocumentStream` instead.
|
||||
*/
|
||||
export function createDomStream(
|
||||
callback: (error: Error | null, dom: ChildNode[]) => void,
|
||||
options?: Options,
|
||||
elementCallback?: (element: Element) => void,
|
||||
): Parser {
|
||||
const handler = new DomHandler(callback, options, elementCallback);
|
||||
return new Parser(handler, options);
|
||||
}
|
||||
|
||||
export {
|
||||
default as Tokenizer,
|
||||
type Callbacks as TokenizerCallbacks,
|
||||
QuoteType,
|
||||
} from "./Tokenizer.js";
|
||||
|
||||
/*
|
||||
* All of the following exports exist for backwards-compatibility.
|
||||
* They should probably be removed eventually.
|
||||
*/
|
||||
export * as ElementType from "domelementtype";
|
||||
|
||||
import { getFeed, type Feed } from "domutils";
|
||||
|
||||
export { getFeed, type Feed } from "domutils";
|
||||
|
||||
const parseFeedDefaultOptions = { xmlMode: true };
|
||||
|
||||
/**
|
||||
* Parse a feed.
|
||||
*
|
||||
* @param feed The feed that should be parsed, as a string.
|
||||
* @param options Optionally, options for parsing. When using this, you should set `xmlMode` to `true`.
|
||||
*/
|
||||
export function parseFeed(
|
||||
feed: string,
|
||||
options: Options = parseFeedDefaultOptions,
|
||||
): Feed | null {
|
||||
return getFeed(parseDOM(feed, options));
|
||||
}
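/*
 * A usage sketch, not from the upstream source: `parseFeed` already defaults
 * to `xmlMode: true`, so an RSS/Atom/RDF document can be handed over as-is.
 * The shape of the returned `Feed` (type, title, items, pubDate, ...) matches
 * what the FeedHandler snapshot earlier in this diff shows.
 */
function exampleFeedUsage(rssXml: string): void {
    const feed = parseFeed(rssXml);
    if (feed) {
        console.log(feed.type, feed.title);
        for (const item of feed.items) {
            console.log(item.title, item.pubDate);
        }
    }
}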
|
||||
|
||||
export * as DomUtils from "domutils";