TypeScript/tests/cases/unittests/services/colorization.ts

449 lines
20 KiB
TypeScript
Raw Normal View History

2014-10-17 03:13:26 +02:00
/// <reference path="..\..\..\..\src\harness\external\mocha.d.ts" />
/// <reference path="..\..\..\..\src\harness\harnessLanguageService.ts" />
2014-08-09 01:45:10 +02:00
interface ClassificationEntry {
value: any;
classification: ts.TokenClass;
position?: number;
2014-08-09 01:45:10 +02:00
}
2014-08-08 07:21:12 +02:00
describe("Colorization", function () {
2015-02-09 18:19:50 +01:00
// Use the shim adapter to ensure test coverage of the shim layer for the classifier
const languageServiceAdapter = new Harness.LanguageService.ShimLanguageServiceAdapter(/*preprocessToResolve*/ false);
const classifier = languageServiceAdapter.getClassifier();
function getEntryAtPosition(result: ts.ClassificationResult, position: number) {
let entryPosition = 0;
for (let i = 0, n = result.entries.length; i < n; i++) {
const entry = result.entries[i];
if (entryPosition === position) {
return entry;
}
entryPosition += entry.length;
2014-08-09 01:45:10 +02:00
}
return undefined;
}
2014-07-13 01:04:16 +02:00
/** Builds an expected classification entry for a token of the given class. */
function createClassification(text: string, tokenClass: ts.TokenClass, position?: number): ClassificationEntry {
    return {
        value: text,
        classification: tokenClass,
        position,
    };
}

/**
 * Builds the sentinel entry checked against `result.finalLexState`
 * (recognized by its undefined `classification`).
 */
function finalEndOfLineState(value: number): ClassificationEntry {
    return { value, classification: undefined, position: 0 };
}

// Shorthand factories — one per token class — for readable expected-entry lists.
function punctuation(text: string, position?: number) { return createClassification(text, ts.TokenClass.Punctuation, position); }
function keyword(text: string, position?: number) { return createClassification(text, ts.TokenClass.Keyword, position); }
function operator(text: string, position?: number) { return createClassification(text, ts.TokenClass.Operator, position); }
function comment(text: string, position?: number) { return createClassification(text, ts.TokenClass.Comment, position); }
function whitespace(text: string, position?: number) { return createClassification(text, ts.TokenClass.Whitespace, position); }
function identifier(text: string, position?: number) { return createClassification(text, ts.TokenClass.Identifier, position); }
function numberLiteral(text: string, position?: number) { return createClassification(text, ts.TokenClass.NumberLiteral, position); }
function stringLiteral(text: string, position?: number) { return createClassification(text, ts.TokenClass.StringLiteral, position); }
2014-08-09 01:45:10 +02:00
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
function testLexicalClassification(text: string, initialEndOfLineState: ts.EndOfLineState, ...expectedEntries: ClassificationEntry[]): void {
const result = classifier.getClassificationsForLine(text, initialEndOfLineState, /*syntacticClassifierAbsent*/ false);
2014-08-09 01:45:10 +02:00
for (let i = 0, n = expectedEntries.length; i < n; i++) {
const expectedEntry = expectedEntries[i];
2014-08-09 01:45:10 +02:00
if (expectedEntry.classification === undefined) {
assert.equal(result.finalLexState, expectedEntry.value, "final endOfLineState does not match expected.");
2014-08-09 01:45:10 +02:00
}
else {
const actualEntryPosition = expectedEntry.position !== undefined ? expectedEntry.position : text.indexOf(expectedEntry.value);
2014-08-09 01:45:10 +02:00
assert(actualEntryPosition >= 0, "token: '" + expectedEntry.value + "' does not exit in text: '" + text + "'.");
const actualEntry = getEntryAtPosition(result, actualEntryPosition);
2014-08-09 01:45:10 +02:00
assert(actualEntry, "Could not find classification entry for '" + expectedEntry.value + "' at position: " + actualEntryPosition);
assert.equal(actualEntry.classification, expectedEntry.classification, "Classification class does not match expected. Expected: " + ts.TokenClass[expectedEntry.classification] + ", Actual: " + ts.TokenClass[actualEntry.classification]);
2014-09-23 21:23:33 +02:00
assert.equal(actualEntry.length, expectedEntry.value.length, "Classification length does not match expected. Expected: " + ts.TokenClass[expectedEntry.value.length] + ", Actual: " + ts.TokenClass[actualEntry.length]);
2014-08-09 01:45:10 +02:00
}
}
}
2014-07-13 01:04:16 +02:00
2014-08-09 01:45:10 +02:00
describe("test getClassifications", function () {
it("Returns correct token classes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("var x: string = \"foo\"; //Hello",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
keyword("var"),
whitespace(" "),
identifier("x"),
punctuation(":"),
keyword("string"),
operator("="),
stringLiteral("\"foo\""),
comment("//Hello"),
punctuation(";"));
2014-07-13 01:04:16 +02:00
});
2014-09-23 23:14:27 +02:00
it("correctly classifies a comment after a divide operator", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("1 / 2 // comment",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
numberLiteral("1"),
whitespace(" "),
operator("/"),
numberLiteral("2"),
comment("// comment"));
2014-07-13 01:04:16 +02:00
});
2014-09-23 23:14:27 +02:00
it("correctly classifies a literal after a divide operator", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("1 / 2, 3 / 4",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
numberLiteral("1"),
whitespace(" "),
operator("/"),
numberLiteral("2"),
numberLiteral("3"),
numberLiteral("4"),
operator(","));
2014-07-13 01:04:16 +02:00
});
it("correctly classifies a multi-line string with one backslash", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'line1\\",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
stringLiteral("'line1\\"),
finalEndOfLineState(ts.EndOfLineState.InSingleQuoteStringLiteral));
2014-07-13 01:04:16 +02:00
});
it("correctly classifies a multi-line string with three backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'line1\\\\\\",
ts.EndOfLineState.None,
stringLiteral("'line1\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.InSingleQuoteStringLiteral));
});
it("correctly classifies an unterminated single-line string with no backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'line1",
ts.EndOfLineState.None,
stringLiteral("'line1"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies an unterminated single-line string with two backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'line1\\\\",
ts.EndOfLineState.None,
stringLiteral("'line1\\\\"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies an unterminated single-line string with four backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'line1\\\\\\\\",
ts.EndOfLineState.None,
stringLiteral("'line1\\\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies the continuing line of a multi-line string ending in one backslash", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\"),
finalEndOfLineState(ts.EndOfLineState.InDoubleQuoteStringLiteral));
});
it("correctly classifies the continuing line of a multi-line string ending in three backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("\\",
2014-08-09 01:45:10 +02:00
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\"),
finalEndOfLineState(ts.EndOfLineState.InDoubleQuoteStringLiteral));
2014-07-13 01:04:16 +02:00
});
it("correctly classifies the last line of an unterminated multi-line string ending in no backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(" ",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral(" "),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies the last line of an unterminated multi-line string ending in two backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("\\\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\\\"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies the last line of an unterminated multi-line string ending in four backslashes", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("\\\\\\\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.None));
});
2014-09-23 23:14:27 +02:00
it("correctly classifies the last line of a multi-line string", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("'",
2014-08-09 01:45:10 +02:00
ts.EndOfLineState.InSingleQuoteStringLiteral,
stringLiteral("'"),
finalEndOfLineState(ts.EndOfLineState.None));
2014-07-13 01:04:16 +02:00
});
2014-09-23 23:14:27 +02:00
it("correctly classifies an unterminated multiline comment", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("/*",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
comment("/*"),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});
it("correctly classifies the termination of a multiline comment", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(" */ ",
ts.EndOfLineState.InMultiLineCommentTrivia,
comment(" */"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("correctly classifies the continuation of a multiline comment", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("LOREM IPSUM DOLOR ",
ts.EndOfLineState.InMultiLineCommentTrivia,
comment("LOREM IPSUM DOLOR "),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});
it("correctly classifies an unterminated multiline comment on a line ending in '/*/'", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(" /*/",
ts.EndOfLineState.None,
comment("/*/"),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});
2014-09-23 23:14:27 +02:00
it("correctly classifies an unterminated multiline comment with trailing space", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("/* ",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
comment("/* "),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});
2014-09-23 23:14:27 +02:00
it("correctly classifies a keyword after a dot", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("a.var",
ts.EndOfLineState.None,
2014-08-09 01:45:10 +02:00
identifier("var"));
});
it("correctly classifies a string literal after a dot", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("a.\"var\"",
ts.EndOfLineState.None,
stringLiteral("\"var\""));
});
it("correctly classifies a keyword after a dot separated by comment trivia", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("a./*hello world*/ var",
ts.EndOfLineState.None,
identifier("a"),
punctuation("."),
comment("/*hello world*/"),
identifier("var"));
});
it("classifies a property access with whitespace around the dot", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(" x .\tfoo ()",
ts.EndOfLineState.None,
identifier("x"),
identifier("foo"));
});
it("classifies a keyword after a dot on previous line", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("var",
ts.EndOfLineState.None,
keyword("var"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies multiple keywords properly", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("public static",
ts.EndOfLineState.None,
keyword("public"),
keyword("static"),
finalEndOfLineState(ts.EndOfLineState.None));
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("public var",
ts.EndOfLineState.None,
keyword("public"),
identifier("var"),
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies a single line no substitution template string correctly", function () {
    // A fully closed backtick string with no "${" is one string token.
    testLexicalClassification(
        "`number number public string`",
        ts.EndOfLineState.None,
        stringLiteral("`number number public string`"),
        finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies substitution parts of a template string correctly", function () {
    // Template text (including "${"/"}") is string; the expressions inside
    // the substitutions classify on their own.
    testLexicalClassification(
        "`number '${ 1 + 1 }' string '${ 'hello' }'`",
        ts.EndOfLineState.None,
        stringLiteral("`number '${"),
        numberLiteral("1"),
        operator("+"),
        numberLiteral("1"),
        stringLiteral("}' string '${"),
        stringLiteral("'hello'"),
        stringLiteral("}'`"),
        finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies an unterminated no substitution template string correctly", function () {
    // No closing backtick: the template continues onto the next line.
    testLexicalClassification(
        "`hello world",
        ts.EndOfLineState.None,
        stringLiteral("`hello world"),
        finalEndOfLineState(ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate));
});

it("classifies the entire line of an unterminated multiline no-substitution/head template", function () {
    testLexicalClassification(
        "...",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral("..."),
        finalEndOfLineState(ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate));
});

it("classifies the entire line of an unterminated multiline template middle/end", function () {
    testLexicalClassification(
        "...",
        ts.EndOfLineState.InTemplateMiddleOrTail,
        stringLiteral("..."),
        finalEndOfLineState(ts.EndOfLineState.InTemplateMiddleOrTail));
});

it("classifies a termination of a multiline template head", function () {
    // "${" ends the head: the next line starts inside a substitution.
    testLexicalClassification(
        "...${",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral("...${"),
        finalEndOfLineState(ts.EndOfLineState.InTemplateSubstitutionPosition));
});

it("classifies the termination of a multiline no substitution template", function () {
    // The closing backtick finishes the template begun on a prior line.
    testLexicalClassification(
        "...`",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral("...`"),
        finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies the substitution parts and middle/tail of a multiline template string", function () {
    testLexicalClassification(
        "${ 1 + 1 }...`",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral("${"),
        numberLiteral("1"),
        operator("+"),
        numberLiteral("1"),
        stringLiteral("}...`"),
        finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies a template middle and propagates the end of line state", function () {
    // NOTE(review): this test body is identical to the previous one (same
    // input and same starting state) — possibly a copy/paste; confirm whether
    // the starting state was meant to be InTemplateMiddleOrTail.
    testLexicalClassification(
        "${ 1 + 1 }...`",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral("${"),
        numberLiteral("1"),
        operator("+"),
        numberLiteral("1"),
        stringLiteral("}...`"),
        finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies substitution expressions with curly braces appropriately", function () {
    let position = 0;
    let previousLength = 0;

    // Braces belonging to the substitution expressions (arrow body, object
    // literal) must not be confused with the "}" that resumes template text.
    testLexicalClassification("...${ () => { } } ${ { x: `1` } }...`",
        ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
        stringLiteral(advance("...${"), position),
        punctuation(advance(" ", "("), position),
        punctuation(advance(")"), position),
        punctuation(advance(" ", "=>"), position),
        punctuation(advance(" ", "{"), position),
        punctuation(advance(" ", "}"), position),
        stringLiteral(advance(" ", "} ${"), position),
        punctuation(advance(" ", "{"), position),
        identifier(advance(" ", "x"), position),
        punctuation(advance(":"), position),
        stringLiteral(advance(" ", "`1`"), position),
        punctuation(advance(" ", "}"), position),
        stringLiteral(advance(" ", "}...`"), position),
        finalEndOfLineState(ts.EndOfLineState.None));

    // Moves 'position' forward by the length of everything previously seen,
    // records the length of each new piece, and returns only the last piece.
    function advance(...pieces: string[]): string {
        for (const piece of pieces) {
            position += previousLength;
            previousLength = piece.length;
        }
        return ts.lastOrUndefined(pieces);
    }
});
it("classifies partially written generics correctly.", function () {
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("Foo<number",
ts.EndOfLineState.None,
identifier("Foo"),
operator("<"),
identifier("number"),
finalEndOfLineState(ts.EndOfLineState.None));
// Looks like a cast, should get classified as a keyword.
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("<number",
ts.EndOfLineState.None,
operator("<"),
keyword("number"),
finalEndOfLineState(ts.EndOfLineState.None));
// handle nesting properly.
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification("Foo<Foo,Foo<number",
ts.EndOfLineState.None,
identifier("Foo"),
operator("<"),
identifier("Foo"),
2014-10-20 22:01:21 +02:00
operator(","),
identifier("Foo"),
operator("<"),
identifier("number"),
finalEndOfLineState(ts.EndOfLineState.None));
2014-12-12 02:04:21 +01:00
});
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
it("LexicallyClassifiesConflictTokens", () => {
2014-12-12 02:04:21 +01:00
// Test conflict markers.
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(
2014-12-12 02:04:21 +01:00
"class C {\r\n\
<<<<<<< HEAD\r\n\
v = 1;\r\n\
=======\r\n\
v = 2;\r\n\
>>>>>>> Branch - a\r\n\
}",
ts.EndOfLineState.None,
2014-12-12 02:04:21 +01:00
keyword("class"),
identifier("C"),
punctuation("{"),
comment("<<<<<<< HEAD"),
identifier("v"),
operator("="),
numberLiteral("1"),
punctuation(";"),
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
comment("=======\r\n v = 2;\r\n"),
2014-12-12 02:04:21 +01:00
comment(">>>>>>> Branch - a"),
punctuation("}"),
finalEndOfLineState(ts.EndOfLineState.None));
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
testLexicalClassification(
"<<<<<<< HEAD\r\n\
class C { }\r\n\
=======\r\n\
class D { }\r\n\
>>>>>>> Branch - a\r\n",
ts.EndOfLineState.None,
Provide better error recovery when we encounter merge markers in the source. Previously we would just treat each merge marker as trivia and then continue scanning and parsing like normal. This worked well in some scenarios, but fell down in others like: ``` class C { public foo() { <<<<<<< HEAD this.bar(); } ======= this.baz(); } >>>>>>> Branch public bar() { } } ``` The problem stems from the previous approach trying to incorporate both branches of the merge into the final tree. In a case like this, that approach breaks down entirely. The the parser ends up seeing the close curly in both included sections, and it considers the class finished. Then, it starts erroring when it encounters "public bar()". The fix is to only incorporate one of these sections into the tree. Specifically, we only include the first section. The second sectoin is treated like trivia and does not affect the parse at all. To make the experience more pleasant we do *lexically* classify the second section. That way it does not appear as just plain black text in the editor. Instead, it will have appropriate lexicla classifications for keywords, literals, comments, operators, punctuation, etc. However, any syntactic or semantic feature will not work in the second block due to this being trivia as far as any feature is concerned. This experience is still much better than what we had originally (where merge markers would absolutely) destroy the parse tree. And it is better than what we checked in last week, which could easily create a borked tree for many types of merges. Now, almost all merges should still leave the tree in good shape. All LS features will work in the first section, and lexical classification will work in the second.
2014-12-19 04:18:13 +01:00
comment("<<<<<<< HEAD"),
keyword("class"),
identifier("C"),
punctuation("{"),
punctuation("}"),
comment("=======\r\nclass D { }\r\n"),
comment(">>>>>>> Branch - a"),
finalEndOfLineState(ts.EndOfLineState.None));
});
2015-02-20 21:15:37 +01:00
it("'of' keyword", function () {
testLexicalClassification("for (var of of of) { }",
ts.EndOfLineState.None,
2015-02-20 21:15:37 +01:00
keyword("for"),
punctuation("("),
keyword("var"),
keyword("of"),
keyword("of"),
keyword("of"),
punctuation(")"),
punctuation("{"),
punctuation("}"),
finalEndOfLineState(ts.EndOfLineState.None));
2015-02-20 21:15:37 +01:00
});
});
});