Commit
Showing 36 changed files with 1,394 additions and 110 deletions.
@@ -1,3 +1,10 @@
node_modules
.idea
dist
src/**/*.d.ts.map
src/**/*.d.ts
src/**/*.js.map
src/**/*.js
lib
.nyc_output
Develop
@@ -0,0 +1,203 @@
import {expect} from 'chai'
import 'mocha'
import NumberToken from "../tokens/layer-0/number-token";
import WordToken from "../tokens/layer-0/word-token";
import CommentToken from "../tokens/layer-0/comment-token";
import OperatorToken from "../tokens/layer-0/operator-token";
import PeriodToken from "../tokens/layer-0/period-token";
import LineEndToken from "../tokens/layer-0/line-end-token";
import SpaceToken from "../tokens/layer-0/space-token";
import WildcardToken from "../tokens/layer-0/wildcard-token";
import CommentStartToken from "../tokens/layer-0/comment-start-token";
import TokenCollection from "../tokens/collection";
import CalendarRoundToken from "../tokens/layer-1/calendar-round-token";
import LongCountToken from "../tokens/layer-1/long-count-token";
import {IToken} from "../tokens/i-token";
import Layer0Parser from "../parsers/layer-0-parser";

// Shorthand factories and singletons for the layer-0 tokens used in the expected outputs below.
const NT = (n: number) => new NumberToken(n)
const WT = (w: string) => new WordToken(w)
const CT = (c: string) => new CommentToken(c)
const OT = (o: string) => new OperatorToken(o)
const PT = new PeriodToken()
const LET = new LineEndToken()
const ST = new SpaceToken()
const WCT = new WildcardToken()
const CST = new CommentStartToken()

describe('layer-1 parser', () => {

  describe('should parse operators', () => {
    // Each case pairs a raw input string with the layer-1 tokens it should produce.
    const looseOperations: [string, IToken[]][] = [
      [
        '4 Ajaw 8 Kumk\'u - 5 Kimi 4 Mol',
        [
          CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')]),
          OT('-'),
          CalendarRoundToken.parse([NT(5), WT('Kimi'), NT(4), WT('Mol')]),
        ]
      ],
      [
        '9.2.10.10.10 + 10.5.1',
        [
          LongCountToken.parse([NT(9), PT, NT(2), PT, NT(10), PT, NT(10), PT, NT(10)]),
          OT('+'),
          LongCountToken.parse([NT(10), PT, NT(5), PT, NT(1)])
        ]
      ]
    ]
    const operations: [string, TokenCollection][] = looseOperations.map((row: [string, IToken[]]) => {
      return [row[0], new TokenCollection(row[1])];
    })
    operations.forEach((pattern) => {
      const [rawText, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawText} -> ${expectedTokens}`, () => {
        const layer1Tokens = new Layer0Parser().parse(rawText).processLayer1()

        expect(layer1Tokens.length).to.eq(expectedTokens.length)
        for (let i = 0; i < layer1Tokens.length; i++) {
          expect(
            layer1Tokens.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    });
  })

  describe('should parse calendar rounds', () => {
    const looseCrs: [string, IToken[]][] = [
      [
        '4 Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        "4 Ajaw 8 Kumk\'u\n3 Kawak **",
        [
          CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')]),
          CalendarRoundToken.parse([NT(3), WT('Kawak'), WCT, WCT])
        ]
      ],
      [
        '3 Kawak **',
        [CalendarRoundToken.parse([NT(3), WT('Kawak'), WCT, WCT])]
      ],
      [
        '* Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([WCT, WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        '6 Manik\' 5 Mol',
        [CalendarRoundToken.parse([NT(6), WT('Manik\''), NT(5), WT('Mol')])]
      ],
      [
        '6 Manik\' 5 Mol',
        [CalendarRoundToken.parse([NT(6), WT('Manik\''), NT(5), WT('Mol')])]
      ],
      [
        '* * 12 Mol',
        [CalendarRoundToken.parse([WCT, WCT, NT(12), WT('Mol')])]
      ],
      [
        '3 Kawak 7 Kumk\'u',
        [CalendarRoundToken.parse([NT(3), WT('Kawak'), NT(7), WT('Kumk\'u')])]
      ],
      [
        '4 Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        '** 13 Xul',
        [CalendarRoundToken.parse([WCT, WCT, NT(13), WT('Xul')])]
      ],
      [
        '6 Kimi * * ',
        [CalendarRoundToken.parse([NT(6), WT('Kimi'), WCT, WCT])]
      ],
      [
        '5 Kimi 4 Mol',
        [CalendarRoundToken.parse([NT(5), WT('Kimi'), NT(4), WT('Mol')])]
      ],
      [
        '* Chikchan 3 Mol #Hello, world',
        [
          CalendarRoundToken.parse([WCT, WT('Chikchan'), NT(3), WT('Mol')]),
          CT('Hello, world')
        ]
      ],
    ]
    const crs: [string, TokenCollection][] = looseCrs.map((row: [string, IToken[]]) => [row[0], new TokenCollection(row[1])])
    crs.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
        const tokenised = new Layer0Parser().parse(rawString).processLayer1()

        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
            tokenised.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    })
  })

  describe('should parse long counts', () => {
    const looseLongCounts: [string, IToken[]][] = [
      // [
      //   '7.13',
      //   [LongCountToken.parse([NT(7), PT, NT(13)])]
      // ],
      [
        '0.0.0.7.13',
        [LongCountToken.parse([NT(0), PT, NT(0), PT, NT(0), PT, NT(7), PT, NT(13)])]
      ],
      [
        '9.16.19.17.19',
        [LongCountToken.parse([NT(9), PT, NT(16), PT, NT(19), PT, NT(17), PT, NT(19)])]
      ],
      [
        "10.10\n9.9",
        [LongCountToken.parse([NT(10), PT, NT(10)]), LongCountToken.parse([NT(9), PT, NT(9)])]
      ],
      [
        ' 8. 7. 6. 5. 4.17. 2. 1',
        [LongCountToken.parse([
          NT(8), PT,
          NT(7), PT,
          NT(6), PT,
          NT(5), PT,
          NT(4), PT,
          NT(17), PT,
          NT(2), PT,
          NT(1),
        ])]
      ]
    ]

    const longcounts: [string, TokenCollection][] = looseLongCounts.map((row: [string, IToken[]]) => [row[0], new TokenCollection(row[1])])
    longcounts.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
        const tokenised = new Layer0Parser().parse(rawString).processLayer1()
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
            tokenised.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    })
  })

})
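For orientation, here is a minimal usage sketch of the pipeline these tests exercise. It assumes only the API visible in the tests above (Layer0Parser.parse, processLayer1, and TokenCollection's length and index); rendering a token via string interpolation is an assumption carried over from the it(...) titles.

// A minimal sketch, assuming the API shown in the tests above:
// parse() tokenises the raw text into layer-0 tokens, and processLayer1()
// groups them into layer-1 tokens such as CalendarRoundToken and LongCountToken.
import Layer0Parser from "../parsers/layer-0-parser";

const tokens = new Layer0Parser()
  .parse("9.2.10.10.10 + 10.5.1")
  .processLayer1()

// Per the operator test above, this should yield three tokens:
// a LongCountToken, an OperatorToken('+'), and another LongCountToken.
for (let i = 0; i < tokens.length; i++) {
  // Assumes tokens render usefully when interpolated into a template string,
  // as the test titles above do with whole TokenCollections.
  console.log(`${i}: ${tokens.index(i)}`)
}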