Commit f5fd2b8

Fix resolve

drewsonne committed Aug 23, 2020
2 parents b893c27 + d2dae12

Showing 36 changed files with 1,394 additions and 110 deletions.
7 changes: 7 additions & 0 deletions .gitignore
@@ -1,3 +1,10 @@
node_modules
.idea
dist
+src/**/*.d.ts.map
+src/**/*.d.ts
+src/**/*.js.map
+src/**/*.js
+lib
+.nyc_output
+Develop
14 changes: 7 additions & 7 deletions package-lock.json

2 changes: 1 addition & 1 deletion package.json
@@ -29,7 +29,7 @@
"typescript": "^3.9.7"
},
"dependencies": {
"@drewsonne/maya-dates": "^1.0.15"
"@drewsonne/maya-dates": "^1.0.20"
},
"scripts": {
"test": "mocha -r ts-node/register 'src/**/*.spec.ts'",
@@ -1,17 +1,17 @@
import {expect} from 'chai'
import 'mocha'
-import PrimitiveParser from "../parsers/primitive";
-import {IToken} from "../tokens/base";
-import NumberToken from "../tokens/primitive/number-token";
-import PeriodToken from "../tokens/primitive/period-token";
+import Layer0Parser from "../parsers/layer-0-parser";
+import NumberToken from "../tokens/layer-0/number-token";
+import PeriodToken from "../tokens/layer-0/period-token";
import TokenCollection from "../tokens/collection";
-import LineEndToken from "../tokens/primitive/line-end-token";
-import SpaceToken from "../tokens/primitive/space-token";
-import WordToken from "../tokens/primitive/word-token";
-import WildcardToken from "../tokens/primitive/wildcard-token";
-import CommentStartToken from "../tokens/primitive/comment-start-token";
-import CommentToken from "../tokens/primitive/comment-token";
-import OperatorToken from "../tokens/primitive/operator-token";
+import LineEndToken from "../tokens/layer-0/line-end-token";
+import SpaceToken from "../tokens/layer-0/space-token";
+import WordToken from "../tokens/layer-0/word-token";
+import WildcardToken from "../tokens/layer-0/wildcard-token";
+import CommentStartToken from "../tokens/layer-0/comment-start-token";
+import CommentToken from "../tokens/layer-0/comment-token";
+import OperatorToken from "../tokens/layer-0/operator-token";
+import {IToken} from "../tokens/i-token";


const NT = (n: number) => new NumberToken(n)
@@ -24,7 +24,7 @@ const ST = new SpaceToken()
const WCT = new WildcardToken()
const CST = new CommentStartToken()

-describe('primitive parser', () => {
+describe('layer-0 parser', () => {

  describe('should parse operators', () => {
    const looseOperations: [string, IToken[]][] = [
@@ -50,7 +50,7 @@ describe('primitive parser', () => {
    operations.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
-        const tokenised = new PrimitiveParser().parse(rawString)
+        const tokenised = new Layer0Parser().parse(rawString)
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
@@ -86,7 +86,7 @@ describe('primitive parser', () => {
    fullLines.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
-        const tokenised = new PrimitiveParser().parse(rawString)
+        const tokenised = new Layer0Parser().parse(rawString)
        // expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
@@ -110,7 +110,7 @@ describe('primitive parser', () => {
    fullDates.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
-        const tokenised = new PrimitiveParser().parse(rawString)
+        const tokenised = new Layer0Parser().parse(rawString)
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
@@ -141,7 +141,7 @@ describe('primitive parser', () => {
    crs.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
-        const tokenised = new PrimitiveParser().parse(rawString)
+        const tokenised = new Layer0Parser().parse(rawString)
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
@@ -169,14 +169,15 @@ describe('primitive parser', () => {
        NT(17), PT,
        ST, NT(2), PT,
        ST, NT(1),
-      ]]
+      ]],
+      ['*.*.*.7.13', [WCT, PT, WCT, PT, WCT, PT, NT(7), PT, NT(13)]]
    ]

    const parsed: [string, TokenCollection][] = dates.map((row: [string, IToken[]]) => [row[0], new TokenCollection(row[1])])
    parsed.forEach((pattern) => {
      const [rawString, expectedTokens] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
-        const tokenised = new PrimitiveParser().parse(rawString)
+        const tokenised = new Layer0Parser().parse(rawString)
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
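For reference, the layer-0 pass these tests exercise can be driven directly. Below is a minimal usage sketch, assuming the module paths and the TokenCollection API (length, index(i)) shown in the spec above; it is illustrative only, not confirmed by published documentation.

// Hypothetical usage sketch; import path assumed from the spec's relative imports.
import Layer0Parser from "../parsers/layer-0-parser";

// Per the tests, '0.0.0.7.13' tokenises into alternating NumberToken and
// PeriodToken values: [NT(0), PT, NT(0), PT, NT(0), PT, NT(7), PT, NT(13)].
const tokens = new Layer0Parser().parse('0.0.0.7.13');
for (let i = 0; i < tokens.length; i++) {
  console.log(tokens.index(i)); // each entry implements IToken
}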
203 changes: 203 additions & 0 deletions src/__tests__/layer-1-parser.spec.ts
@@ -0,0 +1,203 @@
import {expect} from 'chai'
import 'mocha'
import NumberToken from "../tokens/layer-0/number-token";
import WordToken from "../tokens/layer-0/word-token";
import CommentToken from "../tokens/layer-0/comment-token";
import OperatorToken from "../tokens/layer-0/operator-token";
import PeriodToken from "../tokens/layer-0/period-token";
import LineEndToken from "../tokens/layer-0/line-end-token";
import SpaceToken from "../tokens/layer-0/space-token";
import WildcardToken from "../tokens/layer-0/wildcard-token";
import CommentStartToken from "../tokens/layer-0/comment-start-token";
import TokenCollection from "../tokens/collection";
import CalendarRoundToken from "../tokens/layer-1/calendar-round-token";
import LongCountToken from "../tokens/layer-1/long-count-token";
import {IToken} from "../tokens/i-token";
import Layer0Parser from "../parsers/layer-0-parser";

const NT = (n: number) => new NumberToken(n)
const WT = (w: string) => new WordToken(w)
const CT = (c: string) => new CommentToken(c)
const OT = (o: string) => new OperatorToken(o)
const PT = new PeriodToken()
const LET = new LineEndToken()
const ST = new SpaceToken()
const WCT = new WildcardToken()
const CST = new CommentStartToken()


describe('layer-1 parser', () => {

  describe('should parse operators', () => {
    const looseOperations: [string, IToken[]][] = [
      [
        '4 Ajaw 8 Kumk\'u - 5 Kimi 4 Mol',
        [
          CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')]),
          OT('-'),
          CalendarRoundToken.parse([NT(5), WT('Kimi'), NT(4), WT('Mol')]),

        ]
      ],
      [
        '9.2.10.10.10 + 10.5.1',
        [
          LongCountToken.parse([NT(9), PT, NT(2), PT, NT(10), PT, NT(10), PT, NT(10)]),
          OT('+'),
          LongCountToken.parse([NT(10), PT, NT(5), PT, NT(1)])
        ]
      ]
    ]
    const operations: [string, TokenCollection][] = looseOperations.map((row: [string, IToken[]]) => {
      return [row[0], new TokenCollection(row[1])];
    })
    operations.forEach((pattern) => {
      const [rawText, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawText} -> ${expectedTokens}`, () => {

        const layer1Tokens = new Layer0Parser().parse(rawText).processLayer1()

        expect(layer1Tokens.length).to.eq(expectedTokens.length)
        for (let i = 0; i < layer1Tokens.length; i++) {
          expect(
            layer1Tokens.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    });
  })

  describe('should parse calendar rounds', () => {
    const looseCrs: [string, IToken[]][] = [
      [
        '4 Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        "4 Ajaw 8 Kumk\'u\n3 Kawak **",
        [
          CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')]),
          CalendarRoundToken.parse([NT(3), WT('Kawak'), WCT, WCT])
        ]
      ],
      [
        '3 Kawak **',
        [CalendarRoundToken.parse([NT(3), WT('Kawak'), WCT, WCT])]
      ],
      [
        '* Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([WCT, WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        '6 Manik\' 5 Mol',
        [CalendarRoundToken.parse([NT(6), WT('Manik\''), NT(5), WT('Mol')])]
      ],
      [
        '6 Manik\' 5 Mol',
        [CalendarRoundToken.parse([NT(6), WT('Manik\''), NT(5), WT('Mol')])]
      ],
      [
        '* * 12 Mol',
        [CalendarRoundToken.parse([WCT, WCT, NT(12), WT('Mol')])]
      ],
      [
        '3 Kawak 7 Kumk\'u',
        [CalendarRoundToken.parse([NT(3), WT('Kawak'), NT(7), WT('Kumk\'u')])]
      ],
      [
        '4 Ajaw 8 Kumk\'u',
        [CalendarRoundToken.parse([NT(4), WT('Ajaw'), NT(8), WT('Kumk\'u')])]
      ],
      [
        '** 13 Xul',
        [CalendarRoundToken.parse([WCT, WCT, NT(13), WT('Xul')])]
      ],
      [
        '6 Kimi * * ',
        [CalendarRoundToken.parse([NT(6), WT('Kimi'), WCT, WCT])]
      ],
      [
        '5 Kimi 4 Mol',
        [CalendarRoundToken.parse([NT(5), WT('Kimi'), NT(4), WT('Mol')])]
      ],
      [
        '* Chikchan 3 Mol #Hello, world',
        [
          CalendarRoundToken.parse([WCT, WT('Chikchan'), NT(3), WT('Mol')]),
          CT('Hello, world')
        ]
      ],
    ]
    const crs: [string, TokenCollection][] = looseCrs.map((row: [string, IToken[]]) => [row[0], new TokenCollection(row[1])])
    crs.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {

        const tokenised = new Layer0Parser().parse(rawString).processLayer1()

        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
            tokenised.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    })
  })

  describe('should parse long counts', () => {
    const looseLongCounts: [string, IToken[]][] = [
      // [
      // '7.13',
      // [LongCountToken.parse([NT(7), PT, NT(13)])]
      // ],
      [
        '0.0.0.7.13',
        [LongCountToken.parse([NT(0), PT, NT(0), PT, NT(0), PT, NT(7), PT, NT(13)])]
      ],
      [
        '9.16.19.17.19',
        [LongCountToken.parse([NT(9), PT, NT(16), PT, NT(19), PT, NT(17), PT, NT(19)])]
      ],
      [
        "10.10\n9.9",
        [LongCountToken.parse([NT(10), PT, NT(10)]), LongCountToken.parse([NT(9), PT, NT(9)])]
      ],
      [
        ' 8. 7. 6. 5. 4.17. 2. 1',
        [LongCountToken.parse([
          NT(8), PT,
          NT(7), PT,
          NT(6), PT,
          NT(5), PT,
          NT(4), PT,
          NT(17), PT,
          NT(2), PT,
          NT(1),
        ])]
      ]
    ]

    const longcounts: [string, TokenCollection][] = looseLongCounts.map((row: [string, IToken[]]) => [row[0], new TokenCollection(row[1])])
    longcounts.forEach((pattern) => {
      const [rawString, expectedTokens]: [string, TokenCollection] = pattern
      it(`${rawString} -> ${expectedTokens}`, () => {
        const tokenised = new Layer0Parser().parse(rawString).processLayer1()
        expect(tokenised.length).to.eq(expectedTokens.length)
        for (let i = 0; i < tokenised.length; i++) {
          expect(
            tokenised.index(i).equal(
              expectedTokens.index(i)
            ), `Comparing ${i}`
          ).to.be.true
        }
      })
    })
  })

})
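Taken together with the layer-0 spec, these tests sketch a two-stage pipeline: Layer0Parser turns raw text into primitive tokens, and processLayer1() folds runs of those tokens into CalendarRoundToken and LongCountToken values. A hedged end-to-end sketch follows, with the import path assumed from the spec's relative imports rather than a documented entry point.

// Hypothetical sketch of the two-layer flow exercised by this spec.
import Layer0Parser from "../parsers/layer-0-parser";

// Per the operator test above, this should yield three layer-1 tokens:
// LongCountToken(9.2.10.10.10), OperatorToken('+'), LongCountToken(10.5.1).
const layer1 = new Layer0Parser().parse('9.2.10.10.10 + 10.5.1').processLayer1();
for (let i = 0; i < layer1.length; i++) {
  console.log(layer1.index(i));
}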
