diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8442747cf..1969c1297 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,20 +6,19 @@ on: pull_request: branches: [ main ] +env: + GO_VERSION: '1.19.2' + jobs: build: name: build - strategy: - matrix: - go_version: - - 1.19.2 runs-on: ubuntu-latest steps: - - name: Set up Go ${{ matrix.go_version }} + - name: Set up Go ${{ env.GO_VERSION }} uses: actions/setup-go@v3 with: - go-version: ${{ matrix.go_version }} + go-version: ${{ env.GO_VERSION }} - name: Setup buf action uses: bufbuild/buf-setup-action@v1 @@ -51,7 +50,7 @@ jobs: run: make bench - name: Download previous benchmark data - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ./cache key: ${{ runner.os }}-benchmark @@ -69,6 +68,6 @@ jobs: comment-always: true - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 with: file: ./coverage.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 759dab218..cba6c3668 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,35 @@ and Yorkie adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) ## [Unreleased] +## [0.4.6] - 2023-08-25 + +### Added +* Set cobra default output to stdout by @blurfx in https://github.com/yorkie-team/yorkie/pull/599 +* Fetch latest snapshot metadata to determine snapshot creation need by @hyemmie in https://github.com/yorkie-team/yorkie/pull/597 +* Update contributing docs by @MoonGyu1 in https://github.com/yorkie-team/yorkie/pull/601 +* Add Pagination to Listing Projects for Housekeeping by @tedkimdev in https://github.com/yorkie-team/yorkie/pull/587 +* Update workflow with latest versions of the actions which runs on Node16 by @jongwooo in https://github.com/yorkie-team/yorkie/pull/620 +* Add integration tree test for sync with js-sdk by @MoonGyu1 in https://github.com/yorkie-team/yorkie/pull/611 +* Add testcases for sync with js-sdk by @MoonGyu1 in 
https://github.com/yorkie-team/yorkie/pull/621 +* Add tree document by @MoonGyu1 in https://github.com/yorkie-team/yorkie/pull/608 +* Cache ProjectInfo by @blurfx in https://github.com/yorkie-team/yorkie/pull/586 +* Handle concurrent editing of Tree.Edit by @hackerwins, @MoonGyu1, @sejongk in https://github.com/yorkie-team/yorkie/pull/607 +* Support multi-level and partial element selection by @sejongk, @hackerwins in https://github.com/yorkie-team/yorkie/pull/624 + +### Changed +* Remove Select operation from Text by @joonhyukchoi in https://github.com/yorkie-team/yorkie/pull/589 +* Change 'Documents' from plural to singular in DocEvent by @chacha912 in https://github.com/yorkie-team/yorkie/pull/613 +* Cleanup proto by @chacha912 in https://github.com/yorkie-team/yorkie/pull/614 +* Replace matrix strategy with environment variable by @jongwooo in https://github.com/yorkie-team/yorkie/pull/619 +* Change TreeNode to have IDs instead of insPrev, insNext by @JOOHOJANG in https://github.com/yorkie-team/yorkie/pull/622 + +### Fixed +* Fix typos and invalid link in the yorkie design document by @g2hhh2ee in https://github.com/yorkie-team/yorkie/pull/591 +* Clean up code by @hackerwins in https://github.com/yorkie-team/yorkie/pull/595 +* Clean up methods related to presence by @chacha912 in https://github.com/yorkie-team/yorkie/pull/594 +* Remove panic from crdt.RGATreeList by @sejongk in https://github.com/yorkie-team/yorkie/pull/596 +* Fix treePos calculating logic for text node by @JOOHOJANG in https://github.com/yorkie-team/yorkie/pull/615 + ## [0.4.5] - 2023-07-20 ### Added diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 95d8adb5e..fe7cdf91f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,6 +51,12 @@ docker-compose -f build/docker/docker-compose.yml up --build -d make test ``` +You can automatically check the programmatic and stylistic errors of your code. 
+ +```sh +make lint +``` + ## Design Documents For developers, [design documents](design/README.md) about core features are provided. You can refer to the docs for understanding the overall structure of Yorkie. @@ -88,6 +94,13 @@ There are multiple types of tests. The location of the test code varies with typ - Integration: These tests cover interactions of package components or interactions between Yorkie packages and some other non-Yorkie system resource (eg: MongoDB, ETCD). - Benchmark: These confirm that the performance of the implemented function. +### Code Coverage +We are using [Codecov](https://about.codecov.io) for analyzing PR's code coverage. If you want to check the coverage of your code in local browser, you can run the command below. + +```sh +make coverage +``` + ## Contributor License Agreement (CLA) We require that all contributors sign our Contributor License Agreement ("CLA") before we can accept the contribution. @@ -98,6 +111,8 @@ Open a pull request ("PR") to any of our open source projects to sign the CLA. A Follow the steps given by the bot to sign the CLA. This will require you to log in with GitHub. We will only use this information for CLA tracking. You only have to sign the CLA once. Once you've signed the CLA, future contributions to the project will not require you to sign again. +If the bot still require you to sign the CLA although you had already signed, you can check your commit's author is equal to the Github account that you logged in. + ### Why Require a CLA? Agreeing to a CLA explicitly states that you are entitled to provide a contribution, that you cannot withdraw permission to use your contribution at a later date, and that Yorkie Team has permission to use your contribution. 
diff --git a/Makefile b/Makefile index b865a1793..1514a27c2 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -YORKIE_VERSION := 0.4.5 +YORKIE_VERSION := 0.4.6 GO_PROJECT = github.com/yorkie-team/yorkie @@ -54,6 +54,12 @@ fmt: ## applies format and simplify codes lint: ## runs the golang-ci lint, checks for lint violations golangci-lint run ./... +coverage: ## runs coverage tests + go clean -testcache + go test -tags integration -race -coverprofile=coverage.txt -covermode=atomic ./... + go tool cover -html=coverage.txt + rm -f coverage.txt + test: ## runs integration tests that require local applications such as MongoDB go clean -testcache go test -tags integration -race ./... diff --git a/api/converter/converter_test.go b/api/converter/converter_test.go index c92b047bc..f7477adb0 100644 --- a/api/converter/converter_test.go +++ b/api/converter/converter_test.go @@ -173,8 +173,7 @@ func TestConverter(t *testing.T) { Edit(0, 1, "한"). Edit(0, 1, "하"). Edit(1, 1, "느"). - Edit(1, 2, "늘"). - Select(1, 2) + Edit(1, 2, "늘") // rich text root.SetNewText("k3"). 
diff --git a/api/converter/from_bytes.go b/api/converter/from_bytes.go index 6f0b6e8fa..3c18f9971 100644 --- a/api/converter/from_bytes.go +++ b/api/converter/from_bytes.go @@ -147,7 +147,9 @@ func fromJSONArray(pbArr *api.JSONElement_JSONArray) (*crdt.Array, error) { if err != nil { return nil, err } - elements.Add(elem) + if err = elements.Add(elem); err != nil { + return nil, err + } } createdAt, err := fromTimeTicket(pbArr.CreatedAt) @@ -265,12 +267,19 @@ func fromJSONCounter(pbCnt *api.JSONElement_Counter) (*crdt.Counter, error) { if err != nil { return nil, err } + counterValue, err := crdt.CounterValueFromBytes(counterType, pbCnt.Value) + if err != nil { + return nil, err + } - counter := crdt.NewCounter( + counter, err := crdt.NewCounter( counterType, - crdt.CounterValueFromBytes(counterType, pbCnt.Value), + counterValue, createdAt, ) + if err != nil { + return nil, err + } counter.SetMovedAt(movedAt) counter.SetRemovedAt(removedAt) diff --git a/api/converter/from_pb.go b/api/converter/from_pb.go index 7295103be..dcc119b6f 100644 --- a/api/converter/from_pb.go +++ b/api/converter/from_pb.go @@ -29,7 +29,6 @@ import ( "github.com/yorkie-team/yorkie/pkg/document/key" "github.com/yorkie-team/yorkie/pkg/document/operations" "github.com/yorkie-team/yorkie/pkg/document/time" - "github.com/yorkie-team/yorkie/server/backend/sync" ) // FromUser converts the given Protobuf formats to model format. @@ -192,16 +191,6 @@ func fromChangeID(id *api.ChangeID) (change.ID, error) { ), nil } -// FromDocumentKey converts the given Protobuf formats to model format. -func FromDocumentKey(pbKey string) (key.Key, error) { - k := key.Key(pbKey) - if err := k.Validate(); err != nil { - return "", err - } - - return k, nil -} - // FromDocumentID converts the given Protobuf formats to model format. 
func FromDocumentID(pbID string) (types.ID, error) { id := types.ID(pbID) @@ -215,34 +204,16 @@ func FromDocumentID(pbID string) (types.ID, error) { // FromEventType converts the given Protobuf formats to model format. func FromEventType(pbDocEventType api.DocEventType) (types.DocEventType, error) { switch pbDocEventType { - case api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_CHANGED: - return types.DocumentsChangedEvent, nil - case api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_WATCHED: - return types.DocumentsWatchedEvent, nil - case api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED: - return types.DocumentsUnwatchedEvent, nil + case api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_CHANGED: + return types.DocumentChangedEvent, nil + case api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_WATCHED: + return types.DocumentWatchedEvent, nil + case api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_UNWATCHED: + return types.DocumentUnwatchedEvent, nil } return "", fmt.Errorf("%v: %w", pbDocEventType, ErrUnsupportedEventType) } -// FromDocEvent converts the given Protobuf formats to model format. -func FromDocEvent(docEvent *api.DocEvent) (*sync.DocEvent, error) { - client, err := time.ActorIDFromBytes(docEvent.Publisher) - if err != nil { - return nil, err - } - - eventType, err := FromEventType(docEvent.Type) - if err != nil { - return nil, err - } - - return &sync.DocEvent{ - Type: eventType, - Publisher: client, - }, nil -} - // FromOperations converts the given Protobuf formats to model format. 
func FromOperations(pbOps []*api.Operation) ([]operations.Operation, error) { var ops []operations.Operation @@ -260,10 +231,11 @@ func FromOperations(pbOps []*api.Operation) ([]operations.Operation, error) { op, err = fromRemove(decoded.Remove) case *api.Operation_Edit_: op, err = fromEdit(decoded.Edit) - case *api.Operation_Select_: - op, err = fromSelect(decoded.Select) case *api.Operation_Style_: op, err = fromStyle(decoded.Style) + case *api.Operation_Select_: + // NOTE(hackerwins): Operation_Select is deprecated. + continue case *api.Operation_Increase_: op, err = fromIncrease(decoded.Increase) case *api.Operation_TreeEdit_: @@ -421,31 +393,6 @@ func fromRemove(pbRemove *api.Operation_Remove) (*operations.Remove, error) { ), nil } -func fromSelect(pbSelect *api.Operation_Select) (*operations.Select, error) { - parentCreatedAt, err := fromTimeTicket(pbSelect.ParentCreatedAt) - if err != nil { - return nil, err - } - from, err := fromTextNodePos(pbSelect.From) - if err != nil { - return nil, err - } - to, err := fromTextNodePos(pbSelect.To) - if err != nil { - return nil, err - } - executedAt, err := fromTimeTicket(pbSelect.ExecutedAt) - if err != nil { - return nil, err - } - return operations.NewSelect( - parentCreatedAt, - from, - to, - executedAt, - ), nil -} - func fromEdit(pbEdit *api.Operation_Edit) (*operations.Edit, error) { parentCreatedAt, err := fromTimeTicket(pbEdit.ParentCreatedAt) if err != nil { @@ -547,6 +494,13 @@ func fromTreeEdit(pbTreeEdit *api.Operation_TreeEdit) (*operations.TreeEdit, err return nil, err } + createdAtMapByActor, err := fromCreatedAtMapByActor( + pbTreeEdit.CreatedAtMapByActor, + ) + if err != nil { + return nil, err + } + nodes, err := FromTreeNodesWhenEdit(pbTreeEdit.Contents) if err != nil { return nil, err @@ -556,6 +510,7 @@ func fromTreeEdit(pbTreeEdit *api.Operation_TreeEdit) (*operations.TreeEdit, err parentCreatedAt, from, to, + createdAtMapByActor, nodes, executedAt, ), nil @@ -676,7 +631,7 @@ func 
FromTreeNodesWhenEdit(pbNodes []*api.TreeNodes) ([]*crdt.TreeNode, error) { } func fromTreeNode(pbNode *api.TreeNode) (*crdt.TreeNode, error) { - pos, err := fromTreePos(pbNode.Pos) + id, err := fromTreeNodeID(pbNode.Id) if err != nil { return nil, err } @@ -690,21 +645,56 @@ func fromTreeNode(pbNode *api.TreeNode) (*crdt.TreeNode, error) { attrs.Set(k, pbAttr.Value, updatedAt) } - return crdt.NewTreeNode( - pos, + node := crdt.NewTreeNode( + id, pbNode.Type, attrs, pbNode.Value, - ), nil + ) + + if pbNode.GetInsPrevId() != nil { + node.InsPrevID, err = fromTreeNodeID(pbNode.GetInsPrevId()) + if err != nil { + return nil, err + } + } + + if pbNode.GetInsNextId() != nil { + node.InsNextID, err = fromTreeNodeID(pbNode.GetInsNextId()) + if err != nil { + return nil, err + } + } + + node.RemovedAt, err = fromTimeTicket(pbNode.RemovedAt) + if err != nil { + return nil, err + } + + return node, nil } func fromTreePos(pbPos *api.TreePos) (*crdt.TreePos, error) { + parentID, err := fromTreeNodeID(pbPos.ParentId) + if err != nil { + return nil, err + } + + leftSiblingID, err := fromTreeNodeID(pbPos.LeftSiblingId) + if err != nil { + return nil, err + } + + return crdt.NewTreePos(parentID, leftSiblingID), nil +} + +func fromTreeNodeID(pbPos *api.TreeNodeID) (*crdt.TreeNodeID, error) { createdAt, err := fromTimeTicket(pbPos.CreatedAt) if err != nil { return nil, err } - return crdt.NewTreePos( + return crdt.NewTreeNodeID( createdAt, int(pbPos.Offset), ), nil @@ -793,11 +783,20 @@ func fromElement(pbElement *api.JSONElementSimple) (crdt.Element, error) { if err != nil { return nil, err } - return crdt.NewCounter( + counterValue, err := crdt.CounterValueFromBytes(counterType, pbElement.Value) + if err != nil { + return nil, err + } + + counter, err := crdt.NewCounter( counterType, - crdt.CounterValueFromBytes(counterType, pbElement.Value), + counterValue, createdAt, - ), nil + ) + if err != nil { + return nil, err + } + return counter, nil case api.ValueType_VALUE_TYPE_TREE: 
return BytesToTree(pbElement.Value) } diff --git a/api/converter/to_bytes.go b/api/converter/to_bytes.go index 29fbd54b5..851e6daa5 100644 --- a/api/converter/to_bytes.go +++ b/api/converter/to_bytes.go @@ -29,7 +29,7 @@ import ( ) // SnapshotToBytes converts the given document to byte array. -func SnapshotToBytes(obj *crdt.Object, presences *innerpresence.Map) ([]byte, error) { +func SnapshotToBytes(obj *crdt.Object, presences map[string]innerpresence.Presence) ([]byte, error) { pbElem, err := toJSONElement(obj) if err != nil { return nil, err @@ -158,11 +158,15 @@ func toCounter(counter *crdt.Counter) (*api.JSONElement, error) { if err != nil { return nil, err } + counterValue, err := counter.Bytes() + if err != nil { + return nil, err + } return &api.JSONElement{ Body: &api.JSONElement_Counter_{Counter: &api.JSONElement_Counter{ Type: pbCounterType, - Value: counter.Bytes(), + Value: counterValue, CreatedAt: ToTimeTicket(counter.CreatedAt()), MovedAt: ToTimeTicket(counter.MovedAt()), RemovedAt: ToTimeTicket(counter.RemovedAt()), @@ -294,7 +298,7 @@ func toTreeNode(treeNode *crdt.TreeNode, depth int) *api.TreeNode { } pbNode := &api.TreeNode{ - Pos: toTreePos(treeNode.Pos), + Id: toTreeNodeID(treeNode.ID), Type: treeNode.Type(), Value: treeNode.Value, RemovedAt: ToTimeTicket(treeNode.RemovedAt), @@ -302,16 +306,33 @@ func toTreeNode(treeNode *crdt.TreeNode, depth int) *api.TreeNode { Attributes: attrs, } - if treeNode.InsPrev != nil { - pbNode.InsPrevPos = toTreePos(treeNode.InsPrev.Pos) + if treeNode.InsPrevID != nil { + pbNode.InsPrevId = toTreeNodeID(treeNode.InsPrevID) + } + + if treeNode.InsNextID != nil { + pbNode.InsNextId = toTreeNodeID(treeNode.InsNextID) } return pbNode } -func toTreePos(pos *crdt.TreePos) *api.TreePos { - return &api.TreePos{ +func toTreeNodeID(pos *crdt.TreeNodeID) *api.TreeNodeID { + return &api.TreeNodeID{ CreatedAt: ToTimeTicket(pos.CreatedAt), Offset: int32(pos.Offset), } } + +func toTreePos(pos *crdt.TreePos) *api.TreePos { + 
return &api.TreePos{ + ParentId: &api.TreeNodeID{ + CreatedAt: ToTimeTicket(pos.ParentID.CreatedAt), + Offset: int32(pos.ParentID.Offset), + }, + LeftSiblingId: &api.TreeNodeID{ + CreatedAt: ToTimeTicket(pos.LeftSiblingID.CreatedAt), + Offset: int32(pos.LeftSiblingID.Offset), + }, + } +} diff --git a/api/converter/to_pb.go b/api/converter/to_pb.go index 0867437e5..1b1206203 100644 --- a/api/converter/to_pb.go +++ b/api/converter/to_pb.go @@ -29,7 +29,6 @@ import ( "github.com/yorkie-team/yorkie/pkg/document/innerpresence" "github.com/yorkie-team/yorkie/pkg/document/operations" "github.com/yorkie-team/yorkie/pkg/document/time" - "github.com/yorkie-team/yorkie/server/backend/sync" ) // ToUser converts the given model format to Protobuf format. @@ -123,12 +122,11 @@ func ToDocumentSummary(summary *types.DocumentSummary) (*api.DocumentSummary, er } // ToPresences converts the given model to Protobuf format. -func ToPresences(presences *innerpresence.Map) map[string]*api.Presence { +func ToPresences(presences map[string]innerpresence.Presence) map[string]*api.Presence { pbPresences := make(map[string]*api.Presence) - presences.Range(func(k string, v innerpresence.Presence) bool { + for k, v := range presences { pbPresences[k] = ToPresence(v) - return true - }) + } return pbPresences } @@ -203,30 +201,17 @@ func ToChangeID(id change.ID) *api.ChangeID { // ToDocEventType converts the given model format to Protobuf format. 
func ToDocEventType(eventType types.DocEventType) (api.DocEventType, error) { switch eventType { - case types.DocumentsChangedEvent: - return api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_CHANGED, nil - case types.DocumentsWatchedEvent: - return api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_WATCHED, nil - case types.DocumentsUnwatchedEvent: - return api.DocEventType_DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED, nil + case types.DocumentChangedEvent: + return api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_CHANGED, nil + case types.DocumentWatchedEvent: + return api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_WATCHED, nil + case types.DocumentUnwatchedEvent: + return api.DocEventType_DOC_EVENT_TYPE_DOCUMENT_UNWATCHED, nil default: return 0, fmt.Errorf("%s: %w", eventType, ErrUnsupportedEventType) } } -// ToDocEvent converts the given model to Protobuf format. -func ToDocEvent(docEvent sync.DocEvent) (*api.DocEvent, error) { - eventType, err := ToDocEventType(docEvent.Type) - if err != nil { - return nil, err - } - - return &api.DocEvent{ - Type: eventType, - Publisher: docEvent.Publisher.Bytes(), - }, nil -} - // ToOperations converts the given model format to Protobuf format. 
func ToOperations(ops []operations.Operation) ([]*api.Operation, error) { var pbOperations []*api.Operation @@ -245,8 +230,6 @@ func ToOperations(ops []operations.Operation) ([]*api.Operation, error) { pbOperation.Body, err = toRemove(op) case *operations.Edit: pbOperation.Body, err = toEdit(op) - case *operations.Select: - pbOperation.Body, err = toSelect(op) case *operations.Style: pbOperation.Body, err = toStyle(op) case *operations.Increase: @@ -368,17 +351,6 @@ func toEdit(e *operations.Edit) (*api.Operation_Edit_, error) { }, nil } -func toSelect(s *operations.Select) (*api.Operation_Select_, error) { - return &api.Operation_Select_{ - Select: &api.Operation_Select{ - ParentCreatedAt: ToTimeTicket(s.ParentCreatedAt()), - From: toTextNodePos(s.From()), - To: toTextNodePos(s.To()), - ExecutedAt: ToTimeTicket(s.ExecutedAt()), - }, - }, nil -} - func toStyle(style *operations.Style) (*api.Operation_Style_, error) { return &api.Operation_Style_{ Style: &api.Operation_Style{ @@ -409,11 +381,12 @@ func toIncrease(increase *operations.Increase) (*api.Operation_Increase_, error) func toTreeEdit(e *operations.TreeEdit) (*api.Operation_TreeEdit_, error) { return &api.Operation_TreeEdit_{ TreeEdit: &api.Operation_TreeEdit{ - ParentCreatedAt: ToTimeTicket(e.ParentCreatedAt()), - From: toTreePos(e.FromPos()), - To: toTreePos(e.ToPos()), - Contents: ToTreeNodesWhenEdit(e.Contents()), - ExecutedAt: ToTimeTicket(e.ExecutedAt()), + ParentCreatedAt: ToTimeTicket(e.ParentCreatedAt()), + From: toTreePos(e.FromPos()), + To: toTreePos(e.ToPos()), + CreatedAtMapByActor: toCreatedAtMapByActor(e.CreatedAtMapByActor()), + Contents: ToTreeNodesWhenEdit(e.Contents()), + ExecutedAt: ToTimeTicket(e.ExecutedAt()), }, }, nil } @@ -463,11 +436,15 @@ func toJSONElementSimple(elem crdt.Element) (*api.JSONElementSimple, error) { if err != nil { return nil, err } + counterValue, err := elem.Bytes() + if err != nil { + return nil, err + } return &api.JSONElementSimple{ Type: pbCounterType, 
CreatedAt: ToTimeTicket(elem.CreatedAt()), - Value: elem.Bytes(), + Value: counterValue, }, nil case *crdt.Tree: bytes, err := TreeToBytes(elem) diff --git a/api/types/change_summary_test.go b/api/types/change_summary_test.go index 7a9a729d5..773a3b021 100644 --- a/api/types/change_summary_test.go +++ b/api/types/change_summary_test.go @@ -122,7 +122,7 @@ func TestChangeSummary(t *testing.T) { PageSize: 3, IsForward: true, }, lastSeq) - assert.Equal(t, int64(lastSeq+1), from) - assert.Equal(t, int64(lastSeq+1), to) + assert.Equal(t, lastSeq+1, from) + assert.Equal(t, lastSeq+1, to) }) } diff --git a/api/types/event.go b/api/types/event.go index d04de35bc..dc4a2baba 100644 --- a/api/types/event.go +++ b/api/types/event.go @@ -4,15 +4,15 @@ package types type DocEventType string const ( - // DocumentsChangedEvent is an event indicating that documents are being + // DocumentChangedEvent is an event indicating that document is being // modified by a change. - DocumentsChangedEvent DocEventType = "documents-changed" + DocumentChangedEvent DocEventType = "document-changed" - // DocumentsWatchedEvent is an event that occurs when documents are watched + // DocumentWatchedEvent is an event that occurs when document is watched // by other clients. - DocumentsWatchedEvent DocEventType = "documents-watched" + DocumentWatchedEvent DocEventType = "document-watched" - // DocumentsUnwatchedEvent is an event that occurs when documents are + // DocumentUnwatchedEvent is an event that occurs when document is // unwatched by other clients. 
- DocumentsUnwatchedEvent DocEventType = "documents-unwatched" + DocumentUnwatchedEvent DocEventType = "document-unwatched" ) diff --git a/api/yorkie/v1/resources.pb.go b/api/yorkie/v1/resources.pb.go index 3e8c98cc3..e7e1c9682 100644 --- a/api/yorkie/v1/resources.pb.go +++ b/api/yorkie/v1/resources.pb.go @@ -87,21 +87,21 @@ func (ValueType) EnumDescriptor() ([]byte, []int) { type DocEventType int32 const ( - DocEventType_DOC_EVENT_TYPE_DOCUMENTS_CHANGED DocEventType = 0 - DocEventType_DOC_EVENT_TYPE_DOCUMENTS_WATCHED DocEventType = 1 - DocEventType_DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED DocEventType = 2 + DocEventType_DOC_EVENT_TYPE_DOCUMENT_CHANGED DocEventType = 0 + DocEventType_DOC_EVENT_TYPE_DOCUMENT_WATCHED DocEventType = 1 + DocEventType_DOC_EVENT_TYPE_DOCUMENT_UNWATCHED DocEventType = 2 ) var DocEventType_name = map[int32]string{ - 0: "DOC_EVENT_TYPE_DOCUMENTS_CHANGED", - 1: "DOC_EVENT_TYPE_DOCUMENTS_WATCHED", - 2: "DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED", + 0: "DOC_EVENT_TYPE_DOCUMENT_CHANGED", + 1: "DOC_EVENT_TYPE_DOCUMENT_WATCHED", + 2: "DOC_EVENT_TYPE_DOCUMENT_UNWATCHED", } var DocEventType_value = map[string]int32{ - "DOC_EVENT_TYPE_DOCUMENTS_CHANGED": 0, - "DOC_EVENT_TYPE_DOCUMENTS_WATCHED": 1, - "DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED": 2, + "DOC_EVENT_TYPE_DOCUMENT_CHANGED": 0, + "DOC_EVENT_TYPE_DOCUMENT_WATCHED": 1, + "DOC_EVENT_TYPE_DOCUMENT_UNWATCHED": 2, } func (x DocEventType) String() string { @@ -140,7 +140,7 @@ func (x PresenceChange_ChangeType) String() string { } func (PresenceChange_ChangeType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{19, 0} + return fileDescriptor_36361b2f5d0f0896, []int{20, 0} } // /////////////////////////////////////// @@ -996,6 +996,10 @@ func (m *Operation_Edit) GetAttributes() map[string]string { return nil } +// NOTE(hackerwins): Select Operation is not used in the current version. +// In the previous version, it was used to represent selection of Text. 
+// However, it has been replaced by Presence now. It is retained for backward +// compatibility purposes. type Operation_Select struct { ParentCreatedAt *TimeTicket `protobuf:"bytes,1,opt,name=parent_created_at,json=parentCreatedAt,proto3" json:"parent_created_at,omitempty"` From *TextNodePos `protobuf:"bytes,2,opt,name=from,proto3" json:"from,omitempty"` @@ -1210,14 +1214,15 @@ func (m *Operation_Increase) GetExecutedAt() *TimeTicket { } type Operation_TreeEdit struct { - ParentCreatedAt *TimeTicket `protobuf:"bytes,1,opt,name=parent_created_at,json=parentCreatedAt,proto3" json:"parent_created_at,omitempty"` - From *TreePos `protobuf:"bytes,2,opt,name=from,proto3" json:"from,omitempty"` - To *TreePos `protobuf:"bytes,3,opt,name=to,proto3" json:"to,omitempty"` - Contents []*TreeNodes `protobuf:"bytes,4,rep,name=contents,proto3" json:"contents,omitempty"` - ExecutedAt *TimeTicket `protobuf:"bytes,5,opt,name=executed_at,json=executedAt,proto3" json:"executed_at,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ParentCreatedAt *TimeTicket `protobuf:"bytes,1,opt,name=parent_created_at,json=parentCreatedAt,proto3" json:"parent_created_at,omitempty"` + From *TreePos `protobuf:"bytes,2,opt,name=from,proto3" json:"from,omitempty"` + To *TreePos `protobuf:"bytes,3,opt,name=to,proto3" json:"to,omitempty"` + CreatedAtMapByActor map[string]*TimeTicket `protobuf:"bytes,4,rep,name=created_at_map_by_actor,json=createdAtMapByActor,proto3" json:"created_at_map_by_actor,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Contents []*TreeNodes `protobuf:"bytes,5,rep,name=contents,proto3" json:"contents,omitempty"` + ExecutedAt *TimeTicket `protobuf:"bytes,6,opt,name=executed_at,json=executedAt,proto3" json:"executed_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m 
*Operation_TreeEdit) Reset() { *m = Operation_TreeEdit{} } @@ -1274,6 +1279,13 @@ func (m *Operation_TreeEdit) GetTo() *TreePos { return nil } +func (m *Operation_TreeEdit) GetCreatedAtMapByActor() map[string]*TimeTicket { + if m != nil { + return m.CreatedAtMapByActor + } + return nil +} + func (m *Operation_TreeEdit) GetContents() []*TreeNodes { if m != nil { return m.Contents @@ -2329,13 +2341,14 @@ func (m *TextNodeID) GetOffset() int32 { } type TreeNode struct { - Pos *TreePos `protobuf:"bytes,1,opt,name=pos,proto3" json:"pos,omitempty"` + Id *TreeNodeID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` RemovedAt *TimeTicket `protobuf:"bytes,4,opt,name=removed_at,json=removedAt,proto3" json:"removed_at,omitempty"` - InsPrevPos *TreePos `protobuf:"bytes,5,opt,name=ins_prev_pos,json=insPrevPos,proto3" json:"ins_prev_pos,omitempty"` - Depth int32 `protobuf:"varint,6,opt,name=depth,proto3" json:"depth,omitempty"` - Attributes map[string]*NodeAttr `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + InsPrevId *TreeNodeID `protobuf:"bytes,5,opt,name=ins_prev_id,json=insPrevId,proto3" json:"ins_prev_id,omitempty"` + InsNextId *TreeNodeID `protobuf:"bytes,6,opt,name=ins_next_id,json=insNextId,proto3" json:"ins_next_id,omitempty"` + Depth int32 `protobuf:"varint,7,opt,name=depth,proto3" json:"depth,omitempty"` + Attributes map[string]*NodeAttr `protobuf:"bytes,8,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2374,9 +2387,9 @@ func (m *TreeNode) XXX_DiscardUnknown() { var 
xxx_messageInfo_TreeNode proto.InternalMessageInfo -func (m *TreeNode) GetPos() *TreePos { +func (m *TreeNode) GetId() *TreeNodeID { if m != nil { - return m.Pos + return m.Id } return nil } @@ -2402,9 +2415,16 @@ func (m *TreeNode) GetRemovedAt() *TimeTicket { return nil } -func (m *TreeNode) GetInsPrevPos() *TreePos { +func (m *TreeNode) GetInsPrevId() *TreeNodeID { + if m != nil { + return m.InsPrevId + } + return nil +} + +func (m *TreeNode) GetInsNextId() *TreeNodeID { if m != nil { - return m.InsPrevPos + return m.InsNextId } return nil } @@ -2470,7 +2490,7 @@ func (m *TreeNodes) GetContent() []*TreeNode { return nil } -type TreePos struct { +type TreeNodeID struct { CreatedAt *TimeTicket `protobuf:"bytes,1,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2478,11 +2498,66 @@ type TreePos struct { XXX_sizecache int32 `json:"-"` } +func (m *TreeNodeID) Reset() { *m = TreeNodeID{} } +func (m *TreeNodeID) String() string { return proto.CompactTextString(m) } +func (*TreeNodeID) ProtoMessage() {} +func (*TreeNodeID) Descriptor() ([]byte, []int) { + return fileDescriptor_36361b2f5d0f0896, []int{14} +} +func (m *TreeNodeID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TreeNodeID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TreeNodeID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TreeNodeID) XXX_Merge(src proto.Message) { + xxx_messageInfo_TreeNodeID.Merge(m, src) +} +func (m *TreeNodeID) XXX_Size() int { + return m.Size() +} +func (m *TreeNodeID) XXX_DiscardUnknown() { + xxx_messageInfo_TreeNodeID.DiscardUnknown(m) +} + +var xxx_messageInfo_TreeNodeID proto.InternalMessageInfo + +func (m *TreeNodeID) 
GetCreatedAt() *TimeTicket { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *TreeNodeID) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +type TreePos struct { + ParentId *TreeNodeID `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + LeftSiblingId *TreeNodeID `protobuf:"bytes,2,opt,name=left_sibling_id,json=leftSiblingId,proto3" json:"left_sibling_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *TreePos) Reset() { *m = TreePos{} } func (m *TreePos) String() string { return proto.CompactTextString(m) } func (*TreePos) ProtoMessage() {} func (*TreePos) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{14} + return fileDescriptor_36361b2f5d0f0896, []int{15} } func (m *TreePos) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2511,18 +2586,18 @@ func (m *TreePos) XXX_DiscardUnknown() { var xxx_messageInfo_TreePos proto.InternalMessageInfo -func (m *TreePos) GetCreatedAt() *TimeTicket { +func (m *TreePos) GetParentId() *TreeNodeID { if m != nil { - return m.CreatedAt + return m.ParentId } return nil } -func (m *TreePos) GetOffset() int32 { +func (m *TreePos) GetLeftSiblingId() *TreeNodeID { if m != nil { - return m.Offset + return m.LeftSiblingId } - return 0 + return nil } type User struct { @@ -2538,7 +2613,7 @@ func (m *User) Reset() { *m = User{} } func (m *User) String() string { return proto.CompactTextString(m) } func (*User) ProtoMessage() {} func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{15} + return fileDescriptor_36361b2f5d0f0896, []int{16} } func (m *User) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2607,7 +2682,7 @@ func (m *Project) Reset() { *m = Project{} } func (m *Project) String() string { return proto.CompactTextString(m) } func (*Project) ProtoMessage() {} func (*Project) 
Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{16} + return fileDescriptor_36361b2f5d0f0896, []int{17} } func (m *Project) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2713,7 +2788,7 @@ func (m *UpdatableProjectFields) Reset() { *m = UpdatableProjectFields{} func (m *UpdatableProjectFields) String() string { return proto.CompactTextString(m) } func (*UpdatableProjectFields) ProtoMessage() {} func (*UpdatableProjectFields) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{17} + return fileDescriptor_36361b2f5d0f0896, []int{18} } func (m *UpdatableProjectFields) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2785,7 +2860,7 @@ func (m *UpdatableProjectFields_AuthWebhookMethods) String() string { } func (*UpdatableProjectFields_AuthWebhookMethods) ProtoMessage() {} func (*UpdatableProjectFields_AuthWebhookMethods) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{17, 0} + return fileDescriptor_36361b2f5d0f0896, []int{18, 0} } func (m *UpdatableProjectFields_AuthWebhookMethods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2837,7 +2912,7 @@ func (m *DocumentSummary) Reset() { *m = DocumentSummary{} } func (m *DocumentSummary) String() string { return proto.CompactTextString(m) } func (*DocumentSummary) ProtoMessage() {} func (*DocumentSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{18} + return fileDescriptor_36361b2f5d0f0896, []int{19} } func (m *DocumentSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2920,7 +2995,7 @@ func (m *PresenceChange) Reset() { *m = PresenceChange{} } func (m *PresenceChange) String() string { return proto.CompactTextString(m) } func (*PresenceChange) ProtoMessage() {} func (*PresenceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{19} + return fileDescriptor_36361b2f5d0f0896, []int{20} } func (m *PresenceChange) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2974,7 +3049,7 @@ func (m *Presence) Reset() { *m = Presence{} } func (m *Presence) String() string { return proto.CompactTextString(m) } func (*Presence) ProtoMessage() {} func (*Presence) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{20} + return fileDescriptor_36361b2f5d0f0896, []int{21} } func (m *Presence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3022,7 +3097,7 @@ func (m *Checkpoint) Reset() { *m = Checkpoint{} } func (m *Checkpoint) String() string { return proto.CompactTextString(m) } func (*Checkpoint) ProtoMessage() {} func (*Checkpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{21} + return fileDescriptor_36361b2f5d0f0896, []int{22} } func (m *Checkpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3078,7 +3153,7 @@ func (m *TextNodePos) Reset() { *m = TextNodePos{} } func (m *TextNodePos) String() string { return proto.CompactTextString(m) } func (*TextNodePos) ProtoMessage() {} func (*TextNodePos) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{22} + return fileDescriptor_36361b2f5d0f0896, []int{23} } func (m *TextNodePos) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3141,7 +3216,7 @@ func (m *TimeTicket) Reset() { *m = TimeTicket{} } func (m *TimeTicket) String() string { return proto.CompactTextString(m) } func (*TimeTicket) ProtoMessage() {} func (*TimeTicket) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{23} + return fileDescriptor_36361b2f5d0f0896, []int{24} } func (m *TimeTicket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3193,7 +3268,7 @@ func (m *TimeTicket) GetActorId() []byte { type DocEvent struct { Type DocEventType `protobuf:"varint,1,opt,name=type,proto3,enum=yorkie.v1.DocEventType" json:"type,omitempty"` - Publisher []byte `protobuf:"bytes,2,opt,name=publisher,proto3" 
json:"publisher,omitempty"` + Publisher string `protobuf:"bytes,2,opt,name=publisher,proto3" json:"publisher,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3203,7 +3278,7 @@ func (m *DocEvent) Reset() { *m = DocEvent{} } func (m *DocEvent) String() string { return proto.CompactTextString(m) } func (*DocEvent) ProtoMessage() {} func (*DocEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_36361b2f5d0f0896, []int{24} + return fileDescriptor_36361b2f5d0f0896, []int{25} } func (m *DocEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3236,14 +3311,14 @@ func (m *DocEvent) GetType() DocEventType { if m != nil { return m.Type } - return DocEventType_DOC_EVENT_TYPE_DOCUMENTS_CHANGED + return DocEventType_DOC_EVENT_TYPE_DOCUMENT_CHANGED } -func (m *DocEvent) GetPublisher() []byte { +func (m *DocEvent) GetPublisher() string { if m != nil { return m.Publisher } - return nil + return "" } func init() { @@ -3268,6 +3343,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "yorkie.v1.Operation.Style.AttributesEntry") proto.RegisterType((*Operation_Increase)(nil), "yorkie.v1.Operation.Increase") proto.RegisterType((*Operation_TreeEdit)(nil), "yorkie.v1.Operation.TreeEdit") + proto.RegisterMapType((map[string]*TimeTicket)(nil), "yorkie.v1.Operation.TreeEdit.CreatedAtMapByActorEntry") proto.RegisterType((*Operation_TreeStyle)(nil), "yorkie.v1.Operation.TreeStyle") proto.RegisterMapType((map[string]string)(nil), "yorkie.v1.Operation.TreeStyle.AttributesEntry") proto.RegisterType((*JSONElementSimple)(nil), "yorkie.v1.JSONElementSimple") @@ -3287,6 +3363,7 @@ func init() { proto.RegisterType((*TreeNode)(nil), "yorkie.v1.TreeNode") proto.RegisterMapType((map[string]*NodeAttr)(nil), "yorkie.v1.TreeNode.AttributesEntry") proto.RegisterType((*TreeNodes)(nil), "yorkie.v1.TreeNodes") + proto.RegisterType((*TreeNodeID)(nil), "yorkie.v1.TreeNodeID") 
proto.RegisterType((*TreePos)(nil), "yorkie.v1.TreePos") proto.RegisterType((*User)(nil), "yorkie.v1.User") proto.RegisterType((*Project)(nil), "yorkie.v1.Project") @@ -3305,166 +3382,170 @@ func init() { func init() { proto.RegisterFile("yorkie/v1/resources.proto", fileDescriptor_36361b2f5d0f0896) } var fileDescriptor_36361b2f5d0f0896 = []byte{ - // 2541 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xdd, 0x73, 0xdb, 0x58, - 0x15, 0x8f, 0xe4, 0x4f, 0x9d, 0xa4, 0x89, 0x7b, 0xd3, 0x0f, 0xd7, 0x6d, 0x43, 0xd6, 0x5b, 0x4a, - 0xda, 0x82, 0x93, 0x86, 0x16, 0x96, 0x96, 0x05, 0x1c, 0x5b, 0x6d, 0xd2, 0x4d, 0x9d, 0x20, 0x3b, - 0x2d, 0xdd, 0x81, 0xd1, 0x28, 0xd2, 0x6d, 0xa3, 0x8d, 0x6d, 0x79, 0x25, 0xd9, 0x5b, 0xbf, 0x2e, - 0x30, 0xc3, 0x2b, 0x6f, 0xfc, 0x0b, 0xfc, 0x09, 0xfb, 0x08, 0x0f, 0x0c, 0x33, 0xc0, 0xc0, 0x0c, - 0x3b, 0xc3, 0x2b, 0x5b, 0x1e, 0x18, 0x78, 0x63, 0x18, 0x78, 0x63, 0x86, 0xb9, 0x1f, 0x92, 0xaf, - 0x65, 0xd9, 0x75, 0x4d, 0x66, 0xa7, 0x1d, 0xde, 0x74, 0xef, 0xfd, 0x9d, 0x7b, 0xcf, 0xb9, 0xe7, - 0x77, 0x8e, 0xce, 0xd5, 0x15, 0x5c, 0xe8, 0x3b, 0xee, 0xb1, 0x8d, 0xd7, 0x7b, 0x37, 0xd7, 0x5d, - 0xec, 0x39, 0x5d, 0xd7, 0xc4, 0x5e, 0xa9, 0xe3, 0x3a, 0xbe, 0x83, 0x14, 0x36, 0x54, 0xea, 0xdd, - 0x2c, 0x7c, 0xe1, 0x99, 0xe3, 0x3c, 0x6b, 0xe2, 0x75, 0x3a, 0x70, 0xd8, 0x7d, 0xba, 0xee, 0xdb, - 0x2d, 0xec, 0xf9, 0x46, 0xab, 0xc3, 0xb0, 0x85, 0x95, 0x28, 0xe0, 0x23, 0xd7, 0xe8, 0x74, 0xb0, - 0xcb, 0xe7, 0x2a, 0xfe, 0x46, 0x82, 0x6c, 0xbd, 0x6d, 0x74, 0xbc, 0x23, 0xc7, 0x47, 0xd7, 0x21, - 0xe9, 0x3a, 0x8e, 0x9f, 0x97, 0x56, 0xa5, 0xb5, 0xf9, 0xcd, 0x73, 0xa5, 0x70, 0x9d, 0xd2, 0x83, - 0xfa, 0x5e, 0x4d, 0x6d, 0xe2, 0x16, 0x6e, 0xfb, 0x1a, 0xc5, 0xa0, 0xef, 0x80, 0xd2, 0x71, 0xb1, - 0x87, 0xdb, 0x26, 0xf6, 0xf2, 0xf2, 0x6a, 0x62, 0x6d, 0x7e, 0xb3, 0x28, 0x08, 0x04, 0x73, 0x96, - 0xf6, 0x03, 0x90, 0xda, 0xf6, 0xdd, 0xbe, 0x36, 0x10, 0x2a, 0x7c, 0x17, 0x16, 0x87, 0x07, 0x51, - 0x0e, 0x12, 0xc7, 0xb8, 0x4f, 
0x97, 0x57, 0x34, 0xf2, 0x88, 0xae, 0x41, 0xaa, 0x67, 0x34, 0xbb, - 0x38, 0x2f, 0x53, 0x95, 0x96, 0x85, 0x15, 0x02, 0x59, 0x8d, 0x21, 0xee, 0xc8, 0xef, 0x48, 0xc5, - 0x9f, 0xca, 0x00, 0x95, 0x23, 0xa3, 0xfd, 0x0c, 0xef, 0x1b, 0xe6, 0x31, 0x7a, 0x0b, 0x16, 0x2c, - 0xc7, 0xec, 0x12, 0xad, 0xf5, 0xc1, 0xc4, 0xf3, 0x41, 0xdf, 0x7b, 0xb8, 0x8f, 0x6e, 0x03, 0x98, - 0x47, 0xd8, 0x3c, 0xee, 0x38, 0x76, 0xdb, 0xe7, 0xab, 0x9c, 0x15, 0x56, 0xa9, 0x84, 0x83, 0x9a, - 0x00, 0x44, 0x05, 0xc8, 0x7a, 0xdc, 0xc2, 0x7c, 0x62, 0x55, 0x5a, 0x5b, 0xd0, 0xc2, 0x36, 0xba, - 0x01, 0x19, 0x93, 0xea, 0xe0, 0xe5, 0x93, 0x74, 0x5f, 0x4e, 0x0f, 0xcd, 0x47, 0x46, 0xb4, 0x00, - 0x81, 0xca, 0x70, 0xba, 0x65, 0xb7, 0x75, 0xaf, 0xdf, 0x36, 0xb1, 0xa5, 0xfb, 0xb6, 0x79, 0x8c, - 0xfd, 0x7c, 0x6a, 0x44, 0x8d, 0x86, 0xdd, 0xc2, 0x0d, 0x3a, 0xa8, 0x2d, 0xb5, 0xec, 0x76, 0x9d, - 0xc2, 0x59, 0x07, 0xba, 0x0c, 0x60, 0x7b, 0xba, 0x8b, 0x5b, 0x4e, 0x0f, 0x5b, 0xf9, 0xf4, 0xaa, - 0xb4, 0x96, 0xd5, 0x14, 0xdb, 0xd3, 0x58, 0x47, 0xf1, 0x17, 0x12, 0xa4, 0xd9, 0xaa, 0xe8, 0x6d, - 0x90, 0x6d, 0x8b, 0x7b, 0x77, 0x79, 0x44, 0xa9, 0x9d, 0xaa, 0x26, 0xdb, 0x16, 0xca, 0x43, 0xa6, - 0x85, 0x3d, 0xcf, 0x78, 0xc6, 0x36, 0x5d, 0xd1, 0x82, 0x26, 0xba, 0x05, 0xe0, 0x74, 0xb0, 0x6b, - 0xf8, 0xb6, 0xd3, 0xf6, 0xf2, 0x09, 0x6a, 0xdb, 0x19, 0x61, 0x9a, 0xbd, 0x60, 0x50, 0x13, 0x70, - 0x68, 0x0b, 0x96, 0x02, 0x9f, 0xeb, 0xcc, 0xea, 0x7c, 0x92, 0x6a, 0x70, 0x21, 0xc6, 0x99, 0x7c, - 0x7b, 0x16, 0x3b, 0x43, 0xed, 0xe2, 0x8f, 0x25, 0xc8, 0x06, 0x4a, 0x12, 0x7b, 0xcd, 0xa6, 0x4d, - 0x7c, 0xea, 0xe1, 0x0f, 0xa9, 0x35, 0xa7, 0x34, 0x85, 0xf5, 0xd4, 0xf1, 0x87, 0xe8, 0x2d, 0x00, - 0x0f, 0xbb, 0x3d, 0xec, 0xd2, 0x61, 0x62, 0x42, 0x62, 0x4b, 0xde, 0x90, 0x34, 0x85, 0xf5, 0x12, - 0xc8, 0x25, 0xc8, 0x34, 0x8d, 0x56, 0xc7, 0x71, 0x99, 0xf3, 0xd8, 0x78, 0xd0, 0x85, 0x2e, 0x40, - 0xd6, 0x30, 0x7d, 0xc7, 0xd5, 0x6d, 0x8b, 0x6a, 0xba, 0xa0, 0x65, 0x68, 0x7b, 0xc7, 0x2a, 0xfe, - 0xb6, 0x00, 0x4a, 0x68, 0x25, 0xfa, 0x32, 0x24, 0x3c, 0x1c, 0x44, 
0x4b, 0x3e, 0x6e, 0x23, 0x4a, - 0x75, 0xec, 0x6f, 0xcf, 0x69, 0x04, 0x46, 0xd0, 0x86, 0x65, 0x71, 0x8a, 0xc5, 0xa3, 0xcb, 0x96, - 0x45, 0xd0, 0x86, 0x65, 0xa1, 0x75, 0x48, 0x12, 0xf7, 0x51, 0xfd, 0x86, 0xb7, 0x6a, 0x00, 0x7f, - 0xe8, 0xf4, 0xf0, 0xf6, 0x9c, 0x46, 0x81, 0xe8, 0x36, 0xa4, 0x19, 0x05, 0xf8, 0xee, 0x5e, 0x8c, - 0x15, 0x61, 0xa4, 0xd8, 0x9e, 0xd3, 0x38, 0x98, 0xac, 0x83, 0x2d, 0x3b, 0xa0, 0x5c, 0xfc, 0x3a, - 0xaa, 0x65, 0x13, 0x2b, 0x28, 0x90, 0xac, 0xe3, 0xe1, 0x26, 0x36, 0x7d, 0xca, 0xb4, 0x71, 0xeb, - 0xd4, 0x29, 0x84, 0xac, 0xc3, 0xc0, 0x68, 0x13, 0x52, 0x9e, 0xdf, 0x6f, 0xe2, 0x7c, 0x86, 0x4a, - 0x15, 0xe2, 0xa5, 0x08, 0x62, 0x7b, 0x4e, 0x63, 0x50, 0x74, 0x17, 0xb2, 0x76, 0xdb, 0x74, 0xb1, - 0xe1, 0xe1, 0x7c, 0x96, 0x8a, 0x5d, 0x8e, 0x15, 0xdb, 0xe1, 0xa0, 0xed, 0x39, 0x2d, 0x14, 0x40, - 0xdf, 0x04, 0xc5, 0x77, 0x31, 0xd6, 0xa9, 0x75, 0xca, 0x04, 0xe9, 0x86, 0x8b, 0x31, 0xb7, 0x30, - 0xeb, 0xf3, 0x67, 0xf4, 0x6d, 0x00, 0x2a, 0xcd, 0x74, 0x06, 0x2a, 0xbe, 0x32, 0x56, 0x3c, 0xd0, - 0x9b, 0xae, 0x48, 0x1b, 0x85, 0x5f, 0x49, 0x90, 0xa8, 0x63, 0x9f, 0xc4, 0x77, 0xc7, 0x70, 0x09, - 0x59, 0x89, 0x5e, 0x3e, 0xb6, 0x74, 0x23, 0x60, 0xcc, 0xb8, 0xf8, 0x66, 0xf8, 0x0a, 0x83, 0x97, - 0xfd, 0x20, 0x2b, 0xca, 0x83, 0xac, 0xb8, 0x19, 0x64, 0x45, 0xc6, 0x8e, 0x4b, 0xf1, 0x89, 0xba, - 0x6e, 0xb7, 0x3a, 0xcd, 0x20, 0x3d, 0xa2, 0xaf, 0xc1, 0x3c, 0x7e, 0x8e, 0xcd, 0x2e, 0x57, 0x21, - 0x39, 0x49, 0x05, 0x08, 0x90, 0x65, 0xbf, 0xf0, 0x4f, 0x09, 0x12, 0x65, 0xcb, 0x3a, 0x09, 0x43, - 0xde, 0xa5, 0x99, 0xa0, 0x27, 0x4e, 0x20, 0x4f, 0x9a, 0xe0, 0x14, 0x41, 0x0f, 0xc4, 0x3f, 0x4f, - 0xab, 0xff, 0x2d, 0x41, 0x92, 0x84, 0xd7, 0x6b, 0x60, 0xf6, 0x2d, 0x00, 0x41, 0x32, 0x31, 0x49, - 0x52, 0x31, 0x43, 0xa9, 0x59, 0x0d, 0xff, 0x44, 0x82, 0x34, 0x4b, 0x12, 0x27, 0x61, 0xfa, 0xb0, - 0xee, 0xf2, 0x6c, 0xba, 0x27, 0xa6, 0xd5, 0xfd, 0x97, 0x49, 0x48, 0xd2, 0xe8, 0x3d, 0x01, 0xcd, - 0xaf, 0x43, 0xf2, 0xa9, 0xeb, 0xb4, 0xb8, 0xce, 0x62, 0x29, 0xd4, 0xc0, 0xcf, 0xfd, 0x9a, 0x63, - 0xe1, 
0x7d, 0xc7, 0xd3, 0x28, 0x06, 0x5d, 0x05, 0xd9, 0x77, 0xb8, 0x9a, 0xe3, 0x90, 0xb2, 0xef, - 0xa0, 0x23, 0x38, 0x3f, 0xd0, 0x47, 0x6f, 0x19, 0x1d, 0xfd, 0xb0, 0xaf, 0xd3, 0x57, 0x0b, 0x2f, - 0x14, 0x36, 0xc7, 0xa6, 0xdf, 0x52, 0xa8, 0xd9, 0x43, 0xa3, 0xb3, 0xd5, 0x2f, 0x13, 0x21, 0x56, - 0x50, 0x2d, 0x9b, 0xa3, 0x23, 0xe4, 0x1d, 0x6e, 0x3a, 0x6d, 0x1f, 0xb7, 0x59, 0x62, 0x57, 0xb4, - 0xa0, 0x19, 0xdd, 0xdb, 0xf4, 0x94, 0x7b, 0x8b, 0x76, 0x00, 0x0c, 0xdf, 0x77, 0xed, 0xc3, 0xae, - 0x8f, 0xbd, 0x7c, 0x86, 0xaa, 0x7b, 0x6d, 0xbc, 0xba, 0xe5, 0x10, 0xcb, 0xb4, 0x14, 0x84, 0x0b, - 0x3f, 0x80, 0xfc, 0x38, 0x6b, 0x62, 0x2a, 0xc0, 0x1b, 0xc3, 0x15, 0xe0, 0x18, 0x55, 0x07, 0x35, - 0x60, 0xe1, 0x5d, 0x58, 0x8a, 0xac, 0x1e, 0x33, 0xeb, 0x19, 0x71, 0x56, 0x45, 0x14, 0xff, 0x93, - 0x04, 0x69, 0xf6, 0xf6, 0x7a, 0x5d, 0x69, 0x34, 0x6b, 0x68, 0x7f, 0x26, 0x43, 0x8a, 0xbe, 0x9c, - 0x5e, 0x57, 0xc3, 0x1e, 0x0c, 0x71, 0x8c, 0x85, 0xc4, 0xf5, 0xf1, 0x85, 0xc2, 0x24, 0x92, 0x45, - 0x37, 0x29, 0x35, 0xed, 0x26, 0xfd, 0x8f, 0xec, 0xf9, 0x44, 0x82, 0x6c, 0x50, 0x8e, 0x9c, 0xc4, - 0x36, 0x6f, 0x0e, 0xb3, 0x7f, 0x96, 0x77, 0xde, 0xd4, 0xe9, 0xf3, 0x87, 0x32, 0x64, 0x83, 0x62, - 0xe8, 0x24, 0x74, 0xbf, 0x3a, 0x44, 0x11, 0x24, 0x4a, 0xb9, 0x58, 0xa0, 0x47, 0x51, 0xa0, 0x47, - 0x1c, 0x8a, 0x50, 0x63, 0x03, 0xb2, 0x3c, 0x83, 0x05, 0xc4, 0x38, 0x13, 0x41, 0x12, 0x22, 0x79, - 0x5a, 0x88, 0x9a, 0x99, 0x00, 0x9f, 0xc9, 0xa0, 0x84, 0x35, 0xdd, 0xeb, 0xb6, 0x0d, 0xb5, 0x98, - 0x08, 0x29, 0x4d, 0x2e, 0x4b, 0x5f, 0xc3, 0x28, 0xd9, 0x4a, 0x43, 0xf2, 0xd0, 0xb1, 0xfa, 0xc5, - 0x7f, 0x48, 0x70, 0x7a, 0x84, 0xc6, 0x91, 0xa2, 0x41, 0x9a, 0xb2, 0x68, 0xd8, 0x80, 0x2c, 0x3d, - 0xef, 0xbe, 0xb4, 0xd0, 0xc8, 0x50, 0x18, 0x2b, 0x4e, 0xf8, 0xa1, 0xf9, 0xe5, 0x85, 0x15, 0x07, - 0x96, 0x7d, 0xb4, 0x06, 0x49, 0xbf, 0xdf, 0x61, 0xa7, 0xac, 0xc5, 0x21, 0x16, 0x3e, 0x22, 0xf6, - 0x35, 0xfa, 0x1d, 0xac, 0x51, 0xc4, 0xc0, 0xfe, 0x14, 0x3d, 0x44, 0xb2, 0x46, 0xf1, 0xe7, 0xa7, - 0x60, 0x5e, 0xb0, 0x19, 0x55, 0x61, 0xfe, 
0x03, 0xcf, 0x69, 0xeb, 0xce, 0xe1, 0x07, 0xe4, 0x50, - 0xc5, 0xcc, 0x7d, 0x2b, 0x3e, 0xce, 0xe9, 0xf3, 0x1e, 0x05, 0x6e, 0xcf, 0x69, 0x40, 0xe4, 0x58, - 0x0b, 0x95, 0x81, 0xb6, 0x74, 0xc3, 0x75, 0x8d, 0x3e, 0xb7, 0x7f, 0x75, 0xc2, 0x24, 0x65, 0x82, - 0x23, 0x27, 0x16, 0x22, 0x45, 0x1b, 0xec, 0x83, 0x8e, 0xdd, 0xb2, 0x7d, 0x3b, 0x3c, 0x76, 0x8e, - 0x9b, 0x61, 0x3f, 0xc0, 0x91, 0x19, 0x42, 0x21, 0x74, 0x13, 0x92, 0x3e, 0x7e, 0x1e, 0xd0, 0xe8, - 0xe2, 0x18, 0x61, 0x92, 0xf5, 0xc9, 0x69, 0x92, 0x40, 0xd1, 0x1d, 0x52, 0xa8, 0x74, 0xdb, 0x3e, - 0x76, 0x79, 0x29, 0xb2, 0x32, 0x46, 0xaa, 0xc2, 0x50, 0xdb, 0x73, 0x5a, 0x20, 0x40, 0x97, 0x73, - 0x71, 0x70, 0xa2, 0x1c, 0xbb, 0x9c, 0x8b, 0xe9, 0x21, 0x99, 0x40, 0x0b, 0x9f, 0x4a, 0x00, 0x83, - 0x3d, 0x44, 0x6b, 0x90, 0x6a, 0x93, 0xb4, 0x91, 0x97, 0x68, 0x24, 0x89, 0x51, 0xa7, 0x6d, 0x37, - 0x48, 0x46, 0xd1, 0x18, 0x60, 0xc6, 0x42, 0x56, 0xe4, 0x64, 0x62, 0x06, 0x4e, 0x26, 0xa7, 0xe3, - 0x64, 0xe1, 0x8f, 0x12, 0x28, 0xa1, 0x57, 0x27, 0x5a, 0x75, 0xbf, 0xfc, 0xe6, 0x58, 0xf5, 0x37, - 0x09, 0x94, 0x90, 0x69, 0x61, 0xdc, 0x49, 0xd3, 0xc7, 0x9d, 0x2c, 0xc4, 0xdd, 0x8c, 0xc7, 0x28, - 0xd1, 0xd6, 0xe4, 0x0c, 0xb6, 0xa6, 0xa6, 0xb4, 0xf5, 0xf7, 0x12, 0x24, 0x49, 0x60, 0xa0, 0x6b, - 0xc3, 0xce, 0x5b, 0x8e, 0x29, 0x97, 0xde, 0x0c, 0xef, 0xfd, 0x55, 0x82, 0x0c, 0x0f, 0xda, 0xff, - 0x07, 0xdf, 0xb9, 0x18, 0x4f, 0xf4, 0x1d, 0xaf, 0x50, 0xde, 0x08, 0xdf, 0x85, 0xef, 0xe7, 0x87, - 0x90, 0xe1, 0x79, 0x30, 0xe6, 0xf5, 0xbe, 0x01, 0x19, 0xcc, 0x72, 0x6c, 0xcc, 0x21, 0x40, 0xbc, - 0x2f, 0x08, 0x60, 0x45, 0x13, 0x32, 0x3c, 0x01, 0x91, 0xa2, 0xa8, 0x4d, 0x5e, 0x15, 0xd2, 0x48, - 0xb9, 0x13, 0xa4, 0x28, 0x3a, 0x3e, 0xc3, 0x22, 0x8f, 0x20, 0x4b, 0xe4, 0x49, 0x79, 0x32, 0x60, - 0x93, 0x24, 0x54, 0x20, 0x64, 0x4f, 0xba, 0x1d, 0x6b, 0xba, 0xbd, 0xe7, 0xc0, 0xb2, 0x5f, 0xfc, - 0x1d, 0xa9, 0x8e, 0x79, 0x04, 0xa2, 0x2f, 0x0a, 0x1f, 0xd2, 0xcf, 0xc6, 0x84, 0x28, 0xff, 0x94, - 0x1e, 0x5b, 0x01, 0xcd, 0x58, 0x77, 0xdc, 0x86, 0x79, 0xbb, 0xed, 0xe9, 0xf4, 
0x4b, 0x12, 0xff, - 0x30, 0x3d, 0x76, 0x6d, 0xc5, 0x6e, 0x7b, 0xfb, 0x2e, 0xee, 0xed, 0x58, 0xa8, 0x32, 0x54, 0x31, - 0xa6, 0x28, 0x31, 0xdf, 0x8e, 0x91, 0x9a, 0x78, 0x62, 0xd7, 0xa6, 0x29, 0xf7, 0x26, 0x5c, 0xd5, - 0x04, 0x0e, 0x11, 0xaf, 0x6a, 0xde, 0x07, 0x18, 0x68, 0x3c, 0x63, 0xcd, 0x77, 0x0e, 0xd2, 0xce, - 0xd3, 0xa7, 0x1e, 0x66, 0x5e, 0x4c, 0x69, 0xbc, 0x55, 0xfc, 0x17, 0x3f, 0xc9, 0x50, 0x5f, 0x5d, - 0x81, 0x44, 0xc7, 0xf1, 0x62, 0x98, 0x16, 0x14, 0xd6, 0x64, 0x18, 0x21, 0x9e, 0xa2, 0x98, 0xa7, - 0x22, 0xc9, 0x28, 0x31, 0xde, 0x7d, 0x53, 0x86, 0x14, 0xba, 0x05, 0x0b, 0xa1, 0xfb, 0x88, 0x3a, - 0xa9, 0xb1, 0xea, 0x00, 0x77, 0xde, 0xbe, 0xe3, 0x11, 0x0d, 0x2c, 0xdc, 0xf1, 0x8f, 0x68, 0x71, - 0x94, 0xd2, 0x58, 0x23, 0xe2, 0xd3, 0xcc, 0xa8, 0x4f, 0xb9, 0xe9, 0x9f, 0xbb, 0x4f, 0xef, 0xb0, - 0xa3, 0x13, 0x3d, 0x8a, 0xa1, 0xaf, 0x0c, 0xbe, 0x41, 0x4d, 0xc8, 0x87, 0x01, 0xa6, 0xf8, 0x18, - 0x32, 0x7c, 0x07, 0x4e, 0x98, 0x0c, 0x2d, 0x48, 0x1e, 0x78, 0xd8, 0x45, 0x8b, 0x61, 0xcc, 0x2a, - 0x34, 0x38, 0x0b, 0x90, 0xed, 0x7a, 0xd8, 0x6d, 0x1b, 0xad, 0xc0, 0xeb, 0x61, 0x1b, 0x7d, 0x23, - 0xe6, 0x85, 0x53, 0x28, 0xb1, 0xab, 0xd4, 0x52, 0x70, 0x95, 0x4a, 0xf5, 0xa0, 0x77, 0xad, 0x82, - 0x1a, 0xc5, 0xff, 0xc8, 0x90, 0xd9, 0x77, 0x1d, 0x5a, 0x5f, 0x46, 0x97, 0x44, 0x90, 0x14, 0x96, - 0xa3, 0xcf, 0xe8, 0x32, 0x40, 0xa7, 0x7b, 0xd8, 0xb4, 0x4d, 0x7a, 0x43, 0xc9, 0x98, 0xa6, 0xb0, - 0x9e, 0xf7, 0x70, 0x9f, 0x0c, 0x7b, 0xd8, 0x74, 0x31, 0xbb, 0xc0, 0x4c, 0xb2, 0x61, 0xd6, 0x43, - 0x86, 0xd7, 0x20, 0x67, 0x74, 0xfd, 0x23, 0xfd, 0x23, 0x7c, 0x78, 0xe4, 0x38, 0xc7, 0x7a, 0xd7, - 0x6d, 0xf2, 0x2f, 0x7e, 0x8b, 0xa4, 0xff, 0x31, 0xeb, 0x3e, 0x70, 0x9b, 0x68, 0x03, 0xce, 0x0c, - 0x21, 0x5b, 0xd8, 0x3f, 0x72, 0x2c, 0x2f, 0x9f, 0x5e, 0x4d, 0xac, 0x29, 0x1a, 0x12, 0xd0, 0x0f, - 0xd9, 0x08, 0xfa, 0x16, 0x5c, 0xe4, 0xf7, 0x6c, 0x16, 0x36, 0x4c, 0xdf, 0xee, 0x19, 0x3e, 0xd6, - 0xfd, 0x23, 0x17, 0x7b, 0x47, 0x4e, 0xd3, 0xa2, 0x65, 0xb7, 0xa2, 0x5d, 0x60, 0x90, 0x6a, 0x88, - 0x68, 0x04, 0x80, 
0xc8, 0x26, 0x66, 0x5f, 0x61, 0x13, 0x89, 0xa8, 0x90, 0xa2, 0x95, 0x97, 0x8b, - 0x0e, 0xf2, 0xf4, 0x4f, 0x12, 0x70, 0xee, 0x80, 0xb4, 0x8c, 0xc3, 0x26, 0xe6, 0x8e, 0xb8, 0x67, - 0xe3, 0xa6, 0xe5, 0xa1, 0x0d, 0xbe, 0xfd, 0x12, 0xff, 0x96, 0x12, 0x9d, 0xaf, 0xee, 0xbb, 0x76, - 0xfb, 0x19, 0x2d, 0x49, 0xb8, 0x73, 0xee, 0xc5, 0x6c, 0xaf, 0x3c, 0x85, 0x74, 0x74, 0xf3, 0x9f, - 0x8e, 0xd9, 0x7c, 0xc6, 0xac, 0x5b, 0x02, 0xb7, 0xe3, 0x55, 0x2f, 0x95, 0x47, 0xdc, 0x13, 0xeb, - 0xb2, 0xef, 0x4f, 0x76, 0x59, 0x72, 0x0a, 0xd5, 0xc7, 0x3b, 0xb4, 0x50, 0x02, 0x34, 0xaa, 0x07, - 0xbb, 0x2f, 0x66, 0xe6, 0x48, 0x94, 0x4b, 0x41, 0xb3, 0xf8, 0xb1, 0x0c, 0x4b, 0x55, 0x7e, 0xd7, - 0x5e, 0xef, 0xb6, 0x5a, 0x86, 0xdb, 0x1f, 0x09, 0x89, 0xd1, 0xcb, 0xad, 0xe8, 0xd5, 0xba, 0x22, - 0x5c, 0xad, 0x0f, 0x53, 0x2a, 0xf9, 0x2a, 0x94, 0xba, 0x0b, 0xf3, 0x86, 0x69, 0x62, 0xcf, 0x13, - 0x8b, 0xbb, 0x49, 0xb2, 0x10, 0xc0, 0x47, 0xf8, 0x98, 0x7e, 0x15, 0x3e, 0xfe, 0x5d, 0x1a, 0xfc, - 0xe6, 0xc0, 0xaf, 0xe1, 0xdf, 0x19, 0x2a, 0x87, 0xaf, 0x8c, 0xbd, 0x06, 0xe7, 0xf7, 0xf2, 0x42, - 0x79, 0xbc, 0x0e, 0xd9, 0xe0, 0x66, 0x7c, 0xd2, 0x1f, 0x11, 0x21, 0xa8, 0xd8, 0x0a, 0xfe, 0x87, - 0x20, 0x93, 0xa0, 0x8b, 0x70, 0xbe, 0xb2, 0x5d, 0xae, 0xdd, 0x57, 0xf5, 0xc6, 0x93, 0x7d, 0x55, - 0x3f, 0xa8, 0xd5, 0xf7, 0xd5, 0xca, 0xce, 0xbd, 0x1d, 0xb5, 0x9a, 0x9b, 0x43, 0xcb, 0xb0, 0x24, - 0x0e, 0xee, 0x1f, 0x34, 0x72, 0x12, 0x3a, 0x07, 0x48, 0xec, 0xac, 0xaa, 0xbb, 0x6a, 0x43, 0xcd, - 0xc9, 0xe8, 0x2c, 0x9c, 0x16, 0xfb, 0x2b, 0xbb, 0x6a, 0x59, 0xcb, 0x25, 0x8a, 0x3d, 0xc8, 0x06, - 0x4a, 0x90, 0xe3, 0x39, 0xa1, 0x32, 0x4f, 0xfe, 0x97, 0x63, 0xf4, 0x2c, 0x55, 0x0d, 0xdf, 0x60, - 0x6f, 0x26, 0x0a, 0x2d, 0x7c, 0x1d, 0x94, 0xb0, 0xeb, 0x55, 0x3e, 0x28, 0x15, 0x6b, 0xc4, 0xcc, - 0xf0, 0xe7, 0x8c, 0xe1, 0x3f, 0x00, 0xa4, 0xb8, 0x3f, 0x00, 0x86, 0xff, 0x21, 0x90, 0x23, 0xff, - 0x10, 0x14, 0x7f, 0x24, 0xc1, 0xbc, 0xf0, 0x75, 0xfa, 0x64, 0xdf, 0x48, 0xe8, 0x4b, 0xb0, 0xe4, - 0xe2, 0xa6, 0x41, 0x8e, 0xb5, 0x3a, 0x07, 0x24, 0x28, 
0x60, 0x31, 0xe8, 0xde, 0x63, 0xaf, 0x2e, - 0x13, 0x60, 0x30, 0xb3, 0xf8, 0xd7, 0x82, 0x34, 0xfa, 0xd7, 0xc2, 0x25, 0x50, 0x2c, 0xdc, 0x24, - 0xa7, 0x65, 0xec, 0x06, 0x06, 0x85, 0x1d, 0x43, 0xff, 0x34, 0x24, 0x86, 0xff, 0x69, 0x38, 0x80, - 0x6c, 0xd5, 0x31, 0xd5, 0x1e, 0x6e, 0xfb, 0xe8, 0xc6, 0x10, 0x33, 0xcf, 0x0b, 0x16, 0x06, 0x10, - 0x81, 0x8c, 0x97, 0x80, 0xbd, 0xa7, 0xbc, 0x23, 0xbe, 0xe2, 0x82, 0x36, 0xe8, 0xb8, 0xfe, 0xa9, - 0x0c, 0x4a, 0x78, 0xba, 0x23, 0xe4, 0x7a, 0x54, 0xde, 0x3d, 0xe0, 0x74, 0xa9, 0x1d, 0xec, 0xee, - 0xe6, 0xe6, 0x08, 0xb9, 0x84, 0xce, 0xad, 0xbd, 0xbd, 0x5d, 0xb5, 0x5c, 0x63, 0xa4, 0x13, 0xfa, - 0x77, 0x6a, 0x0d, 0xf5, 0xbe, 0xaa, 0xe5, 0xe4, 0xc8, 0x24, 0xbb, 0x7b, 0xb5, 0xfb, 0xb9, 0x04, - 0x61, 0xa2, 0xd0, 0x59, 0xdd, 0x3b, 0xd8, 0xda, 0x55, 0x73, 0xc9, 0x48, 0x77, 0xbd, 0xa1, 0xed, - 0xd4, 0xee, 0xe7, 0x52, 0xe8, 0x0c, 0xe4, 0xc4, 0x25, 0x9f, 0x34, 0xd4, 0x7a, 0x2e, 0x1d, 0x99, - 0xb8, 0x5a, 0x6e, 0xa8, 0xb9, 0x0c, 0x2a, 0xc0, 0x39, 0xa1, 0x93, 0x9c, 0x35, 0xf4, 0xbd, 0xad, - 0x07, 0x6a, 0xa5, 0x91, 0xcb, 0xa2, 0x0b, 0x70, 0x36, 0x3a, 0x56, 0xd6, 0xb4, 0xf2, 0x93, 0x9c, - 0x12, 0x99, 0xab, 0xa1, 0x7e, 0xaf, 0x91, 0x83, 0xc8, 0x5c, 0xdc, 0x22, 0xbd, 0x52, 0x6b, 0xe4, - 0xe6, 0xd1, 0x79, 0x58, 0x8e, 0x58, 0x45, 0x07, 0x16, 0xa2, 0x33, 0x69, 0xaa, 0x9a, 0x3b, 0x75, - 0xfd, 0x63, 0x09, 0x16, 0x44, 0x5f, 0xa0, 0x2b, 0xb0, 0x5a, 0xdd, 0xab, 0xe8, 0xea, 0x23, 0xb5, - 0xd6, 0x08, 0xf6, 0xa0, 0x72, 0xf0, 0x50, 0xad, 0x35, 0xea, 0x3a, 0x0b, 0x51, 0x12, 0xdc, 0x93, - 0x50, 0x8f, 0xcb, 0x8d, 0xca, 0xb6, 0x5a, 0xcd, 0x49, 0xe8, 0x2a, 0x14, 0xc7, 0xa2, 0x0e, 0x6a, - 0x01, 0x4e, 0xde, 0xba, 0xf1, 0xeb, 0x17, 0x2b, 0xd2, 0x1f, 0x5e, 0xac, 0x48, 0x7f, 0x7e, 0xb1, - 0x22, 0xfd, 0xec, 0x2f, 0x2b, 0x73, 0x70, 0xda, 0xc2, 0xbd, 0x80, 0x2d, 0x46, 0xc7, 0x2e, 0xf5, - 0x6e, 0xee, 0x4b, 0xef, 0x27, 0x4b, 0x77, 0x7b, 0x37, 0x0f, 0xd3, 0x34, 0x3f, 0x7e, 0xf5, 0xbf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xa8, 0x34, 0x19, 0xd1, 0x26, 0x00, 0x00, + // 2599 bytes 
of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x8f, 0x1b, 0x49, + 0x15, 0x9f, 0x6e, 0xb7, 0x3f, 0xfa, 0x4d, 0x32, 0xe3, 0xd4, 0xe4, 0xc3, 0x71, 0x92, 0xd9, 0x89, + 0x97, 0x5d, 0x66, 0x13, 0xf0, 0x7c, 0xb0, 0xbb, 0x2c, 0x09, 0x01, 0x3c, 0x76, 0x27, 0xe3, 0xec, + 0xc4, 0x33, 0xb4, 0x3d, 0x59, 0xb2, 0x02, 0xb5, 0x7a, 0xba, 0x6b, 0x66, 0x7a, 0xc7, 0xee, 0xf6, + 0x76, 0xb7, 0xbd, 0xb1, 0x84, 0x84, 0x84, 0x40, 0xe2, 0xca, 0x0d, 0xfe, 0x04, 0x2e, 0xdc, 0xf7, + 0x08, 0x07, 0x84, 0x84, 0x10, 0x2b, 0xb1, 0x12, 0x57, 0x36, 0x1c, 0x10, 0xdc, 0x10, 0x12, 0x37, + 0x24, 0x54, 0x1f, 0xdd, 0x6e, 0xdb, 0x6d, 0x8f, 0x63, 0x86, 0x55, 0x22, 0x6e, 0x5d, 0x55, 0xbf, + 0x57, 0xf5, 0x5e, 0xbd, 0x5f, 0xbd, 0x7a, 0xd5, 0x55, 0x70, 0xb5, 0xe7, 0xb8, 0x27, 0x16, 0x5e, + 0xeb, 0x6e, 0xac, 0xb9, 0xd8, 0x73, 0x3a, 0xae, 0x81, 0xbd, 0x62, 0xdb, 0x75, 0x7c, 0x07, 0xc9, + 0xac, 0xa9, 0xd8, 0xdd, 0xc8, 0xbf, 0x72, 0xe4, 0x38, 0x47, 0x4d, 0xbc, 0x46, 0x1b, 0x0e, 0x3a, + 0x87, 0x6b, 0xbe, 0xd5, 0xc2, 0x9e, 0xaf, 0xb7, 0xda, 0x0c, 0x9b, 0x5f, 0x1e, 0x06, 0x7c, 0xe4, + 0xea, 0xed, 0x36, 0x76, 0x79, 0x5f, 0x85, 0xdf, 0x09, 0x90, 0xa9, 0xdb, 0x7a, 0xdb, 0x3b, 0x76, + 0x7c, 0x74, 0x0b, 0x24, 0xd7, 0x71, 0xfc, 0x9c, 0xb0, 0x22, 0xac, 0xce, 0x6f, 0x5e, 0x2e, 0x86, + 0xe3, 0x14, 0x1f, 0xd6, 0x77, 0x6b, 0x4a, 0x13, 0xb7, 0xb0, 0xed, 0xab, 0x14, 0x83, 0xbe, 0x05, + 0x72, 0xdb, 0xc5, 0x1e, 0xb6, 0x0d, 0xec, 0xe5, 0xc4, 0x95, 0xc4, 0xea, 0xfc, 0x66, 0x21, 0x22, + 0x10, 0xf4, 0x59, 0xdc, 0x0b, 0x40, 0x8a, 0xed, 0xbb, 0x3d, 0xb5, 0x2f, 0x94, 0xff, 0x36, 0x2c, + 0x0c, 0x36, 0xa2, 0x2c, 0x24, 0x4e, 0x70, 0x8f, 0x0e, 0x2f, 0xab, 0xe4, 0x13, 0xbd, 0x01, 0xc9, + 0xae, 0xde, 0xec, 0xe0, 0x9c, 0x48, 0x55, 0x5a, 0x8a, 0x8c, 0x10, 0xc8, 0xaa, 0x0c, 0x71, 0x47, + 0x7c, 0x47, 0x28, 0xfc, 0x54, 0x04, 0x28, 0x1f, 0xeb, 0xf6, 0x11, 0xde, 0xd3, 0x8d, 0x13, 0x74, + 0x13, 0xce, 0x99, 0x8e, 0xd1, 0x21, 0x5a, 0x6b, 0xfd, 0x8e, 0xe7, 0x83, 0xba, 0x77, 0x71, 0x0f, + 
0xbd, 0x05, 0x60, 0x1c, 0x63, 0xe3, 0xa4, 0xed, 0x58, 0xb6, 0xcf, 0x47, 0xb9, 0x14, 0x19, 0xa5, + 0x1c, 0x36, 0xaa, 0x11, 0x20, 0xca, 0x43, 0xc6, 0xe3, 0x16, 0xe6, 0x12, 0x2b, 0xc2, 0xea, 0x39, + 0x35, 0x2c, 0xa3, 0xdb, 0x90, 0x36, 0xa8, 0x0e, 0x5e, 0x4e, 0xa2, 0xf3, 0x72, 0x61, 0xa0, 0x3f, + 0xd2, 0xa2, 0x06, 0x08, 0x54, 0x82, 0x0b, 0x2d, 0xcb, 0xd6, 0xbc, 0x9e, 0x6d, 0x60, 0x53, 0xf3, + 0x2d, 0xe3, 0x04, 0xfb, 0xb9, 0xe4, 0x88, 0x1a, 0x0d, 0xab, 0x85, 0x1b, 0xb4, 0x51, 0x5d, 0x6c, + 0x59, 0x76, 0x9d, 0xc2, 0x59, 0x05, 0xba, 0x01, 0x60, 0x79, 0x9a, 0x8b, 0x5b, 0x4e, 0x17, 0x9b, + 0xb9, 0xd4, 0x8a, 0xb0, 0x9a, 0x51, 0x65, 0xcb, 0x53, 0x59, 0x45, 0xe1, 0x57, 0x02, 0xa4, 0xd8, + 0xa8, 0xe8, 0x55, 0x10, 0x2d, 0x93, 0x7b, 0x77, 0x69, 0x44, 0xa9, 0x6a, 0x45, 0x15, 0x2d, 0x13, + 0xe5, 0x20, 0xdd, 0xc2, 0x9e, 0xa7, 0x1f, 0xb1, 0x49, 0x97, 0xd5, 0xa0, 0x88, 0xde, 0x04, 0x70, + 0xda, 0xd8, 0xd5, 0x7d, 0xcb, 0xb1, 0xbd, 0x5c, 0x82, 0xda, 0x76, 0x31, 0xd2, 0xcd, 0x6e, 0xd0, + 0xa8, 0x46, 0x70, 0x68, 0x0b, 0x16, 0x03, 0x9f, 0x6b, 0xcc, 0xea, 0x9c, 0x44, 0x35, 0xb8, 0x1a, + 0xe3, 0x4c, 0x3e, 0x3d, 0x0b, 0xed, 0x81, 0x72, 0xe1, 0xc7, 0x02, 0x64, 0x02, 0x25, 0x89, 0xbd, + 0x46, 0xd3, 0x22, 0x3e, 0xf5, 0xf0, 0x87, 0xd4, 0x9a, 0xf3, 0xaa, 0xcc, 0x6a, 0xea, 0xf8, 0x43, + 0x74, 0x13, 0xc0, 0xc3, 0x6e, 0x17, 0xbb, 0xb4, 0x99, 0x98, 0x90, 0xd8, 0x12, 0xd7, 0x05, 0x55, + 0x66, 0xb5, 0x04, 0x72, 0x1d, 0xd2, 0x4d, 0xbd, 0xd5, 0x76, 0x5c, 0xe6, 0x3c, 0xd6, 0x1e, 0x54, + 0xa1, 0xab, 0x90, 0xd1, 0x0d, 0xdf, 0x71, 0x35, 0xcb, 0xa4, 0x9a, 0x9e, 0x53, 0xd3, 0xb4, 0x5c, + 0x35, 0x0b, 0x3f, 0xbf, 0x0e, 0x72, 0x68, 0x25, 0xfa, 0x12, 0x24, 0x3c, 0x1c, 0xac, 0x96, 0x5c, + 0xdc, 0x44, 0x14, 0xeb, 0xd8, 0xdf, 0x9e, 0x53, 0x09, 0x8c, 0xa0, 0x75, 0xd3, 0xe4, 0x14, 0x8b, + 0x47, 0x97, 0x4c, 0x93, 0xa0, 0x75, 0xd3, 0x44, 0x6b, 0x20, 0x11, 0xf7, 0x51, 0xfd, 0x06, 0xa7, + 0xaa, 0x0f, 0x7f, 0xe4, 0x74, 0xf1, 0xf6, 0x9c, 0x4a, 0x81, 0xe8, 0x2d, 0x48, 0x31, 0x0a, 0xf0, + 0xd9, 0xbd, 0x16, 0x2b, 0xc2, 0x48, 
0xb1, 0x3d, 0xa7, 0x72, 0x30, 0x19, 0x07, 0x9b, 0x56, 0x40, + 0xb9, 0xf8, 0x71, 0x14, 0xd3, 0x22, 0x56, 0x50, 0x20, 0x19, 0xc7, 0xc3, 0x4d, 0x6c, 0xf8, 0x94, + 0x69, 0xe3, 0xc6, 0xa9, 0x53, 0x08, 0x19, 0x87, 0x81, 0xd1, 0x26, 0x24, 0x3d, 0xbf, 0xd7, 0xc4, + 0xb9, 0x34, 0x95, 0xca, 0xc7, 0x4b, 0x11, 0xc4, 0xf6, 0x9c, 0xca, 0xa0, 0xe8, 0x2e, 0x64, 0x2c, + 0xdb, 0x70, 0xb1, 0xee, 0xe1, 0x5c, 0x86, 0x8a, 0xdd, 0x88, 0x15, 0xab, 0x72, 0xd0, 0xf6, 0x9c, + 0x1a, 0x0a, 0xa0, 0xaf, 0x83, 0xec, 0xbb, 0x18, 0x6b, 0xd4, 0x3a, 0x79, 0x82, 0x74, 0xc3, 0xc5, + 0x98, 0x5b, 0x98, 0xf1, 0xf9, 0x37, 0xfa, 0x26, 0x00, 0x95, 0x66, 0x3a, 0x03, 0x15, 0x5f, 0x1e, + 0x2b, 0x1e, 0xe8, 0x4d, 0x47, 0xa4, 0x85, 0xfc, 0x6f, 0x04, 0x48, 0xd4, 0xb1, 0x4f, 0xd6, 0x77, + 0x5b, 0x77, 0x09, 0x59, 0x89, 0x5e, 0x3e, 0x36, 0x35, 0x3d, 0x60, 0xcc, 0xb8, 0xf5, 0xcd, 0xf0, + 0x65, 0x06, 0x2f, 0xf9, 0x41, 0x54, 0x14, 0xfb, 0x51, 0x71, 0x33, 0x88, 0x8a, 0x8c, 0x1d, 0xd7, + 0xe3, 0x03, 0x75, 0xdd, 0x6a, 0xb5, 0x9b, 0x41, 0x78, 0x44, 0x6f, 0xc3, 0x3c, 0x7e, 0x8a, 0x8d, + 0x0e, 0x57, 0x41, 0x9a, 0xa4, 0x02, 0x04, 0xc8, 0x92, 0x9f, 0xff, 0xa7, 0x00, 0x89, 0x92, 0x69, + 0x9e, 0x85, 0x21, 0xf7, 0x68, 0x24, 0xe8, 0x46, 0x3b, 0x10, 0x27, 0x75, 0x70, 0x9e, 0xa0, 0xfb, + 0xe2, 0x9f, 0xa7, 0xd5, 0xff, 0x12, 0x40, 0x22, 0xcb, 0xeb, 0x05, 0x30, 0xfb, 0x4d, 0x80, 0x88, + 0x64, 0x62, 0x92, 0xa4, 0x6c, 0x84, 0x52, 0xb3, 0x1a, 0xfe, 0xb1, 0x00, 0x29, 0x16, 0x24, 0xce, + 0xc2, 0xf4, 0x41, 0xdd, 0xc5, 0xd9, 0x74, 0x4f, 0x4c, 0xab, 0xfb, 0xaf, 0x25, 0x90, 0xe8, 0xea, + 0x3d, 0x03, 0xcd, 0x6f, 0x81, 0x74, 0xe8, 0x3a, 0x2d, 0xae, 0x73, 0x34, 0x15, 0x6a, 0xe0, 0xa7, + 0x7e, 0xcd, 0x31, 0xf1, 0x9e, 0xe3, 0xa9, 0x14, 0x83, 0x5e, 0x07, 0xd1, 0x77, 0xb8, 0x9a, 0xe3, + 0x90, 0xa2, 0xef, 0xa0, 0x63, 0xb8, 0xd2, 0xd7, 0x47, 0x6b, 0xe9, 0x6d, 0xed, 0xa0, 0xa7, 0xd1, + 0xad, 0x85, 0x27, 0x0a, 0x9b, 0x63, 0xc3, 0x6f, 0x31, 0xd4, 0xec, 0x91, 0xde, 0xde, 0xea, 0x95, + 0x88, 0x10, 0x4b, 0xa8, 0x96, 0x8c, 0xd1, 0x16, 0xb2, 0x87, 0x1b, 0x8e, 
0xed, 0x63, 0x9b, 0x05, + 0x76, 0x59, 0x0d, 0x8a, 0xc3, 0x73, 0x9b, 0x9a, 0x72, 0x6e, 0x51, 0x15, 0x40, 0xf7, 0x7d, 0xd7, + 0x3a, 0xe8, 0xf8, 0xd8, 0xcb, 0xa5, 0xa9, 0xba, 0x6f, 0x8c, 0x57, 0xb7, 0x14, 0x62, 0x99, 0x96, + 0x11, 0xe1, 0xfc, 0xf7, 0x20, 0x37, 0xce, 0x9a, 0x98, 0x0c, 0xf0, 0xf6, 0x60, 0x06, 0x38, 0x46, + 0xd5, 0x7e, 0x0e, 0x98, 0xbf, 0x07, 0x8b, 0x43, 0xa3, 0xc7, 0xf4, 0x7a, 0x31, 0xda, 0xab, 0x1c, + 0x15, 0xff, 0x93, 0x00, 0x29, 0xb6, 0x7b, 0xbd, 0xa8, 0x34, 0x9a, 0x75, 0x69, 0x7f, 0x26, 0x42, + 0x92, 0x6e, 0x4e, 0x2f, 0xaa, 0x61, 0x0f, 0x07, 0x38, 0xc6, 0x96, 0xc4, 0xad, 0xf1, 0x89, 0xc2, + 0x24, 0x92, 0x0d, 0x4f, 0x52, 0x72, 0xda, 0x49, 0xfa, 0x2f, 0xd9, 0xf3, 0xb1, 0x00, 0x99, 0x20, + 0x1d, 0x39, 0x8b, 0x69, 0xde, 0x1c, 0x64, 0xff, 0x2c, 0x7b, 0xde, 0xd4, 0xe1, 0xf3, 0x93, 0x04, + 0x64, 0x82, 0x64, 0xe8, 0x2c, 0x74, 0x7f, 0x7d, 0x80, 0x22, 0x28, 0x2a, 0xe5, 0xe2, 0x08, 0x3d, + 0x0a, 0x11, 0x7a, 0xc4, 0xa1, 0x08, 0x35, 0x9a, 0xa7, 0x85, 0xce, 0xb7, 0x27, 0xe6, 0x76, 0xcf, + 0x19, 0x3e, 0xd7, 0x21, 0xc3, 0xe3, 0xa5, 0x97, 0x4b, 0x8e, 0x1c, 0x73, 0x48, 0xa7, 0x84, 0xb6, + 0x9e, 0x1a, 0xa2, 0x66, 0x0d, 0xab, 0xff, 0xeb, 0x58, 0xf8, 0x99, 0x08, 0x72, 0x98, 0xa0, 0xbe, + 0x68, 0x3e, 0xad, 0xc5, 0x2c, 0xf7, 0xe2, 0xe4, 0x1c, 0xfb, 0x05, 0x5c, 0xf2, 0x5b, 0x29, 0x90, + 0x0e, 0x1c, 0xb3, 0x57, 0xf8, 0x87, 0x00, 0x17, 0x46, 0xd6, 0xe4, 0x50, 0x06, 0x24, 0x4c, 0x99, + 0x01, 0xad, 0x43, 0x86, 0x1e, 0xde, 0x4f, 0xcd, 0x9a, 0xd2, 0x14, 0xc6, 0x32, 0x2d, 0xfe, 0x07, + 0xe0, 0xf4, 0x2c, 0x91, 0x03, 0x4b, 0x3e, 0x5a, 0x05, 0xc9, 0xef, 0xb5, 0xd9, 0x91, 0x71, 0x61, + 0x80, 0xe4, 0x8f, 0x89, 0x7d, 0x8d, 0x5e, 0x1b, 0xab, 0x14, 0xd1, 0xb7, 0x3f, 0x49, 0x4f, 0xc4, + 0xac, 0x50, 0xf8, 0xc5, 0x79, 0x98, 0x8f, 0xd8, 0x8c, 0x2a, 0x30, 0xff, 0x81, 0xe7, 0xd8, 0x9a, + 0x73, 0xf0, 0x01, 0x39, 0x21, 0x32, 0x73, 0x6f, 0xc6, 0x07, 0x2d, 0xfa, 0xbd, 0x4b, 0x81, 0xdb, + 0x73, 0x2a, 0x10, 0x39, 0x56, 0x42, 0x25, 0xa0, 0x25, 0x4d, 0x77, 0x5d, 0xbd, 0xc7, 0xed, 0x5f, + 0x99, 0xd0, 
0x49, 0x89, 0xe0, 0xc8, 0xf1, 0x8b, 0x48, 0xd1, 0x02, 0xfb, 0x3b, 0x65, 0xb5, 0x2c, + 0xdf, 0x0a, 0xcf, 0xd0, 0xe3, 0x7a, 0xd8, 0x0b, 0x70, 0xa4, 0x87, 0x50, 0x08, 0x6d, 0x80, 0xe4, + 0xe3, 0xa7, 0x01, 0x8d, 0xae, 0x8d, 0x11, 0x26, 0x5b, 0x18, 0x39, 0x1a, 0x13, 0x28, 0xba, 0x43, + 0xb2, 0xae, 0x8e, 0xed, 0x63, 0x97, 0x07, 0x80, 0xe5, 0x31, 0x52, 0x65, 0x86, 0xda, 0x9e, 0x53, + 0x03, 0x01, 0x3a, 0x9c, 0x8b, 0x83, 0xe3, 0xf1, 0xd8, 0xe1, 0x5c, 0x4c, 0x4f, 0xfc, 0x04, 0x9a, + 0xff, 0x54, 0x00, 0xe8, 0xcf, 0x21, 0x5a, 0x85, 0xa4, 0x4d, 0xa2, 0x52, 0x4e, 0xa0, 0x2b, 0x29, + 0xba, 0xea, 0xd4, 0xed, 0x06, 0x09, 0x58, 0x2a, 0x03, 0xcc, 0x98, 0x95, 0x47, 0x39, 0x99, 0x98, + 0x81, 0x93, 0xd2, 0x74, 0x9c, 0xcc, 0xff, 0x51, 0x00, 0x39, 0xf4, 0xea, 0x44, 0xab, 0x1e, 0x94, + 0x5e, 0x1e, 0xab, 0xfe, 0x26, 0x80, 0x1c, 0x32, 0x2d, 0x5c, 0x77, 0xc2, 0xf4, 0xeb, 0x4e, 0x8c, + 0xac, 0xbb, 0x19, 0xcf, 0x84, 0x51, 0x5b, 0xa5, 0x19, 0x6c, 0x4d, 0x4e, 0x69, 0xeb, 0x1f, 0x04, + 0x90, 0xc8, 0xc2, 0x40, 0x6f, 0x0c, 0x3a, 0x6f, 0x29, 0x26, 0xf7, 0x7b, 0x39, 0xbc, 0xf7, 0x57, + 0x01, 0xd2, 0x7c, 0xd1, 0xfe, 0x3f, 0xf8, 0xce, 0xc5, 0x78, 0xa2, 0xef, 0x78, 0x02, 0xf4, 0x52, + 0xf8, 0x2e, 0xdc, 0x9f, 0x1f, 0x41, 0x9a, 0xc7, 0xc1, 0x98, 0xed, 0x7d, 0x1d, 0xd2, 0x98, 0xc5, + 0xd8, 0x98, 0x13, 0x4d, 0xf4, 0xf2, 0x23, 0x80, 0x15, 0x0c, 0x48, 0xf3, 0x00, 0x44, 0x92, 0x22, + 0x9b, 0x6c, 0x15, 0xc2, 0x48, 0xba, 0x13, 0x84, 0x28, 0xda, 0x3e, 0xc3, 0x20, 0x8f, 0x21, 0x43, + 0xe4, 0x49, 0x7a, 0xd2, 0x67, 0x93, 0x10, 0xc9, 0x40, 0xc8, 0x9c, 0x74, 0xda, 0xe6, 0x74, 0x73, + 0xcf, 0x81, 0x25, 0xbf, 0xf0, 0x7b, 0x11, 0x32, 0xc1, 0x0a, 0x44, 0xaf, 0x45, 0x6e, 0x05, 0x2e, + 0xc5, 0x2c, 0x51, 0x7e, 0x2f, 0x10, 0x9b, 0x01, 0xcd, 0x98, 0x77, 0xbc, 0x05, 0xf3, 0x96, 0xed, + 0x69, 0xf4, 0xb7, 0x18, 0xff, 0xcb, 0x3e, 0x76, 0x6c, 0xd9, 0xb2, 0xbd, 0x3d, 0x17, 0x77, 0xab, + 0x26, 0x2a, 0x0f, 0x64, 0x8c, 0x2c, 0x33, 0x7f, 0x35, 0x46, 0x6a, 0xe2, 0xef, 0x07, 0x75, 0x9a, + 0x74, 0x6f, 0xc2, 0xbd, 0x53, 0xe0, 0x90, 0xe8, 
0xbd, 0xd3, 0xfb, 0x00, 0x7d, 0x8d, 0x67, 0xcc, + 0xf9, 0x2e, 0x43, 0xca, 0x39, 0x3c, 0xf4, 0x30, 0xf3, 0x62, 0x52, 0xe5, 0xa5, 0xc2, 0x2f, 0xf9, + 0xb1, 0x6c, 0xb2, 0xaf, 0x38, 0x80, 0xfb, 0x0a, 0xf1, 0x18, 0xc5, 0x5c, 0x35, 0x14, 0x8d, 0x12, + 0xe3, 0xfd, 0x27, 0xcd, 0xe6, 0xbf, 0xe4, 0x24, 0x7d, 0x22, 0xfe, 0xe3, 0x62, 0x64, 0x31, 0x10, + 0xb1, 0xd4, 0x69, 0x62, 0x35, 0xfc, 0xd4, 0xaf, 0x52, 0xe6, 0x99, 0xb8, 0xed, 0x1f, 0xd3, 0xe4, + 0x28, 0xa9, 0xb2, 0xc2, 0x10, 0x19, 0x32, 0xa3, 0x64, 0xe0, 0x7d, 0x7d, 0xee, 0x64, 0xb8, 0xc3, + 0xce, 0x5c, 0xf4, 0x88, 0x88, 0xbe, 0xdc, 0xff, 0x13, 0x37, 0x21, 0x90, 0x06, 0x18, 0x4a, 0xa4, + 0x70, 0x0e, 0xce, 0x98, 0x48, 0xdf, 0x87, 0x34, 0x3f, 0x7e, 0xa1, 0x4d, 0x90, 0xf9, 0x49, 0xf0, + 0x34, 0x36, 0x65, 0x18, 0xae, 0x6a, 0xa2, 0x7b, 0xb0, 0xd8, 0xc4, 0x87, 0xbe, 0xe6, 0x59, 0x07, + 0x4d, 0xcb, 0x3e, 0x22, 0x92, 0xe2, 0x24, 0xc9, 0xf3, 0x04, 0x5d, 0x67, 0xe0, 0xaa, 0x59, 0x68, + 0x81, 0xb4, 0xef, 0x61, 0x17, 0x2d, 0x84, 0x0c, 0x96, 0x29, 0x55, 0xf3, 0x90, 0xe9, 0x78, 0xd8, + 0xb5, 0xf5, 0x56, 0x40, 0xd7, 0xb0, 0x8c, 0xbe, 0x16, 0xb3, 0x55, 0xe6, 0x8b, 0xec, 0x46, 0xbb, + 0x18, 0xdc, 0x68, 0xd3, 0x59, 0xa0, 0x57, 0xde, 0x91, 0x49, 0x28, 0xfc, 0x5b, 0x84, 0xf4, 0x9e, + 0xeb, 0xd0, 0xcc, 0x78, 0x78, 0x48, 0x04, 0x52, 0x64, 0x38, 0xfa, 0x8d, 0x6e, 0x00, 0xb4, 0x3b, + 0x07, 0x4d, 0xcb, 0xa0, 0x17, 0xc5, 0x6c, 0x89, 0xc8, 0xac, 0xe6, 0x5d, 0xdc, 0x23, 0xcd, 0x1e, + 0x36, 0x5c, 0xcc, 0xee, 0x91, 0x25, 0xd6, 0xcc, 0x6a, 0x48, 0xf3, 0x2a, 0x64, 0xf5, 0x8e, 0x7f, + 0xac, 0x7d, 0x84, 0x0f, 0x8e, 0x1d, 0xe7, 0x44, 0xeb, 0xb8, 0x4d, 0xfe, 0xe3, 0x75, 0x81, 0xd4, + 0xbf, 0xc7, 0xaa, 0xf7, 0xdd, 0x26, 0x5a, 0x87, 0x8b, 0x03, 0xc8, 0x16, 0xf6, 0x8f, 0x1d, 0xd3, + 0xcb, 0xa5, 0x56, 0x12, 0xab, 0xb2, 0x8a, 0x22, 0xe8, 0x47, 0xac, 0x05, 0x7d, 0x03, 0xae, 0xf1, + 0xeb, 0x4e, 0x13, 0xeb, 0x86, 0x6f, 0x75, 0x75, 0x1f, 0x6b, 0xfe, 0xb1, 0x8b, 0xbd, 0x63, 0xa7, + 0x69, 0xd2, 0x35, 0x21, 0xab, 0x57, 0x19, 0xa4, 0x12, 0x22, 0x1a, 0x01, 0x60, 0x68, 
0x12, 0x33, + 0xcf, 0x31, 0x89, 0x44, 0x34, 0xb2, 0xb9, 0xc8, 0xa7, 0x8b, 0xf6, 0x77, 0x98, 0x9f, 0x24, 0xe0, + 0xf2, 0x3e, 0x29, 0xe9, 0x07, 0x4d, 0xcc, 0x1d, 0x71, 0xdf, 0xc2, 0x4d, 0xd3, 0x43, 0xeb, 0x7c, + 0xfa, 0x05, 0xfe, 0x4b, 0x6b, 0xb8, 0xbf, 0xba, 0xef, 0x5a, 0xf6, 0x11, 0x4d, 0xa6, 0xb8, 0x73, + 0xee, 0xc7, 0x4c, 0xaf, 0x38, 0x85, 0xf4, 0xf0, 0xe4, 0x1f, 0x8e, 0x99, 0x7c, 0xc6, 0xac, 0x37, + 0x23, 0x3c, 0x8e, 0x57, 0xbd, 0x58, 0x1a, 0x71, 0x4f, 0xac, 0xcb, 0xbe, 0x3b, 0xd9, 0x65, 0xd2, + 0x14, 0xaa, 0x8f, 0x77, 0x68, 0xbe, 0x08, 0x68, 0x54, 0x0f, 0x76, 0x6d, 0xcf, 0xcc, 0x11, 0x28, + 0x97, 0x82, 0x62, 0xe1, 0x87, 0x22, 0x2c, 0x56, 0xf8, 0x93, 0x87, 0x7a, 0xa7, 0xd5, 0xd2, 0xdd, + 0xde, 0xc8, 0x92, 0x18, 0xbd, 0x63, 0x1c, 0x7e, 0xe1, 0x20, 0x47, 0x5e, 0x38, 0x0c, 0x52, 0x4a, + 0x7a, 0x1e, 0x4a, 0xdd, 0x85, 0x79, 0xdd, 0x30, 0xb0, 0xe7, 0x45, 0xd3, 0xd2, 0x49, 0xb2, 0x10, + 0xc0, 0x47, 0xf8, 0x98, 0x7a, 0x1e, 0x3e, 0xfe, 0x5d, 0xe8, 0xbf, 0x36, 0xe1, 0xaf, 0x21, 0xde, + 0x19, 0x48, 0xe4, 0xbf, 0x30, 0xf6, 0x35, 0x02, 0x7f, 0x1e, 0x11, 0x49, 0xec, 0xd7, 0x20, 0x13, + 0x3c, 0x50, 0x98, 0xf4, 0x30, 0x25, 0x04, 0x15, 0x5a, 0xc1, 0xb3, 0x14, 0xd2, 0x09, 0xba, 0x06, + 0x57, 0xca, 0xdb, 0xa5, 0xda, 0x03, 0x45, 0x6b, 0x3c, 0xd9, 0x53, 0xb4, 0xfd, 0x5a, 0x7d, 0x4f, + 0x29, 0x57, 0xef, 0x57, 0x95, 0x4a, 0x76, 0x0e, 0x2d, 0xc1, 0x62, 0xb4, 0x71, 0x6f, 0xbf, 0x91, + 0x15, 0xd0, 0x65, 0x40, 0xd1, 0xca, 0x8a, 0xb2, 0xa3, 0x34, 0x94, 0xac, 0x88, 0x2e, 0xc1, 0x85, + 0x68, 0x7d, 0x79, 0x47, 0x29, 0xa9, 0xd9, 0x44, 0xa1, 0x0b, 0x99, 0x40, 0x09, 0xb4, 0x01, 0x12, + 0xa1, 0x32, 0xdf, 0x7d, 0x6e, 0xc4, 0xe8, 0x59, 0xac, 0xe8, 0xbe, 0xce, 0xb6, 0x46, 0x0a, 0xcd, + 0x7f, 0x15, 0xe4, 0xb0, 0xea, 0x79, 0x7e, 0x85, 0x15, 0x6a, 0xc4, 0xcc, 0xf0, 0x8d, 0xcc, 0xe0, + 0x43, 0x0c, 0x21, 0xee, 0x21, 0xc6, 0xe0, 0x53, 0x0e, 0x71, 0xe8, 0x29, 0x47, 0xe1, 0x47, 0x02, + 0xcc, 0x47, 0x2e, 0x09, 0xce, 0x76, 0x3f, 0x44, 0x5f, 0x84, 0x45, 0x17, 0x37, 0x75, 0x72, 0x20, + 0xd7, 0x38, 0x20, 0x41, 
0x01, 0x0b, 0x41, 0xf5, 0x2e, 0xdb, 0x38, 0x0d, 0x80, 0x7e, 0xcf, 0xd1, + 0xc7, 0x23, 0xc2, 0xe8, 0xe3, 0x91, 0xeb, 0x20, 0x9b, 0xb8, 0x49, 0xce, 0xf9, 0xd8, 0x0d, 0x0c, + 0x0a, 0x2b, 0x06, 0x9e, 0x96, 0x24, 0x06, 0x9f, 0x96, 0xec, 0x43, 0xa6, 0xe2, 0x18, 0x4a, 0x17, + 0xdb, 0x3e, 0xba, 0x3d, 0xc0, 0xcc, 0x2b, 0x11, 0x0b, 0x03, 0x48, 0x84, 0x8c, 0xd7, 0x81, 0xed, + 0x53, 0xde, 0x31, 0x1f, 0x31, 0xd8, 0xb8, 0x48, 0xc5, 0xad, 0x4f, 0x45, 0x90, 0xc3, 0x73, 0x29, + 0x21, 0xd7, 0xe3, 0xd2, 0xce, 0x3e, 0xa7, 0x4b, 0x6d, 0x7f, 0x67, 0x27, 0x3b, 0x47, 0xc8, 0x15, + 0xa9, 0xdc, 0xda, 0xdd, 0xdd, 0x51, 0x4a, 0x35, 0x46, 0xba, 0x48, 0x7d, 0xb5, 0xd6, 0x50, 0x1e, + 0x28, 0x6a, 0x56, 0x1c, 0xea, 0x64, 0x67, 0xb7, 0xf6, 0x20, 0x9b, 0x20, 0x4c, 0x8c, 0x54, 0x56, + 0x76, 0xf7, 0xb7, 0x76, 0x94, 0xac, 0x34, 0x54, 0x5d, 0x6f, 0xa8, 0xd5, 0xda, 0x83, 0x6c, 0x12, + 0x5d, 0x84, 0x6c, 0x74, 0xc8, 0x27, 0x0d, 0xa5, 0x9e, 0x4d, 0x0d, 0x75, 0x5c, 0x29, 0x35, 0x94, + 0x6c, 0x1a, 0xe5, 0xe1, 0x72, 0xa4, 0x92, 0x9c, 0x92, 0xb4, 0xdd, 0xad, 0x87, 0x4a, 0xb9, 0x91, + 0xcd, 0xa0, 0xab, 0x70, 0x69, 0xb8, 0xad, 0xa4, 0xaa, 0xa5, 0x27, 0x59, 0x79, 0xa8, 0xaf, 0x86, + 0xf2, 0x9d, 0x46, 0x16, 0x86, 0xfa, 0xe2, 0x16, 0x69, 0xe5, 0x5a, 0x23, 0x3b, 0x8f, 0xae, 0xc0, + 0xd2, 0x90, 0x55, 0xb4, 0xe1, 0xdc, 0x70, 0x4f, 0xaa, 0xa2, 0x64, 0xcf, 0xdf, 0xfa, 0x01, 0x9c, + 0x8b, 0xba, 0x02, 0xbd, 0x0a, 0xaf, 0x54, 0x76, 0xcb, 0x9a, 0xf2, 0x58, 0xa9, 0x35, 0x82, 0x29, + 0x28, 0xef, 0x3f, 0x22, 0x25, 0xb6, 0x40, 0xc9, 0xd2, 0x9e, 0x00, 0x7a, 0xaf, 0xd4, 0x28, 0x6f, + 0x2b, 0x95, 0xac, 0x80, 0x5e, 0x83, 0x9b, 0xe3, 0x40, 0xfb, 0xb5, 0x00, 0x26, 0x6e, 0xdd, 0xfe, + 0xed, 0xb3, 0x65, 0xe1, 0x93, 0x67, 0xcb, 0xc2, 0x9f, 0x9f, 0x2d, 0x0b, 0x3f, 0xfb, 0xcb, 0xf2, + 0x1c, 0x5c, 0x30, 0x71, 0x37, 0x60, 0x8a, 0xde, 0xb6, 0x8a, 0xdd, 0x8d, 0x3d, 0xe1, 0x7d, 0xa9, + 0x78, 0xb7, 0xbb, 0x71, 0x90, 0xa2, 0xb1, 0xf1, 0x2b, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x63, + 0xfc, 0xff, 0x26, 0x54, 0x28, 0x00, 0x00, } func (m *Snapshot) 
Marshal() (dAtA []byte, err error) { @@ -4664,7 +4745,7 @@ func (m *Operation_TreeEdit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintResources(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x32 } if len(m.Contents) > 0 { for iNdEx := len(m.Contents) - 1; iNdEx >= 0; iNdEx-- { @@ -4677,6 +4758,32 @@ func (m *Operation_TreeEdit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintResources(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x2a + } + } + if len(m.CreatedAtMapByActor) > 0 { + for k := range m.CreatedAtMapByActor { + v := m.CreatedAtMapByActor[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintResources(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintResources(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x22 } } @@ -5838,17 +5945,29 @@ func (m *TreeNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintResources(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 } } if m.Depth != 0 { i = encodeVarintResources(dAtA, i, uint64(m.Depth)) i-- - dAtA[i] = 0x30 + dAtA[i] = 0x38 + } + if m.InsNextId != nil { + { + size, err := m.InsNextId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - if m.InsPrevPos != nil { + if m.InsPrevId != nil { { - size, err := m.InsPrevPos.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.InsPrevId.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5884,9 +6003,9 @@ func (m *TreeNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Pos != nil { + if m.Id != nil { { - size, err := m.Pos.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) if err 
!= nil { return 0, err } @@ -5940,7 +6059,7 @@ func (m *TreeNodes) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TreePos) Marshal() (dAtA []byte, err error) { +func (m *TreeNodeID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5950,12 +6069,12 @@ func (m *TreePos) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TreePos) MarshalTo(dAtA []byte) (int, error) { +func (m *TreeNodeID) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TreePos) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TreeNodeID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5984,6 +6103,57 @@ func (m *TreePos) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TreePos) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TreePos) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TreePos) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.LeftSiblingId != nil { + { + size, err := m.LeftSiblingId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ParentId != nil { + { + size, err := m.ParentId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *User) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) @@ -7127,6 +7297,19 @@ func (m *Operation_TreeEdit) Size() (n int) { l = m.To.Size() n += 1 + l + sovResources(uint64(l)) } + if len(m.CreatedAtMapByActor) > 0 { + for k, v := range m.CreatedAtMapByActor { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovResources(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovResources(uint64(len(k))) + l + n += mapEntrySize + 1 + sovResources(uint64(mapEntrySize)) + } + } if len(m.Contents) > 0 { for _, e := range m.Contents { l = e.Size() @@ -7605,8 +7788,8 @@ func (m *TreeNode) Size() (n int) { } var l int _ = l - if m.Pos != nil { - l = m.Pos.Size() + if m.Id != nil { + l = m.Id.Size() n += 1 + l + sovResources(uint64(l)) } l = len(m.Type) @@ -7621,8 +7804,12 @@ func (m *TreeNode) Size() (n int) { l = m.RemovedAt.Size() n += 1 + l + sovResources(uint64(l)) } - if m.InsPrevPos != nil { - l = m.InsPrevPos.Size() + if m.InsPrevId != nil { + l = m.InsPrevId.Size() + n += 1 + l + sovResources(uint64(l)) + } + if m.InsNextId != nil { + l = m.InsNextId.Size() n += 1 + l + sovResources(uint64(l)) } if m.Depth != 0 { @@ -7665,7 +7852,7 @@ func (m *TreeNodes) Size() (n int) { return n } -func (m *TreePos) Size() (n int) { +func (m *TreeNodeID) Size() (n int) { if m == nil { return 0 } @@ -7684,6 +7871,26 @@ func (m *TreePos) Size() (n int) { return n } +func (m *TreePos) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentId != nil { + l = m.ParentId.Size() + n += 1 + l + sovResources(uint64(l)) + } + if m.LeftSiblingId != nil { + l = m.LeftSiblingId.Size() + n += 1 + l + sovResources(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *User) Size() (n int) { if m == nil { return 0 @@ -11195,7 +11402,7 @@ func (m *Operation_TreeEdit) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Contents", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAtMapByActor", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11222,22 +11429,151 @@ func (m *Operation_TreeEdit) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Contents = append(m.Contents, &TreeNodes{}) - if err := m.Contents[len(m.Contents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecutedAt", wireType) + if m.CreatedAtMapByActor == nil { + m.CreatedAtMapByActor = make(map[string]*TimeTicket) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResources - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *TimeTicket + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthResources + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthResources + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthResources + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthResources + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &TimeTicket{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipResources(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResources + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.CreatedAtMapByActor[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Contents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Contents = append(m.Contents, &TreeNodes{}) + if err := m.Contents[len(m.Contents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ @@ -14094,7 +14430,7 @@ func (m *TreeNode) 
Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14121,10 +14457,10 @@ func (m *TreeNode) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Pos == nil { - m.Pos = &TreePos{} + if m.Id == nil { + m.Id = &TreeNodeID{} } - if err := m.Pos.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14230,7 +14566,7 @@ func (m *TreeNode) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InsPrevPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InsPrevId", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14257,14 +14593,50 @@ func (m *TreeNode) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.InsPrevPos == nil { - m.InsPrevPos = &TreePos{} + if m.InsPrevId == nil { + m.InsPrevId = &TreeNodeID{} } - if err := m.InsPrevPos.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.InsPrevId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InsNextId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InsNextId == nil { + 
m.InsNextId = &TreeNodeID{} + } + if err := m.InsNextId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Depth", wireType) } @@ -14283,7 +14655,7 @@ func (m *TreeNode) Unmarshal(dAtA []byte) error { break } } - case 7: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } @@ -14519,7 +14891,7 @@ func (m *TreeNodes) Unmarshal(dAtA []byte) error { } return nil } -func (m *TreePos) Unmarshal(dAtA []byte) error { +func (m *TreeNodeID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14542,10 +14914,10 @@ func (m *TreePos) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TreePos: wiretype end group for non-group") + return fmt.Errorf("proto: TreeNodeID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TreePos: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TreeNodeID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14625,6 +14997,129 @@ func (m *TreePos) Unmarshal(dAtA []byte) error { } return nil } +func (m *TreePos) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TreePos: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TreePos: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ParentId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentId == nil { + m.ParentId = &TreeNodeID{} + } + if err := m.ParentId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeftSiblingId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeftSiblingId == nil { + m.LeftSiblingId = &TreeNodeID{} + } + if err := m.LeftSiblingId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResources(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResources + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *User) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16329,7 +16824,7 @@ func (m *DocEvent) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Publisher", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowResources @@ -16339,25 +16834,23 @@ func (m *DocEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthResources } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthResources } if postIndex > l { return io.ErrUnexpectedEOF } - m.Publisher = append(m.Publisher[:0], dAtA[iNdEx:postIndex]...) - if m.Publisher == nil { - m.Publisher = []byte{} - } + m.Publisher = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/api/yorkie/v1/resources.proto b/api/yorkie/v1/resources.proto index 46d879aad..281b5906d 100644 --- a/api/yorkie/v1/resources.proto +++ b/api/yorkie/v1/resources.proto @@ -95,6 +95,10 @@ message Operation { TimeTicket executed_at = 6; map attributes = 7; } + // NOTE(hackerwins): Select Operation is not used in the current version. + // In the previous version, it was used to represent selection of Text. + // However, it has been replaced by Presence now. It is retained for backward + // compatibility purposes. 
message Select { TimeTicket parent_created_at = 1; TextNodePos from = 2; @@ -117,8 +121,9 @@ message Operation { TimeTicket parent_created_at = 1; TreePos from = 2; TreePos to = 3; - repeated TreeNodes contents = 4; - TimeTicket executed_at = 5; + map created_at_map_by_actor = 4; + repeated TreeNodes contents = 5; + TimeTicket executed_at = 6; } message TreeStyle { TimeTicket parent_created_at = 1; @@ -233,24 +238,30 @@ message TextNodeID { } message TreeNode { - TreePos pos = 1; + TreeNodeID id = 1; string type = 2; string value = 3; TimeTicket removed_at = 4; - TreePos ins_prev_pos = 5; - int32 depth = 6; - map attributes = 7; + TreeNodeID ins_prev_id = 5; + TreeNodeID ins_next_id = 6; + int32 depth = 7; + map attributes = 8; } message TreeNodes { repeated TreeNode content = 1; } -message TreePos { +message TreeNodeID { TimeTicket created_at = 1; int32 offset = 2; } +message TreePos { + TreeNodeID parent_id = 1; + TreeNodeID left_sibling_id = 2; +} + ///////////////////////////////////////// // Messages for Common // ///////////////////////////////////////// @@ -343,12 +354,12 @@ enum ValueType { } enum DocEventType { - DOC_EVENT_TYPE_DOCUMENTS_CHANGED = 0; - DOC_EVENT_TYPE_DOCUMENTS_WATCHED = 1; - DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED = 2; + DOC_EVENT_TYPE_DOCUMENT_CHANGED = 0; + DOC_EVENT_TYPE_DOCUMENT_WATCHED = 1; + DOC_EVENT_TYPE_DOCUMENT_UNWATCHED = 2; } message DocEvent { DocEventType type = 1; - bytes publisher = 2; + string publisher = 2; } diff --git a/api/yorkie/v1/yorkie.pb.go b/api/yorkie/v1/yorkie.pb.go index 0bd73bedd..8af66bd68 100644 --- a/api/yorkie/v1/yorkie.pb.go +++ b/api/yorkie/v1/yorkie.pb.go @@ -74,8 +74,7 @@ func (m *ActivateClientRequest) GetClientKey() string { } type ActivateClientResponse struct { - ClientKey string `protobuf:"bytes,1,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` - ClientId []byte `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string 
`protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -114,22 +113,15 @@ func (m *ActivateClientResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ActivateClientResponse proto.InternalMessageInfo -func (m *ActivateClientResponse) GetClientKey() string { - if m != nil { - return m.ClientKey - } - return "" -} - -func (m *ActivateClientResponse) GetClientId() []byte { +func (m *ActivateClientResponse) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } type DeactivateClientRequest struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -168,15 +160,14 @@ func (m *DeactivateClientRequest) XXX_DiscardUnknown() { var xxx_messageInfo_DeactivateClientRequest proto.InternalMessageInfo -func (m *DeactivateClientRequest) GetClientId() []byte { +func (m *DeactivateClientRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } type DeactivateClientResponse struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -215,15 +206,8 @@ func (m *DeactivateClientResponse) XXX_DiscardUnknown() { var xxx_messageInfo_DeactivateClientResponse proto.InternalMessageInfo -func (m *DeactivateClientResponse) GetClientId() []byte { - if m != nil { - return m.ClientId - } - return nil -} - type AttachDocumentRequest struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string 
`protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` ChangePack *ChangePack `protobuf:"bytes,2,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -263,11 +247,11 @@ func (m *AttachDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_AttachDocumentRequest proto.InternalMessageInfo -func (m *AttachDocumentRequest) GetClientId() []byte { +func (m *AttachDocumentRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } func (m *AttachDocumentRequest) GetChangePack() *ChangePack { @@ -278,9 +262,8 @@ func (m *AttachDocumentRequest) GetChangePack() *ChangePack { } type AttachDocumentResponse struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - DocumentId string `protobuf:"bytes,2,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` - ChangePack *ChangePack `protobuf:"bytes,3,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` + DocumentId string `protobuf:"bytes,1,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` + ChangePack *ChangePack `protobuf:"bytes,2,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -319,13 +302,6 @@ func (m *AttachDocumentResponse) XXX_DiscardUnknown() { var xxx_messageInfo_AttachDocumentResponse proto.InternalMessageInfo -func (m *AttachDocumentResponse) GetClientId() []byte { - if m != nil { - return m.ClientId - } - return nil -} - func (m *AttachDocumentResponse) GetDocumentId() string { if m != nil { return m.DocumentId @@ -341,7 +317,7 @@ func (m *AttachDocumentResponse) GetChangePack() *ChangePack { } type DetachDocumentRequest struct { - ClientId []byte 
`protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` DocumentId string `protobuf:"bytes,2,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` ChangePack *ChangePack `protobuf:"bytes,3,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` RemoveIfNotAttached bool `protobuf:"varint,4,opt,name=remove_if_not_attached,json=removeIfNotAttached,proto3" json:"remove_if_not_attached,omitempty"` @@ -383,11 +359,11 @@ func (m *DetachDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_DetachDocumentRequest proto.InternalMessageInfo -func (m *DetachDocumentRequest) GetClientId() []byte { +func (m *DetachDocumentRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } func (m *DetachDocumentRequest) GetDocumentId() string { @@ -412,7 +388,6 @@ func (m *DetachDocumentRequest) GetRemoveIfNotAttached() bool { } type DetachDocumentResponse struct { - ClientKey string `protobuf:"bytes,1,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` ChangePack *ChangePack `protobuf:"bytes,2,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -452,13 +427,6 @@ func (m *DetachDocumentResponse) XXX_DiscardUnknown() { var xxx_messageInfo_DetachDocumentResponse proto.InternalMessageInfo -func (m *DetachDocumentResponse) GetClientKey() string { - if m != nil { - return m.ClientKey - } - return "" -} - func (m *DetachDocumentResponse) GetChangePack() *ChangePack { if m != nil { return m.ChangePack @@ -467,7 +435,7 @@ func (m *DetachDocumentResponse) GetChangePack() *ChangePack { } type WatchDocumentRequest struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string 
`protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` DocumentId string `protobuf:"bytes,2,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -507,11 +475,11 @@ func (m *WatchDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WatchDocumentRequest proto.InternalMessageInfo -func (m *WatchDocumentRequest) GetClientId() []byte { +func (m *WatchDocumentRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } func (m *WatchDocumentRequest) GetDocumentId() string { @@ -610,7 +578,7 @@ func (*WatchDocumentResponse) XXX_OneofWrappers() []interface{} { } type WatchDocumentResponse_Initialization struct { - ClientIds [][]byte `protobuf:"bytes,1,rep,name=client_ids,json=clientIds,proto3" json:"client_ids,omitempty"` + ClientIds []string `protobuf:"bytes,1,rep,name=client_ids,json=clientIds,proto3" json:"client_ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -649,7 +617,7 @@ func (m *WatchDocumentResponse_Initialization) XXX_DiscardUnknown() { var xxx_messageInfo_WatchDocumentResponse_Initialization proto.InternalMessageInfo -func (m *WatchDocumentResponse_Initialization) GetClientIds() [][]byte { +func (m *WatchDocumentResponse_Initialization) GetClientIds() []string { if m != nil { return m.ClientIds } @@ -657,7 +625,7 @@ func (m *WatchDocumentResponse_Initialization) GetClientIds() [][]byte { } type RemoveDocumentRequest struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` DocumentId string `protobuf:"bytes,2,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` ChangePack *ChangePack 
`protobuf:"bytes,3,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -698,11 +666,11 @@ func (m *RemoveDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_RemoveDocumentRequest proto.InternalMessageInfo -func (m *RemoveDocumentRequest) GetClientId() []byte { +func (m *RemoveDocumentRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } func (m *RemoveDocumentRequest) GetDocumentId() string { @@ -720,8 +688,7 @@ func (m *RemoveDocumentRequest) GetChangePack() *ChangePack { } type RemoveDocumentResponse struct { - ClientKey string `protobuf:"bytes,1,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` - ChangePack *ChangePack `protobuf:"bytes,2,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` + ChangePack *ChangePack `protobuf:"bytes,1,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -760,13 +727,6 @@ func (m *RemoveDocumentResponse) XXX_DiscardUnknown() { var xxx_messageInfo_RemoveDocumentResponse proto.InternalMessageInfo -func (m *RemoveDocumentResponse) GetClientKey() string { - if m != nil { - return m.ClientKey - } - return "" -} - func (m *RemoveDocumentResponse) GetChangePack() *ChangePack { if m != nil { return m.ChangePack @@ -775,7 +735,7 @@ func (m *RemoveDocumentResponse) GetChangePack() *ChangePack { } type PushPullChangesRequest struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` DocumentId string `protobuf:"bytes,2,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` ChangePack *ChangePack `protobuf:"bytes,3,opt,name=change_pack,json=changePack,proto3" 
json:"change_pack,omitempty"` PushOnly bool `protobuf:"varint,4,opt,name=push_only,json=pushOnly,proto3" json:"push_only,omitempty"` @@ -817,11 +777,11 @@ func (m *PushPullChangesRequest) XXX_DiscardUnknown() { var xxx_messageInfo_PushPullChangesRequest proto.InternalMessageInfo -func (m *PushPullChangesRequest) GetClientId() []byte { +func (m *PushPullChangesRequest) GetClientId() string { if m != nil { return m.ClientId } - return nil + return "" } func (m *PushPullChangesRequest) GetDocumentId() string { @@ -846,8 +806,7 @@ func (m *PushPullChangesRequest) GetPushOnly() bool { } type PushPullChangesResponse struct { - ClientId []byte `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - ChangePack *ChangePack `protobuf:"bytes,2,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` + ChangePack *ChangePack `protobuf:"bytes,1,opt,name=change_pack,json=changePack,proto3" json:"change_pack,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -886,13 +845,6 @@ func (m *PushPullChangesResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PushPullChangesResponse proto.InternalMessageInfo -func (m *PushPullChangesResponse) GetClientId() []byte { - if m != nil { - return m.ClientId - } - return nil -} - func (m *PushPullChangesResponse) GetChangePack() *ChangePack { if m != nil { return m.ChangePack @@ -921,49 +873,48 @@ func init() { func init() { proto.RegisterFile("yorkie/v1/yorkie.proto", fileDescriptor_40070c858814ab24) } var fileDescriptor_40070c858814ab24 = []byte{ - // 661 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xce, 0xd2, 0x1f, 0x35, 0x93, 0xb6, 0xc0, 0x96, 0xa4, 0x21, 0x15, 0xa9, 0x59, 0x2e, 0x91, - 0x2a, 0x39, 0xa4, 0x95, 0xca, 0x81, 0x53, 0xdb, 0x20, 0x35, 0x42, 0x82, 0xc8, 0x54, 0x54, 0xad, - 0x84, 0x2c, 0xd7, 0xde, 0x92, 0x55, 
0x5c, 0x6f, 0xb0, 0xd7, 0x96, 0xcc, 0x03, 0x70, 0x83, 0x33, - 0xef, 0xc0, 0x5b, 0x70, 0xe2, 0xc8, 0x91, 0x23, 0x2a, 0x2f, 0x82, 0x62, 0x3b, 0xc1, 0xeb, 0x9a, - 0x34, 0x54, 0x54, 0x70, 0xb3, 0x67, 0xe7, 0xfb, 0xfc, 0xcd, 0xec, 0xfc, 0x18, 0x2a, 0x21, 0x77, - 0xfb, 0x8c, 0x36, 0x83, 0x56, 0x33, 0x7e, 0x52, 0x07, 0x2e, 0x17, 0x1c, 0x17, 0x93, 0xb7, 0xa0, - 0x55, 0xbb, 0xfb, 0xcb, 0xc5, 0xa5, 0x1e, 0xf7, 0x5d, 0x93, 0x7a, 0xb1, 0x17, 0xd9, 0x86, 0xf2, - 0x8e, 0x29, 0x58, 0x60, 0x08, 0xba, 0x67, 0x33, 0xea, 0x08, 0x8d, 0xbe, 0xf1, 0xa9, 0x27, 0xf0, - 0x3d, 0x00, 0x33, 0x32, 0xe8, 0x7d, 0x1a, 0x56, 0x91, 0x82, 0x1a, 0x45, 0xad, 0x18, 0x5b, 0x9e, - 0xd2, 0x90, 0x1c, 0x40, 0x25, 0x8b, 0xf3, 0x06, 0xdc, 0xf1, 0xe8, 0x25, 0x40, 0xbc, 0x06, 0xc9, - 0x8b, 0xce, 0xac, 0xea, 0x0d, 0x05, 0x35, 0x16, 0xb5, 0x85, 0xd8, 0xd0, 0xb1, 0xc8, 0x36, 0xac, - 0xb6, 0xa9, 0x91, 0xab, 0x47, 0xc2, 0xa1, 0x0c, 0xee, 0x11, 0x54, 0x2f, 0xe2, 0x12, 0x3d, 0x13, - 0x81, 0x36, 0x94, 0x77, 0x84, 0x30, 0xcc, 0x5e, 0x9b, 0x9b, 0xfe, 0xd9, 0x94, 0x9f, 0xc3, 0xdb, - 0x50, 0x32, 0x7b, 0x86, 0xf3, 0x9a, 0xea, 0x03, 0xc3, 0xec, 0x47, 0x51, 0x94, 0x36, 0xcb, 0xea, - 0x38, 0xe1, 0xea, 0x5e, 0x74, 0xda, 0x35, 0xcc, 0xbe, 0x06, 0xe6, 0xf8, 0x99, 0x7c, 0x40, 0x50, - 0xc9, 0x7e, 0x6e, 0x0a, 0x95, 0x78, 0x1d, 0x4a, 0x56, 0x02, 0x18, 0x65, 0xad, 0xa8, 0xc1, 0xc8, - 0x74, 0x51, 0xd0, 0xcc, 0xb4, 0x82, 0x3e, 0x23, 0x28, 0xb7, 0xe9, 0x1f, 0xc7, 0x7f, 0x5d, 0x7a, - 0xf0, 0x16, 0x54, 0x5c, 0x7a, 0xc6, 0x03, 0xaa, 0xb3, 0x53, 0xdd, 0xe1, 0x42, 0x37, 0xa2, 0x6c, - 0x51, 0xab, 0x3a, 0xab, 0xa0, 0xc6, 0x82, 0xb6, 0x12, 0x9f, 0x76, 0x4e, 0x9f, 0x71, 0xb1, 0x93, - 0x1c, 0x11, 0x0e, 0x95, 0x6c, 0x0c, 0xd3, 0x95, 0xe2, 0x55, 0xaf, 0xf1, 0x00, 0xee, 0x1c, 0x1a, - 0xe2, 0x2f, 0xe7, 0x8c, 0x7c, 0x43, 0x50, 0xce, 0xd0, 0x26, 0x61, 0x1c, 0xc1, 0x32, 0x73, 0x98, - 0x60, 0x86, 0xcd, 0xde, 0x1a, 0x82, 0x71, 0x27, 0x22, 0x2f, 0x6d, 0x36, 0x53, 0x52, 0x73, 0x91, - 0x6a, 0x47, 0x82, 0xed, 0x17, 0xb4, 0x0c, 0x11, 0xde, 0x80, 0x39, 0x1a, 
0x50, 0x47, 0x24, 0xc1, - 0xaf, 0xa4, 0x18, 0xdb, 0xdc, 0x7c, 0x32, 0x3c, 0xda, 0x2f, 0x68, 0xb1, 0x4f, 0xad, 0x09, 0xcb, - 0x32, 0x61, 0x2a, 0xc1, 0xcc, 0xf2, 0xaa, 0x48, 0x99, 0x69, 0x2c, 0x8e, 0x12, 0xdc, 0xb1, 0xbc, - 0xdd, 0x79, 0x98, 0x3d, 0xe1, 0x56, 0x48, 0xde, 0x23, 0x28, 0x6b, 0xd1, 0xcd, 0xfd, 0x17, 0x65, - 0x36, 0xac, 0x98, 0xac, 0x9c, 0xeb, 0xad, 0x98, 0x4f, 0x08, 0x2a, 0x5d, 0xdf, 0xeb, 0x75, 0x7d, - 0xdb, 0x8e, 0x5d, 0xbc, 0x7f, 0xdb, 0x68, 0x6b, 0x50, 0x1c, 0xf8, 0x5e, 0x4f, 0xe7, 0x8e, 0x1d, - 0x26, 0xbd, 0xb5, 0x30, 0x34, 0x3c, 0x77, 0xec, 0x90, 0x38, 0xb0, 0x7a, 0x41, 0xec, 0x34, 0x63, - 0xea, 0x8a, 0xd9, 0xd9, 0x7c, 0x37, 0x07, 0x4b, 0x47, 0x91, 0xd3, 0x0b, 0xea, 0x06, 0xcc, 0xa4, - 0xf8, 0x10, 0x96, 0xe5, 0xed, 0x82, 0x95, 0x14, 0x4d, 0xee, 0xc2, 0xaa, 0xdd, 0x9f, 0xe0, 0x11, - 0xab, 0x27, 0x05, 0xfc, 0x0a, 0x6e, 0x65, 0x17, 0x05, 0x26, 0xe9, 0xa2, 0xcf, 0xdf, 0x3e, 0xb5, - 0x07, 0x13, 0x7d, 0xc6, 0xf4, 0x43, 0xdd, 0xd2, 0x7c, 0x97, 0x75, 0xe7, 0x6d, 0x1a, 0x59, 0x77, - 0xee, 0x72, 0x88, 0x89, 0xe5, 0x19, 0x27, 0x11, 0xe7, 0x8e, 0x70, 0x89, 0x38, 0x7f, 0x40, 0xc6, - 0xc4, 0x72, 0x2b, 0x48, 0xc4, 0xb9, 0x4d, 0x2b, 0x11, 0xe7, 0xf7, 0x11, 0x29, 0xe0, 0x63, 0xb8, - 0x99, 0x29, 0x22, 0x9c, 0xc6, 0xe5, 0x77, 0x43, 0x8d, 0x4c, 0x72, 0x19, 0x73, 0xbf, 0x84, 0x25, - 0x69, 0xde, 0xe1, 0xf5, 0xdf, 0x4f, 0xc2, 0x98, 0x57, 0xb9, 0x6c, 0x54, 0x92, 0xc2, 0x43, 0xb4, - 0xbb, 0xf1, 0xe5, 0xbc, 0x8e, 0xbe, 0x9e, 0xd7, 0xd1, 0xf7, 0xf3, 0x3a, 0xfa, 0xf8, 0xa3, 0x5e, - 0x80, 0xdb, 0x16, 0x0d, 0x46, 0x50, 0x63, 0xc0, 0xd4, 0xa0, 0xd5, 0x45, 0xc7, 0xb3, 0xea, 0xe3, - 0xa0, 0x75, 0x32, 0x1f, 0xfd, 0x40, 0x6d, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x78, 0xaa, 0xfe, - 0x50, 0x80, 0x09, 0x00, 0x00, + // 650 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xce, 0xf6, 0x9f, 0x9a, 0x89, 0xda, 0xdf, 0x8f, 0x2d, 0x49, 0x43, 0x2a, 0x52, 0xb3, 0x5c, + 0x2a, 0x55, 0x72, 0x48, 0x2b, 
0x7a, 0xe1, 0xd4, 0x36, 0x48, 0x8d, 0x90, 0x20, 0x18, 0x44, 0xd5, + 0x4a, 0xc8, 0x72, 0xed, 0x2d, 0x59, 0xc5, 0xf5, 0xa6, 0xf6, 0xda, 0x92, 0x79, 0x00, 0x6e, 0xdc, + 0x79, 0x07, 0xde, 0x82, 0x13, 0x47, 0x8e, 0x1c, 0x51, 0x79, 0x11, 0x14, 0xdb, 0x4d, 0xbd, 0xee, + 0x92, 0x16, 0x8a, 0x04, 0xb7, 0xcd, 0xec, 0x37, 0xdf, 0x7c, 0x3b, 0x99, 0xf9, 0x64, 0xa8, 0xc5, + 0xdc, 0x1f, 0x30, 0xda, 0x8a, 0xda, 0xad, 0xf4, 0xa4, 0x0f, 0x7d, 0x2e, 0x38, 0x2e, 0x67, 0xbf, + 0xa2, 0x76, 0xe3, 0xce, 0x05, 0xc4, 0xa7, 0x01, 0x0f, 0x7d, 0x9b, 0x06, 0x29, 0x8a, 0x6c, 0x41, + 0x75, 0xdb, 0x16, 0x2c, 0xb2, 0x04, 0xdd, 0x75, 0x19, 0xf5, 0x84, 0x41, 0x4f, 0x43, 0x1a, 0x08, + 0x7c, 0x17, 0xc0, 0x4e, 0x02, 0xe6, 0x80, 0xc6, 0x75, 0xa4, 0xa1, 0xb5, 0xb2, 0x51, 0x4e, 0x23, + 0x4f, 0x68, 0x4c, 0x1e, 0x42, 0xad, 0x98, 0x17, 0x0c, 0xb9, 0x17, 0x50, 0xbc, 0x02, 0x19, 0xcc, + 0x64, 0x4e, 0x96, 0x37, 0x9f, 0x06, 0xba, 0x0e, 0xd9, 0x82, 0xe5, 0x0e, 0xb5, 0x94, 0x05, 0x27, + 0xe6, 0x35, 0xa0, 0x7e, 0x39, 0x2f, 0x2d, 0x48, 0x5c, 0xa8, 0x6e, 0x0b, 0x61, 0xd9, 0xfd, 0x0e, + 0xb7, 0xc3, 0x93, 0x6b, 0x32, 0xe2, 0x2d, 0xa8, 0xd8, 0x7d, 0xcb, 0x7b, 0x43, 0xcd, 0xa1, 0x65, + 0x0f, 0xea, 0x53, 0x1a, 0x5a, 0xab, 0x6c, 0x54, 0xf5, 0x71, 0xd3, 0xf4, 0xdd, 0xe4, 0xb6, 0x67, + 0xd9, 0x03, 0x03, 0xec, 0xf1, 0x99, 0x9c, 0x42, 0xad, 0x58, 0x2d, 0x7b, 0xf8, 0x2a, 0x54, 0x9c, + 0x2c, 0x76, 0x51, 0x10, 0xce, 0x43, 0x37, 0x28, 0xf9, 0x09, 0x41, 0xb5, 0x43, 0x7f, 0xf9, 0x85, + 0x05, 0x3d, 0x53, 0x57, 0xe9, 0x99, 0xbe, 0xa6, 0x1e, 0xbc, 0x09, 0x35, 0x9f, 0x9e, 0xf0, 0x88, + 0x9a, 0xec, 0xd8, 0xf4, 0xb8, 0x30, 0xad, 0xa4, 0x21, 0xd4, 0xa9, 0xcf, 0x68, 0x68, 0x6d, 0xde, + 0x58, 0x4a, 0x6f, 0xbb, 0xc7, 0x4f, 0xb9, 0xd8, 0xce, 0xae, 0x48, 0x0f, 0x6a, 0xc5, 0x37, 0x64, + 0x7d, 0xfb, 0xdd, 0xb6, 0xbc, 0x84, 0xdb, 0xfb, 0x96, 0xf8, 0xc3, 0x4d, 0x21, 0x5f, 0x11, 0x54, + 0x0b, 0xb4, 0x99, 0xce, 0x03, 0x58, 0x64, 0x1e, 0x13, 0xcc, 0x72, 0xd9, 0x5b, 0x4b, 0x30, 0xee, + 0x25, 0xe4, 0x95, 0x8d, 0x56, 0x4e, 0xaa, 0x32, 0x53, 0xef, 0x4a, 
0x69, 0x7b, 0x25, 0xa3, 0x40, + 0x84, 0xd7, 0x61, 0x96, 0x46, 0xd4, 0x13, 0xd9, 0xe3, 0x97, 0x72, 0x8c, 0x1d, 0x6e, 0x3f, 0x1e, + 0x5d, 0xed, 0x95, 0x8c, 0x14, 0xd3, 0x68, 0xc1, 0xa2, 0x4c, 0x98, 0xdb, 0x55, 0xe6, 0x04, 0x75, + 0xa4, 0x4d, 0x5f, 0xec, 0x6a, 0xd7, 0x09, 0x76, 0xe6, 0x60, 0xe6, 0x88, 0x3b, 0x31, 0x79, 0x8f, + 0xa0, 0x6a, 0x24, 0x7f, 0xcd, 0x3f, 0x31, 0x47, 0xa3, 0x91, 0x28, 0xca, 0x51, 0x8f, 0x04, 0xba, + 0x2e, 0xe3, 0x47, 0x04, 0xb5, 0x5e, 0x18, 0xf4, 0x7b, 0xa1, 0xeb, 0xa6, 0x90, 0xe0, 0xef, 0xae, + 0xca, 0x0a, 0x94, 0x87, 0x61, 0xd0, 0x37, 0xb9, 0xe7, 0xc6, 0xd9, 0x76, 0xcc, 0x8f, 0x02, 0xcf, + 0x3c, 0x37, 0x26, 0xcf, 0x61, 0xf9, 0x92, 0xd8, 0x9b, 0x35, 0x60, 0xe3, 0xdd, 0x2c, 0x2c, 0x1c, + 0x24, 0xa0, 0x17, 0xd4, 0x8f, 0x98, 0x4d, 0xf1, 0x3e, 0x2c, 0xca, 0x46, 0x8d, 0xb5, 0x1c, 0x8d, + 0xd2, 0xfb, 0x1b, 0xf7, 0x26, 0x20, 0x32, 0xd3, 0x2d, 0xe1, 0xd7, 0xf0, 0x7f, 0xd1, 0x92, 0x31, + 0xc9, 0x0f, 0xae, 0xda, 0xe7, 0x1b, 0xf7, 0x27, 0x62, 0xc6, 0xf4, 0x23, 0xdd, 0x92, 0xcf, 0xca, + 0xba, 0x55, 0x86, 0x2f, 0xeb, 0x56, 0x9a, 0x74, 0x4a, 0x2c, 0x1b, 0x91, 0x44, 0xac, 0xf4, 0x59, + 0x89, 0x58, 0xed, 0x62, 0x29, 0xb1, 0x3c, 0xce, 0x12, 0xb1, 0x72, 0xf1, 0x24, 0x62, 0xf5, 0x2e, + 0x90, 0x12, 0x3e, 0x84, 0xff, 0x0a, 0x73, 0x82, 0xf3, 0x79, 0xea, 0x81, 0x6f, 0x90, 0x49, 0x90, + 0x31, 0xf7, 0x2b, 0x58, 0x90, 0x3c, 0x0b, 0xaf, 0xfe, 0xdc, 0xcd, 0x52, 0x5e, 0xed, 0x2a, 0xbb, + 0x23, 0xa5, 0x07, 0x68, 0x67, 0xfd, 0xf3, 0x59, 0x13, 0x7d, 0x39, 0x6b, 0xa2, 0x6f, 0x67, 0x4d, + 0xf4, 0xe1, 0x7b, 0xb3, 0x04, 0xb7, 0x1c, 0x1a, 0x9d, 0xa7, 0x5a, 0x43, 0xa6, 0x47, 0xed, 0x1e, + 0x3a, 0x9c, 0xd1, 0x1f, 0x45, 0xed, 0xa3, 0xb9, 0xe4, 0x5b, 0x64, 0xf3, 0x47, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8c, 0xb9, 0x33, 0xc1, 0xcb, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -1353,13 +1304,6 @@ func (m *ActivateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) copy(dAtA[i:], m.ClientId) i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientId))) i-- - dAtA[i] = 0x12 - } - if len(m.ClientKey) > 0 { - i -= len(m.ClientKey) - copy(dAtA[i:], m.ClientKey) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientKey))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1423,13 +1367,6 @@ func (m *DeactivateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.ClientId) > 0 { - i -= len(m.ClientId) - copy(dAtA[i:], m.ClientId) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientId))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } @@ -1513,20 +1450,13 @@ func (m *AttachDocumentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintYorkie(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } if len(m.DocumentId) > 0 { i -= len(m.DocumentId) copy(dAtA[i:], m.DocumentId) i = encodeVarintYorkie(dAtA, i, uint64(len(m.DocumentId))) i-- - dAtA[i] = 0x12 - } - if len(m.ClientId) > 0 { - i -= len(m.ClientId) - copy(dAtA[i:], m.ClientId) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientId))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1631,13 +1561,6 @@ func (m *DetachDocumentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x12 } - if len(m.ClientKey) > 0 { - i -= len(m.ClientKey) - copy(dAtA[i:], m.ClientKey) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientKey))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } @@ -1883,13 +1806,6 @@ func (m *RemoveDocumentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintYorkie(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - if len(m.ClientKey) > 0 { - i -= len(m.ClientKey) - copy(dAtA[i:], m.ClientKey) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientKey))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ 
-1992,13 +1908,6 @@ func (m *PushPullChangesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintYorkie(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - if len(m.ClientId) > 0 { - i -= len(m.ClientId) - copy(dAtA[i:], m.ClientId) - i = encodeVarintYorkie(dAtA, i, uint64(len(m.ClientId))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -2037,10 +1946,6 @@ func (m *ActivateClientResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientKey) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } l = len(m.ClientId) if l > 0 { n += 1 + l + sovYorkie(uint64(l)) @@ -2073,10 +1978,6 @@ func (m *DeactivateClientResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientId) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2109,10 +2010,6 @@ func (m *AttachDocumentResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientId) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } l = len(m.DocumentId) if l > 0 { n += 1 + l + sovYorkie(uint64(l)) @@ -2160,10 +2057,6 @@ func (m *DetachDocumentResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientKey) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } if m.ChangePack != nil { l = m.ChangePack.Size() n += 1 + l + sovYorkie(uint64(l)) @@ -2240,8 +2133,8 @@ func (m *WatchDocumentResponse_Initialization) Size() (n int) { var l int _ = l if len(m.ClientIds) > 0 { - for _, b := range m.ClientIds { - l = len(b) + for _, s := range m.ClientIds { + l = len(s) n += 1 + l + sovYorkie(uint64(l)) } } @@ -2281,10 +2174,6 @@ func (m *RemoveDocumentResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientKey) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } if m.ChangePack != nil { l = m.ChangePack.Size() n += 1 + l + sovYorkie(uint64(l)) @@ -2328,10 +2217,6 @@ func (m *PushPullChangesResponse) Size() (n int) { } var l int _ = l - l = len(m.ClientId) - if l > 0 { - n += 1 + l + sovYorkie(uint64(l)) - } if m.ChangePack 
!= nil { l = m.ChangePack.Size() n += 1 + l + sovYorkie(uint64(l)) @@ -2462,7 +2347,7 @@ func (m *ActivateClientResponse) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2490,41 +2375,7 @@ func (m *ActivateClientResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2581,7 +2432,7 @@ func (m *DeactivateClientRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -2591,25 +2442,23 @@ func (m *DeactivateClientRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) - if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2662,40 +2511,6 @@ func (m *DeactivateClientResponse) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: DeactivateClientResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipYorkie(dAtA[iNdEx:]) @@ -2751,7 +2566,7 @@ func (m *AttachDocumentRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -2761,25 +2576,23 @@ func (m *AttachDocumentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) - if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2869,40 +2682,6 @@ func (m *AttachDocumentResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DocumentId", wireType) } @@ -2934,7 +2713,7 @@ func (m *AttachDocumentResponse) Unmarshal(dAtA []byte) error { } m.DocumentId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChangePack", wireType) } @@ -3025,7 +2804,7 @@ func (m *DetachDocumentRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -3035,25 +2814,23 @@ func (m *DetachDocumentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3194,38 +2971,6 @@ func (m *DetachDocumentResponse) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: DetachDocumentResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChangePack", wireType) @@ -3317,7 +3062,7 @@ func (m *WatchDocumentRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -3327,25 +3072,23 @@ func (m *WatchDocumentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3555,7 +3298,7 @@ func (m *WatchDocumentResponse_Initialization) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientIds", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -3565,23 +3308,23 @@ func (m *WatchDocumentResponse_Initialization) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientIds = append(m.ClientIds, make([]byte, postIndex-iNdEx)) - copy(m.ClientIds[len(m.ClientIds)-1], dAtA[iNdEx:postIndex]) + m.ClientIds = append(m.ClientIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3638,7 +3381,7 @@ func (m *RemoveDocumentRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -3648,25 +3391,23 @@ func (m *RemoveDocumentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - 
m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) - if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3788,38 +3529,6 @@ func (m *RemoveDocumentResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChangePack", wireType) } @@ -3910,7 +3619,7 @@ func (m *PushPullChangesRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowYorkie @@ -3920,25 +3629,23 @@ func (m *PushPullChangesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthYorkie } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthYorkie } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) 
- if m.ClientId == nil { - m.ClientId = []byte{} - } + m.ClientId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4080,40 +3787,6 @@ func (m *PushPullChangesResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowYorkie - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthYorkie - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthYorkie - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientId = append(m.ClientId[:0], dAtA[iNdEx:postIndex]...) - if m.ClientId == nil { - m.ClientId = []byte{} - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChangePack", wireType) } diff --git a/api/yorkie/v1/yorkie.proto b/api/yorkie/v1/yorkie.proto index b4f0eda6a..639ddd37d 100644 --- a/api/yorkie/v1/yorkie.proto +++ b/api/yorkie/v1/yorkie.proto @@ -42,49 +42,45 @@ message ActivateClientRequest { } message ActivateClientResponse { - string client_key = 1; - bytes client_id = 2; + string client_id = 1; } message DeactivateClientRequest { - bytes client_id = 1; + string client_id = 1; } message DeactivateClientResponse { - bytes client_id = 1; } message AttachDocumentRequest { - bytes client_id = 1; + string client_id = 1; ChangePack change_pack = 2; } message AttachDocumentResponse { - bytes client_id = 1; - string document_id = 2; - ChangePack change_pack = 3; + string document_id = 1; + ChangePack change_pack = 2; } message DetachDocumentRequest { - bytes client_id = 1; + string client_id = 1; string document_id = 2; ChangePack change_pack = 3; bool remove_if_not_attached = 4; } message 
DetachDocumentResponse { - string client_key = 1; ChangePack change_pack = 2; } message WatchDocumentRequest { - bytes client_id = 1; + string client_id = 1; string document_id = 2; } message WatchDocumentResponse { message Initialization { - repeated bytes client_ids = 1; + repeated string client_ids = 1; } oneof body { @@ -94,24 +90,22 @@ message WatchDocumentResponse { } message RemoveDocumentRequest { - bytes client_id = 1; + string client_id = 1; string document_id = 2; ChangePack change_pack = 3; } message RemoveDocumentResponse { - string client_key = 1; - ChangePack change_pack = 2; + ChangePack change_pack = 1; } message PushPullChangesRequest { - bytes client_id = 1; + string client_id = 1; string document_id = 2; ChangePack change_pack = 3; bool push_only = 4; } message PushPullChangesResponse { - bytes client_id = 1; - ChangePack change_pack = 2; + ChangePack change_pack = 1; } diff --git a/build/charts/yorkie-cluster/Chart.yaml b/build/charts/yorkie-cluster/Chart.yaml index c88edee38..218aec62e 100644 --- a/build/charts/yorkie-cluster/Chart.yaml +++ b/build/charts/yorkie-cluster/Chart.yaml @@ -13,8 +13,8 @@ maintainers: sources: - https://github.com/yorkie-team/yorkie -version: 0.4.5 -appVersion: "0.4.5" +version: 0.4.6 +appVersion: "0.4.6" kubeVersion: ">=1.23.0-0" keywords: diff --git a/client/client.go b/client/client.go index 655433967..453fccbb6 100644 --- a/client/client.go +++ b/client/client.go @@ -214,7 +214,7 @@ func (c *Client) Activate(ctx context.Context) error { return err } - clientID, err := time.ActorIDFromBytes(response.ClientId) + clientID, err := time.ActorIDFromHex(response.ClientId) if err != nil { return err } @@ -232,7 +232,7 @@ func (c *Client) Deactivate(ctx context.Context) error { } _, err := c.client.DeactivateClient(withShardKey(ctx, c.options.APIKey), &api.DeactivateClientRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), }) if err != nil { return err @@ -276,7 +276,7 @@ func (c *Client) Attach(ctx 
context.Context, doc *document.Document, options ... res, err := c.client.AttachDocument( withShardKey(ctx, c.options.APIKey, doc.Key().String()), &api.AttachDocumentRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), ChangePack: pbChangePack, }, ) @@ -349,7 +349,7 @@ func (c *Client) Detach(ctx context.Context, doc *document.Document, options ... res, err := c.client.DetachDocument( withShardKey(ctx, c.options.APIKey, doc.Key().String()), &api.DetachDocumentRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), DocumentId: attachment.docID.String(), ChangePack: pbChangePack, RemoveIfNotAttached: opts.removeIfNotAttached, @@ -412,7 +412,7 @@ func (c *Client) Watch( stream, err := c.client.WatchDocument( withShardKey(ctx, c.options.APIKey, doc.Key().String()), &api.WatchDocumentRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), DocumentId: attachment.docID.String(), }, ) @@ -425,14 +425,14 @@ func (c *Client) Watch( case *api.WatchDocumentResponse_Initialization_: var clientIDs []string for _, clientID := range resp.Initialization.ClientIds { - id, err := time.ActorIDFromBytes(clientID) + id, err := time.ActorIDFromHex(clientID) if err != nil { return nil, err } clientIDs = append(clientIDs, id.String()) } - doc.SetOnlineClientSet(clientIDs...) + doc.SetOnlineClients(clientIDs...) 
return nil, nil case *api.WatchDocumentResponse_Event: eventType, err := converter.FromEventType(resp.Event.Type) @@ -440,29 +440,32 @@ func (c *Client) Watch( return nil, err } - cli, err := time.ActorIDFromBytes(resp.Event.Publisher) + cli, err := time.ActorIDFromHex(resp.Event.Publisher) if err != nil { return nil, err } switch eventType { - case types.DocumentsChangedEvent: + case types.DocumentChangedEvent: return &WatchResponse{Type: DocumentChanged}, nil - case types.DocumentsWatchedEvent: + case types.DocumentWatchedEvent: doc.AddOnlineClient(cli.String()) - if doc.OnlinePresence(cli.String()) == nil { + if doc.Presence(cli.String()) == nil { return nil, nil } return &WatchResponse{ Type: DocumentWatched, Presences: map[string]innerpresence.Presence{ - cli.String(): doc.OnlinePresence(cli.String()), + cli.String(): doc.Presence(cli.String()), }, }, nil - case types.DocumentsUnwatchedEvent: - p := doc.OnlinePresence(cli.String()) + case types.DocumentUnwatchedEvent: + p := doc.Presence(cli.String()) doc.RemoveOnlineClient(cli.String()) + if p == nil { + return nil, nil + } return &WatchResponse{ Type: DocumentUnwatched, @@ -521,6 +524,8 @@ func (c *Client) Watch( t := PresenceChanged if e.Type == document.WatchedEvent { t = DocumentWatched + } else if e.Type == document.UnwatchedEvent { + t = DocumentUnwatched } rch <- WatchResponse{Type: t, Presences: e.Presences} case <-ctx.Done(): @@ -576,7 +581,7 @@ func (c *Client) pushPullChanges(ctx context.Context, opt SyncOptions) error { res, err := c.client.PushPullChanges( withShardKey(ctx, c.options.APIKey, opt.key.String()), &api.PushPullChangesRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), DocumentId: attachment.docID.String(), ChangePack: pbChangePack, PushOnly: opt.mode == types.SyncModePushOnly, @@ -621,7 +626,7 @@ func (c *Client) Remove(ctx context.Context, doc *document.Document) error { res, err := c.client.RemoveDocument( withShardKey(ctx, c.options.APIKey, doc.Key().String()), 
&api.RemoveDocumentRequest{ - ClientId: c.id.Bytes(), + ClientId: c.id.String(), DocumentId: attachment.docID.String(), ChangePack: pbChangePack, }, diff --git a/client/client_test.go b/client/client_test.go index e7a32877b..cc780cc1f 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -86,8 +86,6 @@ func TestClient(t *testing.T) { t.Run("x-shard-key test", func(t *testing.T) { dummyID := types.ID("000000000000000000000000") - dummyActorID, err := dummyID.Bytes() - assert.NoError(t, err) testServer, addr := dialTestYorkieServer(t) defer testServer.Stop() @@ -113,7 +111,7 @@ func TestClient(t *testing.T) { assert.Equal(t, "dummy-api-key", data[types.ShardKey][0]) return &api.ActivateClientResponse{ - ClientId: dummyActorID, + ClientId: dummyID.String(), }, nil }, ) diff --git a/cmd/yorkie/commands.go b/cmd/yorkie/commands.go index 857d44f47..62e51c6dc 100644 --- a/cmd/yorkie/commands.go +++ b/cmd/yorkie/commands.go @@ -45,6 +45,8 @@ func Run() int { } func init() { + rootCmd.SetOut(os.Stdout) + rootCmd.SetErr(os.Stderr) rootCmd.AddCommand(project.SubCmd) rootCmd.AddCommand(document.SubCmd) rootCmd.AddCommand(context.SubCmd) diff --git a/cmd/yorkie/logout.go b/cmd/yorkie/logout.go index a22bbc0f5..cf7135638 100644 --- a/cmd/yorkie/logout.go +++ b/cmd/yorkie/logout.go @@ -1,3 +1,19 @@ +/* + * Copyright 2023 The Yorkie Authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package main import ( diff --git a/cmd/yorkie/server.go b/cmd/yorkie/server.go index 583188698..ecfb3d2f1 100644 --- a/cmd/yorkie/server.go +++ b/cmd/yorkie/server.go @@ -50,12 +50,7 @@ var ( authWebhookMaxWaitInterval time.Duration authWebhookCacheAuthTTL time.Duration authWebhookCacheUnauthTTL time.Duration - - etcdEndpoints []string - etcdDialTimeout time.Duration - etcdUsername string - etcdPassword string - etcdLockLeaseTime time.Duration + projectInfoCacheTTL time.Duration conf = server.NewConfig() ) @@ -72,6 +67,7 @@ func newServerCmd() *cobra.Command { conf.Backend.AuthWebhookMaxWaitInterval = authWebhookMaxWaitInterval.String() conf.Backend.AuthWebhookCacheAuthTTL = authWebhookCacheAuthTTL.String() conf.Backend.AuthWebhookCacheUnauthTTL = authWebhookCacheUnauthTTL.String() + conf.Backend.ProjectInfoCacheTTL = projectInfoCacheTTL.String() conf.Housekeeping.Interval = housekeepingInterval.String() @@ -227,6 +223,12 @@ func init() { server.DefaultHousekeepingCandidatesLimitPerProject, "candidates limit per project for a single housekeeping run", ) + cmd.Flags().IntVar( + &conf.Housekeeping.ProjectFetchSize, + "housekeeping-project-fetch-size", + server.DefaultHousekeepingProjectFetchSize, + "housekeeping project fetch size for a single housekeeping run", + ) cmd.Flags().StringVar( &mongoConnectionURI, "mongo-connection-uri", @@ -337,6 +339,18 @@ func init() { server.DefaultAuthWebhookCacheUnauthTTL, "TTL value to set when caching unauthorized webhook response.", ) + cmd.Flags().IntVar( + &conf.Backend.ProjectInfoCacheSize, + "project-info-cache-size", + server.DefaultProjectInfoCacheSize, + "The cache size of the project info.", + ) + cmd.Flags().DurationVar( + &projectInfoCacheTTL, + "project-info-cache-ttl", + server.DefaultProjectInfoCacheTTL, + "TTL value to set when caching project info.", + ) cmd.Flags().StringVar( &conf.Backend.Hostname, "hostname", diff --git a/design/README.md b/design/README.md index 62a92322f..9c0515a2f 100644 --- 
a/design/README.md +++ b/design/README.md @@ -2,16 +2,17 @@ ## Contents -- [Document Editing](document-editing.md): Local and remote document editing mechanism -- [Peer Awareness](peer-awareness.md): Algorithm for managing end-user status - [Data Structure](data-structure.md): CRDT data structures in `crdt` package +- [Document Editing](document-editing.md): Local and remote document editing mechanism +- [Document Removal](document-removal.md): Soft deletion of document +- [Tree](tree.md): Tree CRDT data structure - [Range Deletion in Splay Tree](range-deletion-in-splay-tree.md): Rotation-free range deletion algorithm for splay tree +- [Presence](presence.md): For sharing the presence of peers - [PubSub](pub-sub.md): Client-side event sharing with gRPC server-side stream and PubSub pattern - [Garbage Collection](garbage-collection.md): Deleting unused nodes in CRDT system - [Garbage Collection for Text Type](gc-for-text-type.md): Garbage collection for text nodes - [Housekeeping](housekeeping.md): Deactivating outdated clients for efficient garbage collection - [Retention](retention.md): Clearing unnecessary changes with `--backend-snapshot-with-purging-changes` flag -- [Document Removal](document-removal.md): Soft deletion of document - [Sharded Cluster Mode](sharded-cluster-mode.md): Shard-based server cluster mode with consistent hashing ## Maintaining the Document diff --git a/design/data-structure.md b/design/data-structure.md index ec4711bb2..bfab2f745 100644 --- a/design/data-structure.md +++ b/design/data-structure.md @@ -1,6 +1,6 @@ --- title: data-structure -target-version: 0.3.1 +target-version: 0.4.6 --- # Data Structures @@ -28,7 +28,9 @@ The `json` and `crdt` package has data structures for representing the contents Below is the dependency graph of data structures used in a JSON-like document. 
-![data-structure](./media/data-structure.png) + + + The data structures can be divided into three groups: @@ -45,7 +47,9 @@ JSON-like data strucutres are used when editing JSON-like documents. - `Primitive`: represents primitive data like `string`, `number`, `boolean`, `null`, etc. - `Object`: represents [object type](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object) of JavaScript. Just like JavaScript, you can use `Object` as [hash table](https://en.wikipedia.org/wiki/Hash_table). - `Array`: represents [array type](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array) of JavaScript. You can also use `Array` as [list](https://en.wikipedia.org/wiki/List_(abstract_data_type)). -- `Text`: represents text with style attributes in rich text editors such as [Quill](https://github.com/yorkie-team/yorkie-js-sdk/blob/main/examples/quill.html). Users can express styles such as bold, italic, and underline to text content. Of course, it can represent just a plain text in text-based editors such as [CodeMirror](https://github.com/yorkie-team/yorkie-js-sdk/blob/main/examples/index.html). It supports collaborative editing; multiple users can modify parts of the contents without conflict. +- `Text`: represents text with style attributes in rich text editors such as [Quill](https://quilljs.com/). Users can express styles such as bold, italic, and underline to text content. Of course, it can represent just a plain text in text-based editors such as [CodeMirror](https://codemirror.net). It supports collaborative editing; multiple users can modify parts of the contents without conflict. +- `Counter`: represents a counter in the document. As a proxy for the CRDT counter, it is used when the user manipulates the counter from the outside. +- `Tree`: represents CRDT-based tree structure that is used to represent the document tree of text-based editor such as [ProseMirror](https://prosemirror.net/). 
JSON-like data structures can be edited through proxies. For example: @@ -72,15 +76,16 @@ CRDT data structures are used by JSON-like group to resolve conflicts in concurr - `ElementRHT`: similar to `RHT`, but has elements as values. - `RGATreeList`: extended `RGA(Replicated Growable Array)` with an additional index tree. The index tree manages the indices of elements and provides faster access to elements at the int-based index. - `RGATreeSplit`: extended `RGATreeList` allowing characters to be represented as blocks rather than each single character. - +- `CRDTTree`: represents the CRDT tree with an index tree structure'. It resolves conflicts arising from concurrent editing. ### Common Group Common data structures can be used for general purposes. - [`SplayTree`](https://en.wikipedia.org/wiki/Splay_tree): A tree that moves nodes to the root by splaying. This is effective when user frequently access the same location, such as text editing. We use `SplayTree` as an index tree to give each node a weight, and to quickly access the node based on the index. - [`LLRBTree`](https://en.wikipedia.org/wiki/Left-leaning_red%E2%80%93black_tree): A tree simpler than Red-Black Tree. Newly added `floor` method finds the node of the largest key less than or equal to the given key. -- [`Trie`](https://en.wikipedia.org/wiki/Trie): A data structure that can quickly search for prefixes of sequence data such as strings. We use `Trie` to remove nested events when the contents of the `Document`' are modified at once. - +- [`Trie`](https://en.wikipedia.org/wiki/Trie): A data structure that can quickly search for prefixes of sequence data such as strings. We use `Trie` to remove nested events when the contents of the `Document` are modified at once. +- `IndexTree`: A tree implementation to represent a document of text-based editors. + ### Risks and Mitigation -We can replace the data structures with better ones for some reason, such as performance. 
For example, `SplayTree` used in `RGATreeList` can be replaced with [TreeList](https://commons.apache.org/proper/commons-collections/apidocs/org/apache/commons/collections4/list/TreeList.html). \ No newline at end of file +We can replace the data structures with better ones for some reason, such as performance. For example, `SplayTree` used in `RGATreeList` can be replaced with [TreeList](https://commons.apache.org/proper/commons-collections/apidocs/org/apache/commons/collections4/list/TreeList.html). diff --git a/design/housekeeping.md b/design/housekeeping.md index 1130d63b1..b5846ea71 100644 --- a/design/housekeeping.md +++ b/design/housekeeping.md @@ -7,11 +7,9 @@ target-version: 0.2.1 ## Summary -We provide Garbage Collection to purge tombstones and prevent the problem of +We provide [Garbage Collection](garbage-collection.md) to purge tombstones and prevent the problem of documents growing. -https://yorkie.dev/docs/garbage-collection - However, when there are clients that has been editing old documents but have not been used for a long time, garbage collection becomes less efficient. 
diff --git a/design/media/presence-api.png b/design/media/presence-api.png new file mode 100644 index 000000000..8c04934d8 Binary files /dev/null and b/design/media/presence-api.png differ diff --git a/design/media/presence-event-presence-changed.png b/design/media/presence-event-presence-changed.png new file mode 100644 index 000000000..19aa371e5 Binary files /dev/null and b/design/media/presence-event-presence-changed.png differ diff --git a/design/media/presence-event-unwatched.png b/design/media/presence-event-unwatched.png new file mode 100644 index 000000000..d5710ccce Binary files /dev/null and b/design/media/presence-event-unwatched.png differ diff --git a/design/media/presence-event-watched.png b/design/media/presence-event-watched.png new file mode 100644 index 000000000..5e467d61d Binary files /dev/null and b/design/media/presence-event-watched.png differ diff --git a/design/media/presence-structure.png b/design/media/presence-structure.png new file mode 100644 index 000000000..6707de174 Binary files /dev/null and b/design/media/presence-structure.png differ diff --git a/design/media/pubsub.jpg b/design/media/pubsub.jpg new file mode 100644 index 000000000..81daa7ca0 Binary files /dev/null and b/design/media/pubsub.jpg differ diff --git a/design/media/pubsub.png b/design/media/pubsub.png deleted file mode 100644 index 7bb03f378..000000000 Binary files a/design/media/pubsub.png and /dev/null differ diff --git a/design/media/server-streaming.jpg b/design/media/server-streaming.jpg new file mode 100644 index 000000000..aeaf3254d Binary files /dev/null and b/design/media/server-streaming.jpg differ diff --git a/design/media/watch-document.jpg b/design/media/watch-document.jpg new file mode 100644 index 000000000..d6f861bbe Binary files /dev/null and b/design/media/watch-document.jpg differ diff --git a/design/peer-awareness.md b/design/peer-awareness.md deleted file mode 100644 index c2a5f806c..000000000 --- a/design/peer-awareness.md +++ /dev/null @@ -1,99 
+0,0 @@ ---- -title: peer-awareness -target-version: 0.1.2 ---- - -# Peer Awareness - -## Summary - -We will provide Peer Awareness which is a simple algorithm that manages end-user -status like who is connected and metadata like username or email address and -etc. For example, users can implement a list of people participating in the -editing, such as a box in the top right of Google Docs. - -### Goals - -Implement Peer Awareness and provide API to users to use the feature. The goal -of the first version is to implement simple functionality and check usability. - -### Non-Goals - -The first version does not implement complex features such as dynamic metadata -updates. - -## Proposal details - -### How to use - -Users can pass metadata along with client options when creating a client. - -```typescript -const client = yorkie.createClient('https://yorkie.dev/api', { - metadata: { - username: 'hackerwins' - } -}); -``` - -Then the users create a document in the usual way and attach it to the client. - -```typescript -const doc = yorkie.createDocument('examples', 'codemirror'); -await client.attach(doc); -``` - -When a new peer registers or leaves, `peers-changed` event is fired, and the -other peer's clientID and metadata can be obtained from the event. - -```typescript -client.subscribe((event) => { - if (event.name === 'peers-changed') { - const peers = event.value[doc.getKey().toIDString()]; - for (const [clientID, metadata] of Object.entries(peers)) { - console.log(clientID, metadata); - } - } -}); -``` - -### How does it work? - -``` - +--Client "A"----+ +--Agent-----------------+ - | +--Metadata--+ | | +--PubSub -----------+ | - | | { num: 1 } | <-- WatchDocuments --> | | { | | - | +------------+ | | | docA: { | | - +----------------+ | | A: { num: 1 }, | | - | | B: { num: 2 } | | - +--Client "B"----+ | | }, | | - | +--Metadata--+ | | | ... 
| | - | | { num: 2 } | <-- WatchDocuments --> | | } | | - | +------------+ | | +--------------------+ | - +----------------+ +------------------------+ -``` - -When a client attaches documents, a stream is connected between agent and the -client through WatchDocuments API. This will update the map of clients that are -watching the documents in PubSub. When the stream disconnects or a new -connection is made, `DOCUMENTS_UNWATCHED` or `DOCUMENTS_WATCHED` event is -delivered to other clients who are watching the document together. - -### Risks and Mitigation - -The first version is missing the ability to dynamically update metadata and -propagate it to other peers. Client Metadata is managed inside the instance of -the Client and is not stored persistently in Yorkie. The reasons are as follows: - -- The goal of the first version is to check the usability of the feature. -- Metadata's primary "source of truth" location is user's DB, and it is simply - passed to Yorkie. -- All other locations of the metadata in Yorkie just refer back to the primary " - source of truth" location. -- We can prevent increasing management points caused by storing metadata in - MongoDB. - -In the future, if the users needs arise, we may need to implement the ability to -dynamically update metadata and propagates it to peers. We might consider -treating it as a Yorkie Document that has logical clocks, not a normal map in -PubSub. diff --git a/design/presence.md b/design/presence.md new file mode 100644 index 000000000..b82b5baaf --- /dev/null +++ b/design/presence.md @@ -0,0 +1,331 @@ +--- +title: presence +target-version: 0.4.6 +--- + +# Presence + +## Summary + +In collaborative applications, it is important to inform users about who is currently participating in the document and what their status is. To represent this peer awareness, yorkie provides the `Presence` feature. Presence can be used to share state between clients, such as remote cursors, selections, etc. 
Previously, there was an issue where presence belonged to `Client`, preventing batch processing with `Document`. To address this, presence has been moved from `Client` to `Document`, allowing for batch processing. + +### Goals + +The goal is to move `Presence` from `Client` to `Document` and enable batch processing with both presence and document. + +### Non-Goals + +Distinguishing between users who are not participating in document editing and users in an offline state is not covered. Users can only be aware of online peers. + +## Proposal details + +### Structure Comparison + +![Presence Structure Comparison](media/presence-structure.png) + +Previously, presence belonged to `Client`, leading to the following issues: + +1. Inefficient management of presences for each document when a client participates in more than one document. +2. Inability to edit presence and document together (no batch processing). + +In the new structure, presence belongs to `Document`, and it can be updated together with document updates through `document.update()`, enabling atomic batch processing. + +### How to use + +#### Set presence + +When attaching, the client informs the server that it is subscribing to the document. The `initialPresence` option sets the initial presence of the client. If not set, it is created as an empty object. The presence is shared with other users participating in the document. It must be serializable to JSON. + +```typescript +const doc = new yorkie.Document('docKey'); +await client.attach(doc, { + initialPresence: { color: 'blue', cursor: { x: 0, y: 0 } }, +}); +``` + +#### Get presence + +Only the presence of online clients can be retrieved. 
+ +```typescript +// Get the presence of the current user +doc.getMyPresence(); // { color: 'blue', cursor: { x: 1, y: 1 } } + +// Get the presence of the client +doc.getPresence(clientID); + +// Get all users currently participating in the document +const users = doc.getPresences(); +for (const { clientID, presence } of users) { + // Do something... +} +``` + +#### Update presence + +Changes specific properties provided to `presence.set()` within `document.update()`. The existing presence object is updated by merging the new changes. In other words, properties not specified will remain unchanged. + +Changes within the `document.update()` function are processed atomically, and other clients receive a single change. Subscribers are called only after the change is applied. + +```typescript +doc.update((root, presence) => { + presence.set({ cursor: { x: 1, y: 1 } }); +}); +``` + +#### Subscribe presence events + +`doc.subscribe("presence")` allows subscribing to all presence-related changes. To distinguish between events from oneself and others, `"my-presence"` and `"others"` topics can be used. Subscribing to these events notifies clients when others watch, unwatch, or modify their presence. + +When a new client establishes a watch stream connection with a document, it immediately receives an `initialized` event representing all users participating in the document. (Note: Create the document, subscribe, and then attach to receive the initialized event. You won't receive the initialized event if you subscribe after attaching.) 
+ +```typescript +const unsubscribe = doc.subscribe('presence', (event) => { + if (event.type === 'initialized') { + // Array of users currently participating in the document + } + + if (event.type === 'watched') { + // A user has joined the document editing online + } + + if (event.type === 'unwatched') { + // A user has left the document editing + } + + if (event.type === 'presence-changed') { + // A user has updated their presence + } +}); + +const unsubscribe = doc.subscribe('my-presence', (event) => { + // event.type can be initialized, presence-changed +}); + +const unsubscribe = doc.subscribe('others', (event) => { + // event.type can be watched, unwatched, presence-changed +}); +``` + +### How does it work? + +#### Presence Change + +In yorkie, changes to the document are conveyed through a `Change` in the `ChangePack`. To apply changes in the document along with updates to presence, a `PresenceChange` has been included within the `Change`. + +```proto +// resources.proto +message Change { + ChangeID id = 1; + string message = 2; + repeated Operation operations = 3; + PresenceChange presence_change = 4; // this is added +} + +message PresenceChange { + enum ChangeType { + CHANGE_TYPE_UNSPECIFIED = 0; + CHANGE_TYPE_PUT = 1; + CHANGE_TYPE_DELETE = 2; + CHANGE_TYPE_CLEAR = 3; + } + ChangeType type = 1; + Presence presence = 2; +} + +message Presence { + map data = 1; +} +``` + +![Presence-Change within Change](media/presence-api.png) + +When attaching a document, the initial presence is transmitted through `PresenceChange({type: 'put', presence: P})`. When updating presence via `document.update()`, the modified presence is transmitted through `PresenceChange({type: 'put', presence: P})`. When detaching the document, `PresenceChange({type: 'clear'})` is sent to remove the presence. (`P` represents the presence object.) 
+ +#### Presence Events + +##### `presence-changed` event + +![presence-changed event](media/presence-event-presence-changed.png) + +When clientA updates presence using `doc.update()`, clientB receives a `presence-changed` event. Modifications within the update function are atomic and sent as a single change to other clients, including operations(`Ops`) and presenceChange(`P`). ClientB applies the change and triggers the subscribers only after applying the change. If both root changes and presence changes exist, a `remote-change` event occurs, and then the `presence-changed` event occurs. + +```typescript +public applyChanges(changes: Array>): void { + // ... + for (const change of changes) { + let changeInfo: ChangeInfo | undefined; + let presenceEvent: WatchedEvent

| UnwatchedEvent

| PresenceChangedEvent

| undefined; + // Apply a change + const opInfos = change.execute(this.root, this.presences); + + // Fire DocEvent only after the change is applied. + if (changeInfo) { + this.publish({ + type: DocEventType.RemoteChange, + value: changeInfo, + }); + } + if (presenceEvent) { + this.publish(presenceEvent); + } + } +} +``` + +##### `watched` event + +![watched event](media/presence-event-watched.png) + +When a client attaches to a document, it establishes a watch stream connection with the server through the `WatchDocument` API. When the stream disconnects or a new connection is established, `DocumentUnWatched` or `DocumentWatched` event is sent to other clients who are watching the document. (The dotted line represents the watch stream. For information on how the watch stream operates, please refer to the [pub-sub](./pub-sub.md) documentation.) + +The client maintains a `presences` map for client presence and an `onlineClients` set for online (watching) clients. The server manages who is watching a document through `subscriptionsMapByDocID`. + +When clientA attaches while clientB is already watching the document, two scenarios exist depending on the order of receiving `DocumentChanged` and `DocumentWatched` in the watch stream response: + +1. `DocumentChanged(pushpull)` -> `DocumentWatched` + ClientA attaches, and an initial presence change is sent. Since changes occur in the document, the server informs clientB with `DocumentChanged`. ClientB sends a `pushpullChanges` request, receives clientA's initial presence, and adds it to the `presences` map. + Once clientA receives the AttachDocument response, it requests a `WatchDocument` API, establishing a watch stream and receiving the list of current document participants(`initialized`). The server notifies clientB that clientA is watching. ClientB adds clientA to the `onlineClients` set and triggers the `watched` event. + +2. 
`DocumentWatched` -> `DocumentChanged(pushpull)` + If `DocumentWatched` arrives before pushpull response, clientB can't trigger the `watched` event immediately due to the absence of presence data. Later, when `pushpull` delivers clientA's presence, if the presence didn't exist previously, it means that the initial presence was received. In this case, clientB triggers the `watched` event. + +```typescript +// client.ts +private handleWatchDocumentsResponse( + attachment: Attachment, + resp: WatchDocumentResponse, +) { + const pbWatchEvent = resp.getEvent()!; + const eventType = pbWatchEvent.getType(); + const publisher = converter.toHexString(pbWatchEvent.getPublisher_asU8()); + switch (eventType) { + // ... + case PbDocEventType.DOC_EVENT_TYPE_DOCUMENTS_WATCHED: + attachment.doc.addOnlineClient(publisher); + // NOTE(chacha912): We added to onlineClients, but we won't trigger watched event + // unless we also know their initial presence data at this point. + if (attachment.doc.hasPresence(publisher)) { + attachment.doc.publish({ + type: DocEventType.Watched, + value: { + clientID: publisher, + presence: attachment.doc.getPresence(publisher)!, + }, + }); + } + break; + } +} + + +// document.ts +public applyChanges(changes: Array>): void { + // ... + for (const change of changes) { + let changeInfo: ChangeInfo | undefined; + let presenceEvent: WatchedEvent

| UnwatchedEvent

| PresenceChangedEvent

| undefined; + const actorID = change.getID().getActorID()!; + + if (change.hasPresenceChange() && this.onlineClients.has(actorID)) { + const presenceChange = change.getPresenceChange()!; + switch (presenceChange.type) { + // ... + case PresenceChangeType.Put: + // NOTE(chacha912): When the user exists in onlineClients, but + // their presence was initially absent, we can consider that we have + // received their initial presence, so trigger the 'watched' event. + presenceEvent = { + type: this.presences.has(actorID) + ? DocEventType.PresenceChanged + : DocEventType.Watched, + value: { + clientID: actorID, + presence: presenceChange.presence, + }, + }; + break; + } + } + } +} +``` + +##### `unwatched` event + +![unwatched event](media/presence-event-unwatched.png) + +Consider clientA detaching from the document. Similar to the `watched` scenario, two cases exist for the `unwatched` event: + +1. `DocumentChanged(pushpull)` -> `DocumentUnWatched` + ClientA detaches, sending `PresenceChange({type: 'clear'})`. Since the change occurs, the server informs clientB with `DocumentChanged`. ClientB requests a `pushpullChanges`, receives clientA's `PresenceChange({type: 'clear'})`, and removes clientA from the `presences` map. If clientA exists in `onlineClients`, we can consider it as detachment occurring before unwatching. Since detached user is no longer participating in the document, clientB removes clientA from `onlineClients` and triggers the `unwatched` event. Future `unwatched` event is ignored. +2. `DocumentUnWatched` -> `DocumentChanged(pushpull)` + If clientB receives `DocumentUnWatched` before pushpull response, clientB removes clientA from `onlineClients` and triggers the `unwatched` event. Later, `pushpull` removes clientA's presence from the `presences` map. 
+ +```typescript +// client.ts +private handleWatchDocumentsResponse( + attachment: Attachment, + resp: WatchDocumentResponse, +) { + const pbWatchEvent = resp.getEvent()!; + const eventType = pbWatchEvent.getType(); + const publisher = converter.toHexString(pbWatchEvent.getPublisher_asU8()); + switch (eventType) { + // ... + case PbDocEventType.DOC_EVENT_TYPE_DOCUMENTS_UNWATCHED: { + const presence = attachment.doc.getPresence(publisher); + attachment.doc.removeOnlineClient(publisher); + // NOTE(chacha912): There is no presence, when PresenceChange(clear) is applied before unwatching. + // In that case, the 'unwatched' event is triggered while handling the PresenceChange. + if (presence) { + attachment.doc.publish({ + type: DocEventType.Unwatched, + value: { clientID: publisher, presence }, + }); + } + break; + } +} + + +// document.ts +public applyChanges(changes: Array>): void { + // ... + for (const change of changes) { + let changeInfo: ChangeInfo | undefined; + let presenceEvent: WatchedEvent

| UnwatchedEvent

| PresenceChangedEvent

| undefined; + const actorID = change.getID().getActorID()!; + + if (change.hasPresenceChange() && this.onlineClients.has(actorID)) { + const presenceChange = change.getPresenceChange()!; + switch (presenceChange.type) { + // ... + case PresenceChangeType.Clear: + // NOTE(chacha912): When the user exists in onlineClients, but + // PresenceChange(clear) is received, we can consider it as detachment + // occurring before unwatching. + // Detached user is no longer participating in the document, we remove + // them from the online clients and trigger the 'unwatched' event. + presenceEvent = { + type: DocEventType.Unwatched, + value: { + clientID: actorID, + presence: this.getPresence(actorID)!, + }, + }; + this.removeOnlineClient(actorID); + break; + } + } + } +} +``` + +### Risks and Mitigation + +- If clients don't explicitly detach, presence can accumulate in the `presences` map. We can handle clearing presence during housekeeping. +- Although presence doesn't need to be stored in the database, it is included in `Change` and stored. Later, we may consider changing the storage approach as data grows. ([Refer to Discord's storage changes.](https://blog.bytebytego.com/i/109085468/how-discord-stores-trillions-of-messages)) + - Client, Document → MongoDB (Document DB or RDB) + - Change → HBase (Wide column store) + - Snapshot, Presence → Redis (key-value store, In-memory DB) diff --git a/design/pub-sub.md b/design/pub-sub.md index 25658fcac..52e336275 100644 --- a/design/pub-sub.md +++ b/design/pub-sub.md @@ -1,6 +1,6 @@ --- title: pub-sub -target-version: 0.3.0 +target-version: 0.4.6 --- # PubSub @@ -18,36 +18,46 @@ Documents. ## Proposal Details -### How does it work? +### WatchDocument API + +In Yorkie, we use gRPC-Web and it currently supports 2 RPC modes: unary RPCs, server-side streaming RPCs. ([Client-side and Bi-directional streaming is not supported.](https://github.com/grpc/grpc-web#streaming-support)). 
Server-side streaming allows the server to send multiple messages in response to a single client request. -Yorkie implements WatchDocuments API -using [gRPC server-side streaming](https://grpc.io/docs/languages/go/basics/#server-side-streaming-rpc) -to deliver the events that have occurred to other clients. +![server-side streaming RPCs](media/server-streaming.jpg) + +Yorkie implements WatchDocument API using [gRPC server-side streaming](https://grpc.io/docs/languages/go/basics/#server-side-streaming-rpc) to deliver the events to other clients. ```protobuf // api/yorkie.proto service Yorkie { ... - rpc WatchDocuments (WatchDocumentsRequest) returns (stream WatchDocumentsResponse) {} + rpc WatchDocument (WatchDocumentRequest) returns (stream WatchDocumentResponse) {} } ``` -And to manage the event delivery target, we are using the [PubSub pattern](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern). You can learn more by looking at the [sync package](https://github.com/yorkie-team/yorkie/blob/main/server/backend/sync/pubsub.go) we are implementing. +In brief, when the client sends a WatchDocument request, it establishes a stream connection(1). On the server side, when changes occur in a document, it retrieves the clients watching that document in the subscriptionsMap(2), and then sends responses through the server stream to those clients(3). As a result, clients watching the document can receive response about its changes through the stream without the separate requests. + +![WatchDocument API](media/watch-document.jpg) + +### How does it work? + +We are using the [PubSub pattern](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) for handling event delivery targets. For more details, you can check out the [sync package](https://github.com/yorkie-team/yorkie/blob/main/server/backend/sync/pubsub.go) that we're working on. 
+ ![pub-sub pattern vs observer pattern](media/pubsub-pattern.png) The process of the event pub-sub is as follows: -![pub-sub event flow](media/pubsub.png) + +![pub-sub event flow](media/pubsub.jpg) #### 1. Set SubscriptionsMap -The `watchDocuments` API creates a `Subscription` instance and adds it to the `subscriptionsMapByDocKey`. The `Subscription` instance internally manages the `DocEvent channel`, and a `select` statement is used to retrieve events that are passed to the `Subscription` instance. +The `WatchDocument` API creates a `Subscription` instance and adds it to the `subscriptionsMapByDocID`[(code🔍)](https://github.com/yorkie-team/yorkie/blob/16fd182021231d75562a933cb32d924af16fc7f4/server/rpc/yorkie_server.go#L518-L523). The `Subscription` instance internally manages the `DocEvent channel`. ```go // Subscription represents a subscription of a subscriber to documents. type Subscription struct { id string - subscriber types.Client + subscriber *time.ActorID closed bool events chan DocEvent } @@ -55,23 +65,41 @@ type Subscription struct { #### 2. Publish Event -The publisher can send a `DocEvent` to the `Subscription` instances that subscribe to the same document through the `Publish` method. +When changes occur in a document through WatchDocument or PushPull, the `Publish` method is called to send out a `DocEvent` which includes events such as `DocumentsWatchedEvent`, `DocumentsUnwatchedEvent`, and `DocumentsChangedEvent`. ```go -type DocEvent struct { - Type types.DocEventType - Publisher types.Client - DocumentKeys []key.Key +// server/rpc/yorkie_server.go +func (s *yorkieServer) watchDoc(...) (...) 
{ + // Publish DocumentsWatchedEvent during watchDocument + s.backend.Coordinator.Publish( + ctx, + subscription.Subscriber(), + sync.DocEvent{ + Type: types.DocumentsWatchedEvent, + Publisher: subscription.Subscriber(), + DocumentID: documentID, + }, + ) } -Publish(ctx context.Context, publisherID *time.ActorID, event sync.DocEvent) +// server/packs/packs.go +func PushPull(...) (...) { + // Publish DocumentsChangedEvent during pushpull + be.Coordinator.Publish( + ctx, + publisherID, + sync.DocEvent{ + Type: types.DocumentsChangedEvent, + Publisher: publisherID, + DocumentID: docInfo.ID, + }, + ) +} ``` -When performing `WatchDocuments`, `UpdatePresence`, and `PushPull`, the `Publish` method is called to deliver the `DocEvent`. - #### 3. Fire Event -The event is sent to the `Subscription` channels that subscribe to the same document by iterating through the `documentKeys` of the `DocEvent`. +The `Publish` method sends a `DocEvent` to the event channel of subscriptions that are subscribing to the document of the `DocEvent`. Through `subscriptionsMapByDocID`, we can find the subscriptions (created in step 1) that are subscribing to a specific document. Subsequently, the `DocEvent` is sent to the event channels of these subscriptions. [(code🔍)](https://github.com/yorkie-team/yorkie/blob/16fd182021231d75562a933cb32d924af16fc7f4/server/backend/sync/memory/pubsub.go#L150-L196). 
```go func (m *PubSub) Publish( @@ -82,34 +110,33 @@ func (m *PubSub) Publish( m.subscriptionsMapMu.RLock() defer m.subscriptionsMapMu.RUnlock() - for _, docKey := range event.DocumentKeys { - k := docKey.String() - - if subs, ok := m.subscriptionsMapByDocKey[k]; ok { - for _, sub := range subs.Map() { - // If the subscriber is itself, do not send - if sub.Subscriber().ID.Compare(publisherID) == 0 { - continue - } + documentID := event.DocumentID + if subs, ok := m.subscriptionsMapByDocID[documentID]; ok { + for _, sub := range subs.Map() { + // If the subscriber is itself, do not send + if sub.Subscriber().Compare(publisherID) == 0 { + continue + } - // Send the event to the peer's event channel - sub.Events() <- event + select { + // Send the event to the peer's event channel + case sub.Events() <- event: } } } } ``` -#### 4. Send watchDocuments response to stream +#### 4. Send watchDocument response to stream -In the `select` statement from step 1, when the `Subscription` channel receives an event, the event is sent to the `watchDocumentsResponse` of the rpc stream. +When the event channel of `Subscription` receives an event, the event is sent to the `WatchDocumentResponse` of the rpc stream. [(code🔍)](https://github.com/yorkie-team/yorkie/blob/16fd182021231d75562a933cb32d924af16fc7f4/server/rpc/yorkie_server.go#L421-L443) ```go -func (s *yorkieServer) WatchDocuments( - req *api.WatchDocumentsRequest, - stream api.YorkieService_WatchDocumentsServer, +func (s *yorkieServer) WatchDocument( + req *api.WatchDocumentRequest, + stream api.YorkieService_WatchDocumentServer, ) error { - // ... + // ... 
for { select { case <-s.serviceCtx.Done(): @@ -119,12 +146,11 @@ func (s *yorkieServer) WatchDocuments( case event := <-subscription.Events(): eventType, err := converter.ToDocEventType(event.Type) - if err := stream.Send(&api.WatchDocumentsResponse{ - Body: &api.WatchDocumentsResponse_Event{ + if err := stream.Send(&api.WatchDocumentResponse{ + Body: &api.WatchDocumentResponse_Event{ Event: &api.DocEvent{ - Type: eventType, - Publisher: converter.ToClient(event.Publisher), - DocumentKeys: converter.ToDocumentKeys(event.DocumentKeys), + Type: eventType, + Publisher: event.Publisher.Bytes(), }, }, }); err != nil { @@ -137,4 +163,4 @@ func (s *yorkieServer) WatchDocuments( ### Risks and Mitigation -Currently, Subscription instances are managed in memory. \ No newline at end of file +Currently, Subscription instances are managed in memory. diff --git a/design/range-deletion-in-splay-tree.md b/design/range-deletion-in-splay-tree.md index 114f25f78..cc479337a 100644 --- a/design/range-deletion-in-splay-tree.md +++ b/design/range-deletion-in-splay-tree.md @@ -2,6 +2,7 @@ title: delete-range-in-splay-tree target-version: 0.2.12 --- + --- # Range Deletion in Splay Tree @@ -14,7 +15,7 @@ Using the feature of a splay tree that changes the root freely, `splay.DeleteRan ### Goals -The function `DeleteRange` should separate all nodes exactly in the given range as a subtree. After executing the function, the entire tree from and weight of every node must be correct just as when the nodes were deleted one by one. +The function `DeleteRange` should separate all nodes exactly in the given range as a subtree. After executing the function, the entire tree structure and weight of every node must be correct just as when the nodes were deleted one by one. ## Proposal Details @@ -24,8 +25,7 @@ From the property of indexed BST, all nodes with a smaller index than the root a And also, Splay Tree can change the root freely to use `Splay`. 
-Then using the properties, when we want to delete the range from index `L` to `R` we can make the shape of tree like the figure avobe to `Splay(L-1)` then `Splay(R+1)`. - +Then using the properties, when we want to delete the range from index `L` to `R` we can make the shape of tree like the figure above to `Splay(L-1)` then `Splay(R+1)`. ![delete-range-in-splay-tree-2](./media/range-deletion-in-splay-tree-2-separation.png) @@ -39,10 +39,10 @@ func (t *Tree[V]) DeleteRange(leftBoundary, rightBoundary *Node[V]) { t.cutOffRight(leftBoundary) return } - + t.Splay(leftBoundary) t.Splay(rightBoundary) - + // refer case 2 of second figure if rightBoundary.left != leftBoundary { t.rotateRight(leftBoundary) @@ -51,9 +51,11 @@ func (t *Tree[V]) DeleteRange(leftBoundary, rightBoundary *Node[V]) { } ``` + Sometimes the tree shapes like case 2 after `Splay`s because of the zig-zig case of `Splay`. But it simply changes to the same shapes as case 1 in one rotation for `L-1`. Then now to cut off the right child(subtree) of `L-1`, we can separate all nodes in the given range to be deleted. + ### Risks and Mitigation `DeleteRange` does not consider the occurrence of new nodes due to concurrent editing in the range to be deleted. They should be filtered before using `DeleteRange`, and `DeleteRange` should be executed continuously in the smaller ranges that do not include them. diff --git a/design/retention.md b/design/retention.md index 54ce78c25..06262385f 100644 --- a/design/retention.md +++ b/design/retention.md @@ -56,7 +56,7 @@ In conclusion, when a snapshot is created, it should not simply delete the chang This can be expressed as a picture above(when SnapshotInterval=10, SnapshotThreshold=5). Assuming that there are a series of C (changes) in chronological order (ServerSeq), S (snapshots) are being created at intervals of 10 and the synchronized ServerSeq is being recorded in SyncedSeq. 
At this time, there may be a situation where Client A's synchronization is delayed for some reason. -In this situation, if all previous Cs are deleted when S2 is created, Client A must pull C19 and C20 for synchronization, but it is already deleted and does not exist. This is the reason why the previous changes are deleted based on the minimum synced ServerSeq in the actual implementation. +In this situation, if all previous Cs are deleted when S3 is created, Client A must pull C19 and C20 for synchronization, but it is already deleted and does not exist. This is the reason why the previous changes are deleted based on the minimum synced ServerSeq in the actual implementation. ### How it was implemented as code diff --git a/design/tree.md b/design/tree.md new file mode 100644 index 000000000..ab7071acc --- /dev/null +++ b/design/tree.md @@ -0,0 +1,166 @@ +--- +title: tree +target-version: 0.4.6 +--- + +# Tree + +## Summary + +In Yorkie, users can create and edit JSON-like documents using JSON-like data structures such as `Primitive`, `Object`, `Array`, `Text`, and `Tree`. Among these, the `Tree` structure is used to represent the document model of a tree-based text editor, similar to XML. + +This document introduces the `Tree` data structure, and explains the operations provided by `Tree`, focusing on the `Tree` coordinate system and the logic of the `Tree.Edit` operation. Furthermore, it explains how this logic ensures eventual consistency in concurrent document editing scenarios. + +### Goals + +This document aims to help new SDK contributors understand the overall `Tree` data structure and explain how Yorkie ensures consistency when multiple clients are editing concurrently. + +### Non-Goals + +This document focuses on `Tree.Edit` operations rather than `Tree.Style`. + +## Proposal Details + +### XML-like Tree + +In yorkie, a XML-like `Tree` is used to represent the document model of a tree-based text editor. 
+ +This tree-based document model resembles an XML tree and consists of element nodes and text nodes. Element nodes can have attributes, and text nodes contain a string as their value. For example: + + + +**Operation** + +The XML-like `Tree` provides specialized operations tailored for text editing rather than typical operations of a general tree. To specify the operation's range, an `index` or `path` is used. For example: + + + +These `index`es are assigned in order at positions where the user's cursor can reach. These `index`es draw inspiration from ProseMirror's index and share a similar structural concept. + +In the case of a `path`, it contains `offset`s of each node from the root node as elements except the last. The last element of the `path` represents the position in the parent node. For example, the `path` of the position between '`k`' and '`i`' is `[1, 4]`. The first element of the `path` is the `offset` of the `` in `

` and the second element represents the position between '`k`' and '`i`' in ``. + +1. `Tree.Edit` + +Users can use the `Edit` operation to insert or delete nodes within the `Tree`. + +https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/json/tree.go#L115-L131 + +Where `fromIdx` is the starting position of editing, `toIdx` is the ending position, and `contents` represent the nodes to be inserted. If `contents` are omitted, the operation only deletes nodes between `fromIdx` and `toIdx`. + + + +Similarly, users can specify the editing range using a `path` that leads to the `Tree`'s node in the type of `[]int`. + +https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/json/tree.go#L217-L237 + +2. `Tree.Style` + +Users can use the `Style` operation to specify attributes for the element nodes in the `Tree`. + +https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/json/tree.go#L239-L268 + +### Implementation of Edit Operation + +**Tree Coordinate System** + + + +Yorkie implements the above [data structure](https://github.com/yorkie-team/yorkie/blob/main/design/data-structure.md) to create a JSON-like `Document`, which consists of different layers, each with its own coordinate system. The dependency graph above can be divided into three main groups. The **JSON-like** group directly used by users to edit JSON-like `Document`s. The **CRDT** Group is utilized from the JSON-like group to resolve conflicts in concurrent editing situations. Finally, the **common** group is used for the detailed implementation of CRDT group and serves general purposes. + +Thus, the `Tree`, introduced in this document, has dependencies such as '`Tree` → `CRDTTree` → `IndexTree`', and each layer has its own coordinate system: + + + +These coordinate systems transform in the order of '`index(path)` → `IndexTree.TreePos` → `CRDTTree.TreeNodeID` → `CRDTTree.TreePos`'. 
+ + + +1. `index` → `IndexTree.TreePos` + +The `index` is the coordinate system used by users for local editing. This `index` is received from the user, and is converted to `IndexTree.TreePos`. This `IndexTree.TreePos` represents the physical position within the local tree and is used for actual tree editing. + +2. `IndexTree.TreePos` → (`CRDTTree.TreeNodeID`) → `CRDTTree.TreePos` + +Next, the obtained `IndexTree.TreePos` is transformed into the logical coordinate system of the distributed tree, represented by `CRDTTree.TreePos`. To achieve this, the given physical position, `IndexTree.TreePos`, is used to find the parent node and left sibling node. Then, a `CRDTTree.TreePos` is created using the unique IDs of the parent node and left sibling node, which are `CRDTTree.TreeNodeID`. This coordinate system is used in subsequent `Tree.Edit` and `Tree.Style` operations. + +In the case of remote editing, where the local coordinate system is received from the user in local editing, there is no need for Step 1 since changes are pulled from the server using `ChangePack` to synchronize the changes. Refer to the [document-editing](https://github.com/yorkie-team/yorkie/blob/main/design/document-editing.md) for more details. + +**Tree.Edit Logic** + +The core process of the `Tree.Edit` operation is as follows: + +1. Find `CRDTTree.TreePos` from the given `fromIdx` and `toIdx` (local editing only). +2. Find the corresponding left sibling node and parent node within the `IndexTree` based on `CRDTTree.TreePos`. +3. Delete nodes in the range of `fromTreePos` to `toTreePos`. +4. Insert the given nodes at the appropriate positions (insert operation only). + +**[[STEP 1]](https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/json/tree.go#L121C1-L128)** Find `CRDTTree.TreePos` from the given `fromIdx` and `toIdx` (local editing only) + +In the case of local editing, the given `index`es are converted to `CRDTTree.TreePos`. 
The detailed process is the same as described in the 'Tree Coordinate System' above. + +**[[STEP 2]](https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/crdt/tree.go#L572C1-L580C3)** Find the corresponding left sibling node and parent node within the `IndexTree` based on `CRDTTree.TreePos` + +2-1. For text nodes, if necessary, split nodes at the appropriate positions to find the left sibling node. + +2-2. Determine the sequence of nodes and find the appropriate position. Since `Clone`s of each client might exist in different states, the `findFloorNode` function is used to find the closest node (lower bound). + +**[[STEP 3]](https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/crdt/tree.go#L582-L640)** Delete nodes in the range of `fromTreePos` to `toTreePos` + +3-1. Traverse the range and identify nodes to be removed. If a node is an element node and doesn't include both opening and closing tags, it is excluded from removal. + +3-2. Update the `latestCreatedAtMapByActor` information for each node and mark nodes with tombstones in the `IndexTree` to indicate removal. + +**[[STEP 4]](https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/pkg/document/crdt/tree.go#L642-L681)** Insert the given nodes at the appropriate positions (insert operation only) + +4-1. If the left sibling node at the insertion position is the same as the parent node, it means the node will be inserted as the leftmost child of the parent. Hence, the node is inserted at the leftmost position of the parent's children list. + +4-2. Otherwise, the new node is inserted to the right of the left sibling node. + +### How to Guarantee Eventual Consistency + +**Coverage** + + + +Using conditions such as range type, node type, and edit type, 27 possible cases of concurrent editing can be represented. 
+ + + +Eventual consistency is guaranteed for these [27 cases](https://github.com/yorkie-team/yorkie/blob/fd3b15c7d2c482464b6c8470339bcc497204114e/test/integration/tree_test.go#L736-L2094). In addition, eventual consistency is ensured for the following edge cases: + +- Selecting multiple nodes in a multi-level range +- Selecting only a part of nodes (e.g., selecting only the opening tag or closing tag of the node) + +**How does it work?** + +- `lastCreatedAtMapByActor` + +https://github.com/yorkie-team/yorkie/blob/81137b32d0d1d3d36be5b63652e5ab0273f536de/pkg/document/operations/tree_edit.go#L36-L38 + +`latestCreatedAtMapByActor` is a map that stores the latest creation time by actor for the nodes included in the editing range. However, relying solely on the typical `lamport` clocks that represent local clock of clients, it's not possible to determine if two events are causally related or concurrent. For instance: + + + +In the case of the example above, during the process of synchronizing operations between clients A and B, client A is unaware of the existence of '`c`' when client B performs `Edit(0,2)`. As a result, an issue arises where the element '`c`', which is within the contained range, gets deleted together. + +To address this, the `lastCreatedAtMapByActor` is utilized during operation execution to store final timestamp information for each actor. Subsequently, this information allows us to ascertain the causal relationship between the two events. + +- Restricted to only `insertAfter` + +https://github.com/yorkie-team/yorkie/blob/422901861aedbd3a86fdcb9cf3b5740d6daf38eb/pkg/index/tree.go#L552-L570 + +To ensure consistency in concurrent editing scenarios, only the `insertAfter` operation is allowed, rather than `insertBefore`, similar to conventional CRDT algorithms. To achieve this, `CRDTTree.TreePos` takes a form that includes `LeftSiblingID`, thus always maintaining a reference to the left sibling node. 
+ +If the left sibling node is the same as the parent node, it indicates that the node is positioned at the far left of the parent's children list. + +- `FindOffset` + +https://github.com/yorkie-team/yorkie/blob/422901861aedbd3a86fdcb9cf3b5740d6daf38eb/pkg/index/tree.go#L393-L412 + +During the traversal of the given range in `traverseInPosRange` (STEP3), the process of converting the provided `CRDTTree.TreePos` to an `IndexTree.TreePos` is executed. To determine the `offset` for this conversion, the `FindOffset` function is utilized. In doing so, calculating the `offset` excluding the removed nodes prevents potential issues that can arise in concurrent editing scenarios. + +### Risks and Mitigation + +- In the current conflict resolution policy of Yorkie, when both insert and delete operations occur simultaneously, even if the insert range is included in the delete range, the inserted node remains after synchronization. This might not always reflect the user's intention accurately. + +- The `Tree.Edit` logic uses index-based traversal instead of node-based traversal for a clearer implementation. This might lead to a performance impact. If this becomes a concern, switching to node-based traversal can be considered. diff --git a/pkg/document/crdt/array.go b/pkg/document/crdt/array.go index 9faf44419..458c42027 100644 --- a/pkg/document/crdt/array.go +++ b/pkg/document/crdt/array.go @@ -43,9 +43,8 @@ func (a *Array) Purge(elem Element) error { } // Add adds the given element at the last. -func (a *Array) Add(elem Element) *Array { - a.elements.Add(elem) - return a +func (a *Array) Add(elem Element) error { + return a.elements.Add(elem) } // Get returns the element of the given index. @@ -59,7 +58,7 @@ func (a *Array) Get(idx int) (Element, error) { // FindPrevCreatedAt returns the creation time of the previous element of the // given element. 
-func (a *Array) FindPrevCreatedAt(createdAt *time.Ticket) *time.Ticket { +func (a *Array) FindPrevCreatedAt(createdAt *time.Ticket) (*time.Ticket, error) { return a.elements.FindPrevCreatedAt(createdAt) } @@ -74,8 +73,8 @@ func (a *Array) Delete(idx int, deletedAt *time.Ticket) (Element, error) { // MoveAfter moves the given `createdAt` element after the `prevCreatedAt` // element. -func (a *Array) MoveAfter(prevCreatedAt, createdAt, executedAt *time.Ticket) { - a.elements.MoveAfter(prevCreatedAt, createdAt, executedAt) +func (a *Array) MoveAfter(prevCreatedAt, createdAt, executedAt *time.Ticket) error { + return a.elements.MoveAfter(prevCreatedAt, createdAt, executedAt) } // Elements returns an array of elements contained in this RGATreeList. @@ -111,7 +110,9 @@ func (a *Array) DeepCopy() (Element, error) { if err != nil { return nil, err } - elements.Add(copiedNode) + if err = elements.Add(copiedNode); err != nil { + return nil, err + } } array := NewArray(elements, a.createdAt) @@ -160,13 +161,17 @@ func (a *Array) LastCreatedAt() *time.Ticket { } // InsertAfter inserts the given element after the given previous element. -func (a *Array) InsertAfter(prevCreatedAt *time.Ticket, element Element) { - a.elements.InsertAfter(prevCreatedAt, element) +func (a *Array) InsertAfter(prevCreatedAt *time.Ticket, element Element) error { + return a.elements.InsertAfter(prevCreatedAt, element) } // DeleteByCreatedAt deletes the given element. -func (a *Array) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) Element { - return a.elements.DeleteByCreatedAt(createdAt, deletedAt).elem +func (a *Array) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) (Element, error) { + node, err := a.elements.DeleteByCreatedAt(createdAt, deletedAt) + if err != nil { + return nil, err + } + return node.elem, nil } // Len returns length of this Array. 
diff --git a/pkg/document/crdt/array_test.go b/pkg/document/crdt/array_test.go index 805708e08..920422cda 100644 --- a/pkg/document/crdt/array_test.go +++ b/pkg/document/crdt/array_test.go @@ -32,11 +32,14 @@ func TestArray(t *testing.T) { a := crdt.NewArray(crdt.NewRGATreeList(), ctx.IssueTimeTicket()) - a.Add(crdt.NewPrimitive("1", ctx.IssueTimeTicket())) + err := a.Add(crdt.NewPrimitive("1", ctx.IssueTimeTicket())) + assert.NoError(t, err) assert.Equal(t, `["1"]`, a.Marshal()) - a.Add(crdt.NewPrimitive("2", ctx.IssueTimeTicket())) + err = a.Add(crdt.NewPrimitive("2", ctx.IssueTimeTicket())) + assert.NoError(t, err) assert.Equal(t, `["1","2"]`, a.Marshal()) - a.Add(crdt.NewPrimitive("3", ctx.IssueTimeTicket())) + err = a.Add(crdt.NewPrimitive("3", ctx.IssueTimeTicket())) + assert.NoError(t, err) assert.Equal(t, `["1","2","3"]`, a.Marshal()) }) } diff --git a/pkg/document/crdt/counter.go b/pkg/document/crdt/counter.go index 0bf7c5159..f43dac3fa 100644 --- a/pkg/document/crdt/counter.go +++ b/pkg/document/crdt/counter.go @@ -18,11 +18,15 @@ package crdt import ( "encoding/binary" + "errors" "fmt" "github.com/yorkie-team/yorkie/pkg/document/time" ) +// ErrUnsupportedType is returned when the given type is not supported. +var ErrUnsupportedType = errors.New("unsupported type") + // CounterType represents any type that can be used as a counter. type CounterType int @@ -33,16 +37,16 @@ const ( ) // CounterValueFromBytes parses the given bytes into value. 
-func CounterValueFromBytes(counterType CounterType, value []byte) interface{} { +func CounterValueFromBytes(counterType CounterType, value []byte) (interface{}, error) { switch counterType { case IntegerCnt: val := int32(binary.LittleEndian.Uint32(value)) - return int(val) + return int(val), nil case LongCnt: - return int64(binary.LittleEndian.Uint64(value)) + return int64(binary.LittleEndian.Uint64(value)), nil + default: + return nil, ErrUnsupportedType } - - panic("unsupported type") } // Counter represents changeable number data type. @@ -55,39 +59,47 @@ type Counter struct { } // NewCounter creates a new instance of Counter. -func NewCounter(valueType CounterType, value interface{}, createdAt *time.Ticket) *Counter { +func NewCounter(valueType CounterType, value interface{}, createdAt *time.Ticket) (*Counter, error) { switch valueType { case IntegerCnt: + intValue, err := castToInt(value) + if err != nil { + return nil, err + } return &Counter{ valueType: IntegerCnt, - value: castToInt(value), + value: intValue, createdAt: createdAt, - } + }, nil case LongCnt: + longValue, err := castToLong(value) + if err != nil { + return nil, err + } return &Counter{ valueType: LongCnt, - value: castToLong(value), + value: longValue, createdAt: createdAt, - } + }, nil + default: + return nil, ErrUnsupportedType } - - panic("unsupported type") } // Bytes creates an array representing the value. -func (p *Counter) Bytes() []byte { +func (p *Counter) Bytes() ([]byte, error) { switch val := p.value.(type) { case int32: bytes := [4]byte{} binary.LittleEndian.PutUint32(bytes[:], uint32(val)) - return bytes[:] + return bytes[:], nil case int64: bytes := [8]byte{} binary.LittleEndian.PutUint64(bytes[:], uint64(val)) - return bytes[:] + return bytes[:], nil + default: + return nil, ErrUnsupportedType } - - panic("unsupported type") } // Marshal returns the JSON encoding of the value. 
@@ -146,20 +158,28 @@ func (p *Counter) ValueType() CounterType { // than MinInt32, Counter's value type can be changed Integer to Long. // Because in golang, int can be either int32 or int64. // So we need to assert int to int32. -func (p *Counter) Increase(v *Primitive) *Counter { +func (p *Counter) Increase(v *Primitive) (*Counter, error) { if !p.IsNumericType() || !v.IsNumericType() { - panic("unsupported type") + return nil, ErrUnsupportedType } switch p.valueType { case IntegerCnt: - p.value = p.value.(int32) + castToInt(v.value) + intValue, err := castToInt(v.value) + if err != nil { + return nil, err + } + p.value = p.value.(int32) + intValue case LongCnt: - p.value = p.value.(int64) + castToLong(v.value) + longValue, err := castToLong(v.value) + if err != nil { + return nil, err + } + p.value = p.value.(int64) + longValue default: - panic("unsupported type") + return nil, ErrUnsupportedType } - return p + return p, nil } // IsNumericType checks for numeric types. @@ -169,37 +189,37 @@ func (p *Counter) IsNumericType() bool { } // castToInt casts numeric type to int32. -func castToInt(value interface{}) int32 { +func castToInt(value interface{}) (int32, error) { switch val := value.(type) { case int32: - return val + return val, nil case int64: - return int32(val) + return int32(val), nil case int: - return int32(val) + return int32(val), nil case float32: - return int32(val) + return int32(val), nil case float64: - return int32(val) + return int32(val), nil default: - panic("unsupported type") + return 0, ErrUnsupportedType } } // castToLong casts numeric type to int64. 
-func castToLong(value interface{}) int64 { +func castToLong(value interface{}) (int64, error) { switch val := value.(type) { case int64: - return val + return val, nil case int32: - return int64(val) + return int64(val), nil case int: - return int64(val) + return int64(val), nil case float32: - return int64(val) + return int64(val), nil case float64: - return int64(val) + return int64(val), nil default: - panic("unsupported type") + return 0, ErrUnsupportedType } } diff --git a/pkg/document/crdt/counter_test.go b/pkg/document/crdt/counter_test.go index de3345dfb..d83a2b066 100644 --- a/pkg/document/crdt/counter_test.go +++ b/pkg/document/crdt/counter_test.go @@ -30,81 +30,94 @@ import ( func TestCounter(t *testing.T) { t.Run("new counter test", func(t *testing.T) { - intCntWithInt32Value := crdt.NewCounter(crdt.IntegerCnt, int32(math.MaxInt32), time.InitialTicket) + intCntWithInt32Value, err := crdt.NewCounter(crdt.IntegerCnt, int32(math.MaxInt32), time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.IntegerCnt, intCntWithInt32Value.ValueType()) - intCntWithInt64Value := crdt.NewCounter(crdt.IntegerCnt, int64(math.MaxInt32+1), time.InitialTicket) + intCntWithInt64Value, err := crdt.NewCounter(crdt.IntegerCnt, int64(math.MaxInt32+1), time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.IntegerCnt, intCntWithInt64Value.ValueType()) - intCntWithIntValue := crdt.NewCounter(crdt.IntegerCnt, math.MaxInt32, time.InitialTicket) + intCntWithIntValue, err := crdt.NewCounter(crdt.IntegerCnt, math.MaxInt32, time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.IntegerCnt, intCntWithIntValue.ValueType()) - intCntWithDoubleValue := crdt.NewCounter(crdt.IntegerCnt, 0.5, time.InitialTicket) + intCntWithDoubleValue, err := crdt.NewCounter(crdt.IntegerCnt, 0.5, time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.IntegerCnt, intCntWithDoubleValue.ValueType()) - intCntWithUnsupportedValue := func() { crdt.NewCounter(crdt.IntegerCnt, 
"", time.InitialTicket) } - assert.Panics(t, intCntWithUnsupportedValue) + _, err = crdt.NewCounter(crdt.IntegerCnt, "", time.InitialTicket) + assert.ErrorIs(t, err, crdt.ErrUnsupportedType) - longCntWithInt32Value := crdt.NewCounter(crdt.LongCnt, int32(math.MaxInt32), time.InitialTicket) + longCntWithInt32Value, err := crdt.NewCounter(crdt.LongCnt, int32(math.MaxInt32), time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.LongCnt, longCntWithInt32Value.ValueType()) - longCntWithInt64Value := crdt.NewCounter(crdt.LongCnt, int64(math.MaxInt32+1), time.InitialTicket) + longCntWithInt64Value, err := crdt.NewCounter(crdt.LongCnt, int64(math.MaxInt32+1), time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.LongCnt, longCntWithInt64Value.ValueType()) - longCntWithIntValue := crdt.NewCounter(crdt.LongCnt, math.MaxInt32+1, time.InitialTicket) + longCntWithIntValue, err := crdt.NewCounter(crdt.LongCnt, math.MaxInt32+1, time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.LongCnt, longCntWithIntValue.ValueType()) - longCntWithDoubleValue := crdt.NewCounter(crdt.LongCnt, 0.5, time.InitialTicket) + longCntWithDoubleValue, err := crdt.NewCounter(crdt.LongCnt, 0.5, time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, crdt.LongCnt, longCntWithDoubleValue.ValueType()) - longCntWithUnsupportedValue := func() { crdt.NewCounter(crdt.LongCnt, "", time.InitialTicket) } - assert.Panics(t, longCntWithUnsupportedValue) + _, err = crdt.NewCounter(crdt.LongCnt, "", time.InitialTicket) + assert.ErrorIs(t, err, crdt.ErrUnsupportedType) }) t.Run("increase test", func(t *testing.T) { var x = 5 var y int64 = 10 var z = 3.14 - integer := crdt.NewCounter(crdt.IntegerCnt, x, time.InitialTicket) - long := crdt.NewCounter(crdt.LongCnt, y, time.InitialTicket) - double := crdt.NewCounter(crdt.IntegerCnt, z, time.InitialTicket) + integer, err := crdt.NewCounter(crdt.IntegerCnt, x, time.InitialTicket) + assert.NoError(t, err) + long, err := 
crdt.NewCounter(crdt.LongCnt, y, time.InitialTicket) + assert.NoError(t, err) + double, err := crdt.NewCounter(crdt.IntegerCnt, z, time.InitialTicket) + assert.NoError(t, err) integerOperand := crdt.NewPrimitive(x, time.InitialTicket) longOperand := crdt.NewPrimitive(y, time.InitialTicket) doubleOperand := crdt.NewPrimitive(z, time.InitialTicket) // normal process test - integer.Increase(integerOperand) - integer.Increase(longOperand) - integer.Increase(doubleOperand) + _, err = integer.Increase(integerOperand) + assert.NoError(t, err) + _, err = integer.Increase(longOperand) + assert.NoError(t, err) + _, err = integer.Increase(doubleOperand) + assert.NoError(t, err) assert.Equal(t, integer.Marshal(), "23") - long.Increase(integerOperand) - long.Increase(longOperand) - long.Increase(doubleOperand) + _, err = long.Increase(integerOperand) + assert.NoError(t, err) + _, err = long.Increase(longOperand) + assert.NoError(t, err) + _, err = long.Increase(doubleOperand) + assert.NoError(t, err) assert.Equal(t, long.Marshal(), "28") - double.Increase(integerOperand) - double.Increase(longOperand) - double.Increase(doubleOperand) + _, err = double.Increase(integerOperand) + assert.NoError(t, err) + _, err = double.Increase(longOperand) + assert.NoError(t, err) + _, err = double.Increase(doubleOperand) + assert.NoError(t, err) assert.Equal(t, double.Marshal(), "21") // error process test - // TODO: it should be modified to error check - // when 'Remove panic from server code (#50)' is completed. 
- unsupportedTypePanicTest := func() { - r := recover() - assert.NotNil(t, r) - assert.Equal(t, r, "unsupported type") + unsupportedTypeErrorTest := func(v interface{}) { + _, err = crdt.NewCounter(crdt.IntegerCnt, v, time.InitialTicket) + assert.ErrorIs(t, err, crdt.ErrUnsupportedType) } - unsupportedTest := func(v interface{}) { - defer unsupportedTypePanicTest() - crdt.NewCounter(crdt.IntegerCnt, v, time.InitialTicket) - } - unsupportedTest("str") - unsupportedTest(true) - unsupportedTest([]byte{2}) - unsupportedTest(gotime.Now()) + unsupportedTypeErrorTest("str") + unsupportedTypeErrorTest(true) + unsupportedTypeErrorTest([]byte{2}) + unsupportedTypeErrorTest(gotime.Now()) assert.Equal(t, integer.Marshal(), "23") assert.Equal(t, long.Marshal(), "28") @@ -112,11 +125,13 @@ func TestCounter(t *testing.T) { }) t.Run("Counter value overflow test", func(t *testing.T) { - integer := crdt.NewCounter(crdt.IntegerCnt, math.MaxInt32, time.InitialTicket) + integer, err := crdt.NewCounter(crdt.IntegerCnt, math.MaxInt32, time.InitialTicket) + assert.NoError(t, err) assert.Equal(t, integer.ValueType(), crdt.IntegerCnt) operand := crdt.NewPrimitive(1, time.InitialTicket) - integer.Increase(operand) + _, err = integer.Increase(operand) + assert.NoError(t, err) assert.Equal(t, integer.ValueType(), crdt.IntegerCnt) assert.Equal(t, integer.Marshal(), strconv.FormatInt(math.MinInt32, 10)) }) diff --git a/pkg/document/crdt/element.go b/pkg/document/crdt/element.go index 2702d2de2..5330ced41 100644 --- a/pkg/document/crdt/element.go +++ b/pkg/document/crdt/element.go @@ -36,7 +36,7 @@ type Container interface { Descendants(callback func(elem Element, parent Container) bool) // DeleteByCreatedAt removes the given element from this container. - DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) Element + DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) (Element, error) } // GCElement represents Element which has GC. 
diff --git a/pkg/document/crdt/element_rht.go b/pkg/document/crdt/element_rht.go index 5d90699e9..8186bc3f0 100644 --- a/pkg/document/crdt/element_rht.go +++ b/pkg/document/crdt/element_rht.go @@ -129,17 +129,17 @@ func (rht *ElementRHT) Delete(k string, deletedAt *time.Ticket) Element { } // DeleteByCreatedAt deletes the Element of the given creation time. -func (rht *ElementRHT) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) Element { +func (rht *ElementRHT) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) (Element, error) { node, ok := rht.nodeMapByCreatedAt[createdAt.Key()] if !ok { - return nil + return nil, fmt.Errorf("DeleteByCreatedAt %s: %w", createdAt.Key(), ErrChildNotFound) } if !node.Remove(deletedAt) { - return nil + return nil, nil } - return node.elem + return node.elem, nil } // Elements returns a map of elements because the map easy to use for loop. diff --git a/pkg/document/crdt/object.go b/pkg/document/crdt/object.go index c27d43d32..abcba45e4 100644 --- a/pkg/document/crdt/object.go +++ b/pkg/document/crdt/object.go @@ -63,7 +63,7 @@ func (o *Object) Has(k string) bool { } // DeleteByCreatedAt deletes the element of the given creation time. 
-func (o *Object) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) Element { +func (o *Object) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) (Element, error) { return o.memberNodes.DeleteByCreatedAt(createdAt, deletedAt) } diff --git a/pkg/document/crdt/primitive.go b/pkg/document/crdt/primitive.go index 0308db5c9..c109007aa 100644 --- a/pkg/document/crdt/primitive.go +++ b/pkg/document/crdt/primitive.go @@ -52,7 +52,7 @@ func ValueFromBytes(valueType ValueType, value []byte) interface{} { return false case Integer: val := int32(binary.LittleEndian.Uint32(value)) - return int32(val) + return val case Long: return int64(binary.LittleEndian.Uint64(value)) case Double: diff --git a/pkg/document/crdt/rga_tree_list.go b/pkg/document/crdt/rga_tree_list.go index 0273b4c6f..26d132857 100644 --- a/pkg/document/crdt/rga_tree_list.go +++ b/pkg/document/crdt/rga_tree_list.go @@ -149,8 +149,8 @@ func (a *RGATreeList) Marshal() string { } // Add adds the given element at the last. -func (a *RGATreeList) Add(elem Element) { - a.insertAfter(a.last.CreatedAt(), elem, elem.CreatedAt()) +func (a *RGATreeList) Add(elem Element) error { + return a.insertAfter(a.last.CreatedAt(), elem, elem.CreatedAt()) } // Nodes returns an array of elements contained in this RGATreeList. @@ -175,8 +175,8 @@ func (a *RGATreeList) LastCreatedAt() *time.Ticket { } // InsertAfter inserts the given element after the given previous element. -func (a *RGATreeList) InsertAfter(prevCreatedAt *time.Ticket, elem Element) { - a.insertAfter(prevCreatedAt, elem, elem.CreatedAt()) +func (a *RGATreeList) InsertAfter(prevCreatedAt *time.Ticket, elem Element) error { + return a.insertAfter(prevCreatedAt, elem, elem.CreatedAt()) } // Get returns the element of the given index. @@ -207,17 +207,17 @@ func (a *RGATreeList) Get(idx int) (*RGATreeListNode, error) { } // DeleteByCreatedAt deletes the given element. 
-func (a *RGATreeList) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) *RGATreeListNode { +func (a *RGATreeList) DeleteByCreatedAt(createdAt *time.Ticket, deletedAt *time.Ticket) (*RGATreeListNode, error) { node, ok := a.nodeMapByCreatedAt[createdAt.Key()] if !ok { - panic("fail to find the given createdAt: " + createdAt.Key()) + return nil, fmt.Errorf("DeleteByCreatedAt %s: %w", createdAt.Key(), ErrChildNotFound) } alreadyRemoved := node.isRemoved() if node.elem.Remove(deletedAt) && !alreadyRemoved { a.nodeMapByIndex.Splay(node.indexNode) } - return node + return node, nil } // Len returns length of this RGATreeList. @@ -237,35 +237,38 @@ func (a *RGATreeList) Delete(idx int, deletedAt *time.Ticket) (*RGATreeListNode, if err != nil { return nil, err } - return a.DeleteByCreatedAt(target.CreatedAt(), deletedAt), nil + return a.DeleteByCreatedAt(target.CreatedAt(), deletedAt) } // MoveAfter moves the given `createdAt` element after the `prevCreatedAt` // element. -func (a *RGATreeList) MoveAfter(prevCreatedAt, createdAt, executedAt *time.Ticket) { +func (a *RGATreeList) MoveAfter(prevCreatedAt, createdAt, executedAt *time.Ticket) error { prevNode, ok := a.nodeMapByCreatedAt[prevCreatedAt.Key()] if !ok { - panic("fail to find the given prevCreatedAt: " + prevCreatedAt.Key()) + return fmt.Errorf("MoveAfter %s: %w", prevCreatedAt.Key(), ErrChildNotFound) } node, ok := a.nodeMapByCreatedAt[createdAt.Key()] if !ok { - panic("fail to find the given createdAt: " + createdAt.Key()) + return fmt.Errorf("MoveAfter %s: %w", createdAt.Key(), ErrChildNotFound) } if node.elem.MovedAt() == nil || executedAt.After(node.elem.MovedAt()) { a.release(node) - a.insertAfter(prevNode.CreatedAt(), node.elem, executedAt) + if err := a.insertAfter(prevNode.CreatedAt(), node.elem, executedAt); err != nil { + return err + } node.elem.SetMovedAt(executedAt) } + return nil } // FindPrevCreatedAt returns the creation time of the previous element of the // given element. 
-func (a *RGATreeList) FindPrevCreatedAt(createdAt *time.Ticket) *time.Ticket { +func (a *RGATreeList) FindPrevCreatedAt(createdAt *time.Ticket) (*time.Ticket, error) { node, ok := a.nodeMapByCreatedAt[createdAt.Key()] if !ok { - panic("fail to find the given prevCreatedAt: " + createdAt.Key()) + return nil, fmt.Errorf("FindPrevCreatedAt %s: %w", createdAt.Key(), ErrChildNotFound) } for { @@ -275,7 +278,7 @@ func (a *RGATreeList) FindPrevCreatedAt(createdAt *time.Ticket) *time.Ticket { } } - return node.CreatedAt() + return node.CreatedAt(), nil } // purge physically purge child element. @@ -293,17 +296,17 @@ func (a *RGATreeList) purge(elem Element) error { func (a *RGATreeList) findNextBeforeExecutedAt( createdAt *time.Ticket, executedAt *time.Ticket, -) *RGATreeListNode { +) (*RGATreeListNode, error) { node, ok := a.nodeMapByCreatedAt[createdAt.Key()] if !ok { - panic("fail to find the given createdAt: " + createdAt.Key()) + return nil, fmt.Errorf("findNextBeforeExecutedAt %s: %w", createdAt.Key(), ErrChildNotFound) } for node.next != nil && node.next.PositionedAt().After(executedAt) { node = node.next } - return node + return node, nil } func (a *RGATreeList) release(node *RGATreeListNode) { @@ -325,8 +328,12 @@ func (a *RGATreeList) insertAfter( prevCreatedAt *time.Ticket, value Element, executedAt *time.Ticket, -) { - prevNode := a.findNextBeforeExecutedAt(prevCreatedAt, executedAt) +) error { + prevNode, err := a.findNextBeforeExecutedAt(prevCreatedAt, executedAt) + if err != nil { + return err + } + newNode := newRGATreeListNodeAfter(prevNode, value) if prevNode == a.last { a.last = newNode @@ -334,4 +341,5 @@ func (a *RGATreeList) insertAfter( a.nodeMapByIndex.InsertAfter(prevNode.indexNode, newNode.indexNode) a.nodeMapByCreatedAt[value.CreatedAt().Key()] = newNode + return nil } diff --git a/pkg/document/crdt/rga_tree_list_test.go b/pkg/document/crdt/rga_tree_list_test.go new file mode 100644 index 000000000..8f669f84a --- /dev/null +++ 
b/pkg/document/crdt/rga_tree_list_test.go @@ -0,0 +1,72 @@ +package crdt_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/yorkie-team/yorkie/pkg/document/crdt" + "github.com/yorkie-team/yorkie/test/helper" +) + +func TestRGATreeList(t *testing.T) { + t.Run("rga_tree_list operations test", func(t *testing.T) { + root := helper.TestRoot() + ctx := helper.TextChangeContext(root) + + elements := crdt.NewRGATreeList() + + var err error + for _, v := range []string{"1", "2", "3"} { + err = elements.Add(crdt.NewPrimitive(v, ctx.IssueTimeTicket())) + assert.NoError(t, err) + } + assert.Equal(t, `["1","2","3"]`, elements.Marshal()) + + nodes := elements.Nodes() + assert.Equal(t, len(nodes), 3) + + targetElement, err := elements.Get(1) + assert.NoError(t, err) + assert.Equal(t, `"2"`, targetElement.Element().Marshal()) + + prevCreatedAt, err := elements.FindPrevCreatedAt(targetElement.CreatedAt()) + assert.NoError(t, err) + assert.Equal(t, prevCreatedAt.Compare(targetElement.CreatedAt()), -1) + + err = elements.MoveAfter(targetElement.CreatedAt(), prevCreatedAt, ctx.IssueTimeTicket()) + assert.NoError(t, err) + assert.Equal(t, `["2","1","3"]`, elements.Marshal()) + + _, err = elements.DeleteByCreatedAt(targetElement.CreatedAt(), ctx.IssueTimeTicket()) + assert.NoError(t, err) + assert.Equal(t, `["1","3"]`, elements.Marshal()) + + _, err = elements.Delete(1, ctx.IssueTimeTicket()) + assert.NoError(t, err) + assert.Equal(t, `["1"]`, elements.Marshal()) + + }) + + t.Run("invalid createdAt test", func(t *testing.T) { + root := helper.TestRoot() + ctx := helper.TextChangeContext(root) + + validCreatedAt, invalidCreatedAt := ctx.IssueTimeTicket(), ctx.IssueTimeTicket() + elements := crdt.NewRGATreeList() + err := elements.Add(crdt.NewPrimitive("1", validCreatedAt)) + assert.NoError(t, err) + + _, err = elements.DeleteByCreatedAt(invalidCreatedAt, ctx.IssueTimeTicket()) + assert.ErrorIs(t, err, crdt.ErrChildNotFound) + + err = 
elements.MoveAfter(validCreatedAt, invalidCreatedAt, ctx.IssueTimeTicket()) + assert.ErrorIs(t, err, crdt.ErrChildNotFound) + + err = elements.MoveAfter(invalidCreatedAt, validCreatedAt, ctx.IssueTimeTicket()) + assert.ErrorIs(t, err, crdt.ErrChildNotFound) + + _, err = elements.FindPrevCreatedAt(invalidCreatedAt) + assert.ErrorIs(t, err, crdt.ErrChildNotFound) + }) +} diff --git a/pkg/document/crdt/rga_tree_split.go b/pkg/document/crdt/rga_tree_split.go index 640e0a33c..cabb68b0d 100644 --- a/pkg/document/crdt/rga_tree_split.go +++ b/pkg/document/crdt/rga_tree_split.go @@ -144,21 +144,6 @@ func (pos *RGATreeSplitNodePos) Equal(other *RGATreeSplitNodePos) bool { return pos.relativeOffset == other.relativeOffset } -// Selection represents the selection of text range in the editor. -type Selection struct { - from *RGATreeSplitNodePos - to *RGATreeSplitNodePos - updatedAt *time.Ticket -} - -func newSelection(from, to *RGATreeSplitNodePos, updatedAt *time.Ticket) *Selection { - return &Selection{ - from, - to, - updatedAt, - } -} - // RGATreeSplitNode is a node of RGATreeSplit. 
type RGATreeSplitNode[V RGATreeSplitValue] struct { id *RGATreeSplitNodeID diff --git a/pkg/document/crdt/root_test.go b/pkg/document/crdt/root_test.go index 64e5c77f4..df8f69a6e 100644 --- a/pkg/document/crdt/root_test.go +++ b/pkg/document/crdt/root_test.go @@ -36,15 +36,18 @@ func TestRoot(t *testing.T) { t.Run("garbage collection for array test", func(t *testing.T) { root := helper.TestRoot() ctx := helper.TextChangeContext(root) - array := crdt.NewArray(crdt.NewRGATreeList(), ctx.IssueTimeTicket()) - array.Add(crdt.NewPrimitive(0, ctx.IssueTimeTicket())) - array.Add(crdt.NewPrimitive(1, ctx.IssueTimeTicket())) - array.Add(crdt.NewPrimitive(2, ctx.IssueTimeTicket())) + array := crdt.NewArray(crdt.NewRGATreeList(), ctx.IssueTimeTicket()) + var err error + for _, v := range []int{0, 1, 2} { + err = array.Add(crdt.NewPrimitive(v, ctx.IssueTimeTicket())) + assert.NoError(t, err) + } assert.Equal(t, "[0,1,2]", array.Marshal()) targetElement, _ := array.Get(1) - array.DeleteByCreatedAt(targetElement.CreatedAt(), ctx.IssueTimeTicket()) + _, err = array.DeleteByCreatedAt(targetElement.CreatedAt(), ctx.IssueTimeTicket()) + assert.NoError(t, err) root.RegisterRemovedElementPair(array, targetElement) assert.Equal(t, "[0,2]", array.Marshal()) assert.Equal(t, 1, root.GarbageLen()) @@ -182,10 +185,14 @@ func TestRoot(t *testing.T) { obj := root.Object() obj.Set("1", crdt.NewPrimitive(1, ctx.IssueTimeTicket())) - arr := crdt.NewArray(crdt.NewRGATreeList(), ctx.IssueTimeTicket()). - Add(crdt.NewPrimitive(1, ctx.IssueTimeTicket())). - Add(crdt.NewPrimitive(2, ctx.IssueTimeTicket())). 
- Add(crdt.NewPrimitive(3, ctx.IssueTimeTicket())) + + arr := crdt.NewArray(crdt.NewRGATreeList(), ctx.IssueTimeTicket()) + var err error + for _, v := range []int{1, 2, 3} { + err = arr.Add(crdt.NewPrimitive(v, ctx.IssueTimeTicket())) + assert.NoError(t, err) + } + obj.Set("2", arr) obj.Set("3", crdt.NewPrimitive(3, ctx.IssueTimeTicket())) assert.Equal(t, `{"1":1,"2":[1,2,3],"3":3}`, root.Object().Marshal()) diff --git a/pkg/document/crdt/text.go b/pkg/document/crdt/text.go index 45caab38a..148f3d959 100644 --- a/pkg/document/crdt/text.go +++ b/pkg/document/crdt/text.go @@ -116,7 +116,6 @@ func InitialTextNode() *RGATreeSplitNode[*TextValue] { // Text is an extended data type for the contents of a text editor. type Text struct { rgaTreeSplit *RGATreeSplit[*TextValue] - selectionMap map[string]*Selection createdAt *time.Ticket movedAt *time.Ticket removedAt *time.Ticket @@ -126,7 +125,6 @@ type Text struct { func NewText(elements *RGATreeSplit[*TextValue], createdAt *time.Ticket) *Text { return &Text{ rgaTreeSplit: elements, - selectionMap: make(map[string]*Selection), createdAt: createdAt, } } @@ -276,17 +274,6 @@ func (t *Text) Style( return nil } -// Select stores that the given range has been selected. -func (t *Text) Select( - from *RGATreeSplitNodePos, - to *RGATreeSplitNodePos, - executedAt *time.Ticket, -) { - if prev, ok := t.selectionMap[executedAt.ActorIDHex()]; !ok || executedAt.After(prev.updatedAt) { - t.selectionMap[executedAt.ActorIDHex()] = newSelection(from, to, executedAt) - } -} - // Nodes returns the internal nodes of this Text. func (t *Text) Nodes() []*RGATreeSplitNode[*TextValue] { return t.rgaTreeSplit.nodes() diff --git a/pkg/document/crdt/tree.go b/pkg/document/crdt/tree.go index 0e0894676..ddb90fe3f 100644 --- a/pkg/document/crdt/tree.go +++ b/pkg/document/crdt/tree.go @@ -33,19 +33,6 @@ var ( ErrNodeNotFound = errors.New("node not found") ) -var ( - // DummyTreePos is a dummy position of Tree. 
It is used to represent the head node of RGASplit. - DummyTreePos = &TreePos{ - CreatedAt: time.InitialTicket, - Offset: 0, - } -) - -const ( - // DummyHeadType is a type of dummy head. It is used to represent the head node of RGASplit. - DummyHeadType = "dummy" -) - // TreeNodeForTest is a TreeNode for test. type TreeNodeForTest struct { Type string @@ -59,65 +46,105 @@ type TreeNodeForTest struct { type TreeNode struct { IndexTreeNode *index.Node[*TreeNode] - Pos *TreePos + ID *TreeNodeID RemovedAt *time.Ticket - Next *TreeNode - Prev *TreeNode - InsPrev *TreeNode + InsPrevID *TreeNodeID + InsNextID *TreeNodeID + // Value is optional. If the value is not empty, it means that the node is a + // text node. Value string + + // Attrs is optional. If the value is not empty, it means that the node is a + // element node. Attrs *RHT } -// TreePos represents the position of Tree. +// TreePos represents a position in the tree. It is used to determine the +// position of insertion, deletion, and style change. type TreePos struct { - CreatedAt *time.Ticket - Offset int + // ParentID is the ID of the parent node. + ParentID *TreeNodeID + + // LeftSiblingID is the ID of the left sibling node. If the node is the + // parent, it means that the position is leftmost. + LeftSiblingID *TreeNodeID } // NewTreePos creates a new instance of TreePos. -func NewTreePos(createdAt *time.Ticket, offset int) *TreePos { +func NewTreePos(parentID *TreeNodeID, leftSiblingID *TreeNodeID) *TreePos { return &TreePos{ - CreatedAt: createdAt, - Offset: offset, + ParentID: parentID, + LeftSiblingID: leftSiblingID, } } -// Compare compares the given two CRDTTreePos. -func (t *TreePos) Compare(other llrb.Key) int { - compare := t.CreatedAt.Compare(other.(*TreePos).CreatedAt) - if compare != 0 { - return compare - } +// Equals compares the given two CRDTTreePos. 
+func (t *TreePos) Equals(other *TreePos) bool { + return t.ParentID.CreatedAt.Compare(other.ParentID.CreatedAt) == 0 && + t.ParentID.Offset == other.ParentID.Offset && + t.LeftSiblingID.CreatedAt.Compare(other.LeftSiblingID.CreatedAt) == 0 && + t.LeftSiblingID.Offset == other.LeftSiblingID.Offset +} - if t.Offset > other.(*TreePos).Offset { - return 1 - } else if t.Offset < other.(*TreePos).Offset { - return -1 +// TreeNodeID represent an ID of a node in the tree. It is used to +// identify a node in the tree. It is composed of the creation time of the node +// and the offset from the beginning of the node if the node is split. +// +// Some replicas may have nodes that are not split yet. In this case, we can +// use `map.floorEntry()` to find the adjacent node. +type TreeNodeID struct { + CreatedAt *time.Ticket + Offset int +} + +// NewTreeNodeID creates a new instance of TreeNodeID. +func NewTreeNodeID(createdAt *time.Ticket, offset int) *TreeNodeID { + return &TreeNodeID{ + CreatedAt: createdAt, + Offset: offset, } - return 0 } // NewTreeNode creates a new instance of TreeNode. -func NewTreeNode(pos *TreePos, nodeType string, attributes *RHT, value ...string) *TreeNode { - node := &TreeNode{ - Pos: pos, - } +func NewTreeNode(id *TreeNodeID, nodeType string, attributes *RHT, value ...string) *TreeNode { + node := &TreeNode{ID: id} + // NOTE(hackerwins): The value of TreeNode is optional. If the value is + // empty, it means that the node is an element node. if len(value) > 0 { node.Value = value[0] } node.Attrs = attributes - node.IndexTreeNode = index.NewNode(nodeType, node) return node } +// toIDString returns a string that can be used as an ID for this TreeNodeID. +func (t *TreeNodeID) toIDString() string { + return t.CreatedAt.StructureAsString() + ":" + strconv.Itoa(t.Offset) +} + +// Compare compares the given two CRDTTreePos. 
+func (t *TreeNodeID) Compare(other llrb.Key) int { + compare := t.CreatedAt.Compare(other.(*TreeNodeID).CreatedAt) + if compare != 0 { + return compare + } + + if t.Offset > other.(*TreeNodeID).Offset { + return 1 + } else if t.Offset < other.(*TreeNodeID).Offset { + return -1 + } + return 0 +} + // Type returns the type of the Node. func (n *TreeNode) Type() string { - return string(n.IndexTreeNode.Type) + return n.IndexTreeNode.Type } // Len returns the length of the Node. @@ -186,16 +213,16 @@ func (n *TreeNode) Child(offset int) (*TreeNode, error) { } // Split splits the node at the given offset. -func (n *TreeNode) Split(offset int) (*TreeNode, error) { +func (n *TreeNode) Split(offset, absOffset int) (*TreeNode, error) { if n.IsText() { - return n.SplitText(offset) + return n.SplitText(offset, absOffset) } - return nil, nil + return n.SplitElement(offset) } // SplitText splits the text node at the given offset. -func (n *TreeNode) SplitText(offset int) (*TreeNode, error) { +func (n *TreeNode) SplitText(offset, absOffset int) (*TreeNode, error) { if offset == 0 || offset == n.Len() { return nil, nil } @@ -204,13 +231,19 @@ func (n *TreeNode) SplitText(offset int) (*TreeNode, error) { leftRune := utf16.Decode(encoded[0:offset]) rightRune := utf16.Decode(encoded[offset:]) + if len(rightRune) == 0 { + return nil, nil + } + n.Value = string(leftRune) n.IndexTreeNode.Length = len(leftRune) - rightNode := NewTreeNode(&TreePos{ - CreatedAt: n.Pos.CreatedAt, - Offset: offset, + rightNode := NewTreeNode(&TreeNodeID{ + CreatedAt: n.ID.CreatedAt, + Offset: offset + absOffset, }, n.Type(), nil, string(rightRune)) + rightNode.RemovedAt = n.RemovedAt + if err := n.IndexTreeNode.Parent.InsertAfterInternal( rightNode.IndexTreeNode, n.IndexTreeNode, @@ -221,16 +254,56 @@ func (n *TreeNode) SplitText(offset int) (*TreeNode, error) { return rightNode, nil } +// SplitElement splits the given element at the given offset. 
+func (n *TreeNode) SplitElement(offset int) (*TreeNode, error) { + split := NewTreeNode(&TreeNodeID{ + CreatedAt: n.ID.CreatedAt, + Offset: offset, + }, n.Type(), nil) + split.RemovedAt = n.RemovedAt + + if err := n.IndexTreeNode.SetChildren(n.IndexTreeNode.Children(true)[0:offset]); err != nil { + return nil, err + } + + if err := split.IndexTreeNode.SetChildren(n.IndexTreeNode.Children(true)[offset:]); err != nil { + return nil, err + } + + nodeLength, splitLength := 0, 0 + for _, child := range n.IndexTreeNode.Children() { + nodeLength += child.Length + } + for _, child := range split.IndexTreeNode.Children() { + splitLength += child.Length + } + + n.IndexTreeNode.Length = nodeLength + split.IndexTreeNode.Length = splitLength + + return split, nil +} + // remove marks the node as removed. -func (n *TreeNode) remove(removedAt *time.Ticket) { +func (n *TreeNode) remove(removedAt *time.Ticket) bool { justRemoved := n.RemovedAt == nil if n.RemovedAt == nil || n.RemovedAt.Compare(removedAt) > 0 { n.RemovedAt = removedAt + if justRemoved { + n.IndexTreeNode.UpdateAncestorsSize() + } + return true } - if justRemoved { - n.IndexTreeNode.UpdateAncestorsSize() + return false +} + +func (n *TreeNode) canDelete(removedAt *time.Ticket, latestCreatedAt *time.Ticket) bool { + if !n.ID.CreatedAt.After(latestCreatedAt) && + (n.RemovedAt == nil || n.RemovedAt.Compare(removedAt) > 0) { + return true } + return false } // InsertAt inserts the given node at the given offset. 
@@ -242,9 +315,9 @@ func (n *TreeNode) InsertAt(newNode *TreeNode, offset int) error { func (n *TreeNode) DeepCopy() (*TreeNode, error) { var clone *TreeNode if n.Attrs != nil { - clone = NewTreeNode(n.Pos, n.Type(), n.Attrs.DeepCopy(), n.Value) + clone = NewTreeNode(n.ID, n.Type(), n.Attrs.DeepCopy(), n.Value) } else { - clone = NewTreeNode(n.Pos, n.Type(), nil, n.Value) + clone = NewTreeNode(n.ID, n.Type(), nil, n.Value) } clone.RemovedAt = n.RemovedAt @@ -271,9 +344,8 @@ func (n *TreeNode) DeepCopy() (*TreeNode, error) { // Tree represents the tree of CRDT. It has doubly linked list structure and // index tree structure. type Tree struct { - DummyHead *TreeNode IndexTree *index.Tree[*TreeNode] - NodeMapByPos *llrb.Tree[*TreePos, *TreeNode] + NodeMapByID *llrb.Tree[*TreeNodeID, *TreeNode] removedNodeMap map[string]*TreeNode createdAt *time.Ticket @@ -284,17 +356,14 @@ type Tree struct { // NewTree creates a new instance of Tree. func NewTree(root *TreeNode, createdAt *time.Ticket) *Tree { tree := &Tree{ - DummyHead: NewTreeNode(DummyTreePos, DummyHeadType, nil), IndexTree: index.NewTree[*TreeNode](root.IndexTreeNode), - NodeMapByPos: llrb.NewTree[*TreePos, *TreeNode](), + NodeMapByID: llrb.NewTree[*TreeNodeID, *TreeNode](), removedNodeMap: make(map[string]*TreeNode), createdAt: createdAt, } - previous := tree.DummyHead index.Traverse(tree.IndexTree, func(node *index.Node[*TreeNode], depth int) { - tree.InsertAfter(previous, node.Value) - previous = node.Value + tree.NodeMapByID.Put(node.Value.ID, node.Value) }) return tree @@ -325,30 +394,36 @@ func (t *Tree) purgeRemovedNodesBefore(ticket *time.Ticket) (int, error) { } for node := range nodesToBeRemoved { - if err := node.IndexTreeNode.Parent.RemoveChild(node.IndexTreeNode); err != nil { + if err := t.purgeNode(node); err != nil { return 0, err } - t.NodeMapByPos.Remove(node.Pos) - t.Purge(node) - delete(t.removedNodeMap, node.Pos.CreatedAt.StructureAsString()+":"+strconv.Itoa(node.Pos.Offset)) } return count, 
nil } -// Purge physically purges the given node. -func (t *Tree) Purge(node *TreeNode) { - if node.Prev != nil { - node.Prev.Next = node.Next +// purgeNode physically purges the given node. +func (t *Tree) purgeNode(node *TreeNode) error { + if err := node.IndexTreeNode.Parent.RemoveChild(node.IndexTreeNode); err != nil { + return err } + t.NodeMapByID.Remove(node.ID) - if node.Next != nil { - node.Next.Prev = node.Prev + insPrevID := node.InsPrevID + insNextID := node.InsNextID + if insPrevID != nil { + insPrev := t.findFloorNode(insPrevID) + insPrev.InsNextID = insNextID } + if insNextID != nil { + insNext := t.findFloorNode(insNextID) + insNext.InsPrevID = insPrevID + } + node.InsPrevID = nil + node.InsNextID = nil - node.Prev = nil - node.Next = nil - node.InsPrev = nil + delete(t.removedNodeMap, node.ID.toIDString()) + return nil } // marshal returns the JSON encoding of this Tree. @@ -420,20 +495,6 @@ func (t *Tree) Remove(removedAt *time.Ticket) bool { return false } -// InsertAfter inserts the given node after the given previous node. -func (t *Tree) InsertAfter(prevNode *TreeNode, newNode *TreeNode) { - next := prevNode.Next - prevNode.Next = newNode - newNode.Prev = prevNode - - if next != nil { - newNode.Next = next - next.Prev = newNode - } - - t.NodeMapByPos.Put(newNode.Pos, newNode) -} - // Nodes traverses the tree and returns the list of nodes. func (t *Tree) Nodes() []*TreeNode { var nodes []*TreeNode @@ -456,138 +517,177 @@ func (t *Tree) ToXML() string { // EditByIndex edits the given range with the given value. // This method uses indexes instead of a pair of TreePos for testing. 
-func (t *Tree) EditByIndex(start, end int, contents []*TreeNode, editedAt *time.Ticket) error { +func (t *Tree) EditByIndex(start, end int, + latestCreatedAtMapByActor map[string]*time.Ticket, + contents []*TreeNode, + editedAt *time.Ticket, +) (map[string]*time.Ticket, error) { fromPos, err := t.FindPos(start) if err != nil { - return err + return nil, err } toPos, err := t.FindPos(end) if err != nil { - return err + return nil, err } - return t.Edit(fromPos, toPos, contents, editedAt) + return t.Edit(fromPos, toPos, latestCreatedAtMapByActor, contents, editedAt) } // FindPos finds the position of the given index in the tree. +// (local) index -> (local) TreePos in indexTree -> (logical) TreePos in Tree func (t *Tree) FindPos(offset int) (*TreePos, error) { - treePos, err := t.IndexTree.FindTreePos(offset) + treePos, err := t.IndexTree.FindTreePos(offset) // local TreePos if err != nil { return nil, err } + node, offset := treePos.Node, treePos.Offset + var leftSibling *TreeNode + + if node.IsText() { + if node.Parent.Children(false)[0] == node && offset == 0 { + leftSibling = node.Parent.Value + } else { + leftSibling = node.Value + } + node = node.Parent + } else { + if offset == 0 { + leftSibling = node.Value + } else { + leftSibling = node.Children()[offset-1].Value + } + } + return &TreePos{ - CreatedAt: treePos.Node.Value.Pos.CreatedAt, - Offset: treePos.Node.Value.Pos.Offset + treePos.Offset, + ParentID: node.Value.ID, + LeftSiblingID: &TreeNodeID{ + CreatedAt: leftSibling.ID.CreatedAt, + Offset: leftSibling.ID.Offset + offset, + }, }, nil } // Edit edits the tree with the given range and content. // If the content is undefined, the range will be removed. -func (t *Tree) Edit(from, to *TreePos, contents []*TreeNode, editedAt *time.Ticket) error { +func (t *Tree) Edit(from, to *TreePos, + latestCreatedAtMapByActor map[string]*time.Ticket, + contents []*TreeNode, + editedAt *time.Ticket, +) (map[string]*time.Ticket, error) { // 01. 
split text nodes at the given range if needed. - toPos, toRight, err := t.findTreePosWithSplitText(to, editedAt) + fromParent, fromLeft, err := t.FindTreeNodesWithSplitText(from, editedAt) if err != nil { - return err + return nil, err } - fromPos, fromRight, err := t.findTreePosWithSplitText(from, editedAt) + toParent, toLeft, err := t.FindTreeNodesWithSplitText(to, editedAt) if err != nil { - return err + return nil, err } - toBeRemoveds := make([]*TreeNode, 0) - // 02. remove the nodes and update linked list and index tree. - if fromRight != toRight { - if err := t.nodesBetween(fromRight, toRight, func(node *TreeNode) { - if !node.IsRemoved() { - toBeRemoveds = append(toBeRemoveds, node) - } - }); err != nil { - return err - } + // 02. remove the nodes and update index tree. + createdAtMapByActor := make(map[string]*time.Ticket) + var toBeRemoved []*TreeNode - isRangeOnSameBranch := toPos.Node.IsAncestorOf(fromPos.Node) - for _, node := range toBeRemoveds { - node.remove(editedAt) - - if node.IsRemoved() { - t.removedNodeMap[node.Pos.CreatedAt.StructureAsString()+":"+strconv.Itoa(node.Pos.Offset)] = node + err = t.traverseInPosRange(fromParent.Value, fromLeft.Value, toParent.Value, toLeft.Value, + func(node *TreeNode, contain index.TagContained) { + // If node is a element node and half-contained in the range, + // it should not be removed. + if !node.IsText() && contain != index.AllContained { + return } - } - // move the alive children of the removed block node - if isRangeOnSameBranch { - var removedBlockNode *TreeNode - if fromPos.Node.Parent.Value.IsRemoved() { - removedBlockNode = fromPos.Node.Parent.Value - } else if !fromPos.Node.IsText() && fromPos.Node.Value.IsRemoved() { - removedBlockNode = fromPos.Node.Value - } + actorIDHex := node.ID.CreatedAt.ActorIDHex() - // If the nearest removed block node of the fromNode is found, - // insert the alive children of the removed block node to the toNode. 
- if removedBlockNode != nil { - blockNode := toPos.Node - offset, err := blockNode.FindBranchOffset(removedBlockNode.IndexTreeNode) - if err != nil { - return err - } - - for i := len(removedBlockNode.IndexTreeNode.Children()) - 1; i >= 0; i-- { - node := removedBlockNode.IndexTreeNode.Children()[i] - if err := blockNode.InsertAt(node, offset); err != nil { - return err - } + var latestCreatedAt *time.Ticket + if latestCreatedAtMapByActor == nil { + latestCreatedAt = time.MaxTicket + } else { + createdAt, ok := latestCreatedAtMapByActor[actorIDHex] + if ok { + latestCreatedAt = createdAt + } else { + latestCreatedAt = time.InitialTicket } } - } else { - if fromPos.Node.Parent != nil && fromPos.Node.Parent.Value.IsRemoved() { - if err := toPos.Node.Parent.Prepend(fromPos.Node.Parent.Children()...); err != nil { - return err + + if node.canDelete(editedAt, latestCreatedAt) { + latestCreatedAt = createdAtMapByActor[actorIDHex] + createdAt := node.ID.CreatedAt + if latestCreatedAt == nil || createdAt.After(latestCreatedAt) { + createdAtMapByActor[actorIDHex] = createdAt } + toBeRemoved = append(toBeRemoved, node) } + + }) + if err != nil { + return nil, err + } + + for _, node := range toBeRemoved { + if node.remove(editedAt) { + t.removedNodeMap[node.ID.toIDString()] = node } } // 03. insert the given node at the given position. if len(contents) != 0 { - - previous := fromRight.Prev - offset := fromPos.Offset - node := fromPos.Node + leftInChildren := fromLeft for _, content := range contents { - // 03-1. insert the content nodes to the list. - index.TraverseNode(content.IndexTreeNode, func(node *index.Node[*TreeNode], depth int) { - t.InsertAfter(previous, node.Value) - previous = node.Value - }) - - // 03-2. insert the content nodes to the tree. 
- if node.IsText() { - // if `contents` is consist of text nodes, then there'll be only one element in `contents` - // thus, there's no need to update fromPos - if fromPos.Offset == 0 { - if err := node.Parent.InsertBefore(content.IndexTreeNode, node); err != nil { - return err - } - } else { - if err := node.Parent.InsertAfter(content.IndexTreeNode, node); err != nil { - return err - } + // 03-1. insert the content nodes to the tree. + if leftInChildren == fromParent { + // 03-1-1. when there's no leftSibling, then insert content into very front of parent's children List + err := fromParent.InsertAt(content.IndexTreeNode, 0) + if err != nil { + return nil, err } } else { - target := node - if err := target.InsertAt(content.IndexTreeNode, offset+1); err != nil { - return err + // 03-1-2. insert after leftSibling + err := fromParent.InsertAfter(content.IndexTreeNode, leftInChildren) + if err != nil { + return nil, err } - - offset++ } + + leftInChildren = content.IndexTreeNode + index.TraverseNode(content.IndexTreeNode, func(node *index.Node[*TreeNode], depth int) { + // if insertion happens during concurrent editing and parent node has been removed, + // make new nodes as tombstone immediately + if fromParent.Value.IsRemoved() { + actorIDHex := node.Value.ID.CreatedAt.ActorIDHex() + if node.Value.remove(editedAt) { + latestCreatedAt := createdAtMapByActor[actorIDHex] + createdAt := node.Value.ID.CreatedAt + if latestCreatedAt == nil || createdAt.After(latestCreatedAt) { + createdAtMapByActor[actorIDHex] = createdAt + } + } + t.removedNodeMap[node.Value.ID.toIDString()] = node.Value + } + + t.NodeMapByID.Put(node.Value.ID, node.Value) + }) } } + return createdAtMapByActor, nil +} - return nil +func (t *Tree) traverseInPosRange(fromParent, fromLeft, toParent, toLeft *TreeNode, + callback func(node *TreeNode, contain index.TagContained), +) error { + fromIdx, err := t.ToIndex(fromParent, fromLeft) + if err != nil { + return err + } + toIdx, err := t.ToIndex(toParent, 
toLeft) + if err != nil { + return err + } + + return t.IndexTree.NodesBetween(fromIdx, toIdx, callback) } // StyleByIndex applies the given attributes of the given range. @@ -597,6 +697,7 @@ func (t *Tree) StyleByIndex(start, end int, attributes map[string]string, edited if err != nil { return err } + toPos, err := t.FindPos(end) if err != nil { return err @@ -607,146 +708,191 @@ func (t *Tree) StyleByIndex(start, end int, attributes map[string]string, edited // Style applies the given attributes of the given range. func (t *Tree) Style(from, to *TreePos, attributes map[string]string, editedAt *time.Ticket) error { - _, toRight, err := t.findTreePos(to, editedAt) + // 01. split text nodes at the given range if needed. + fromParent, fromLeft, err := t.FindTreeNodesWithSplitText(from, editedAt) if err != nil { return err } - _, fromRight, err := t.findTreePos(from, editedAt) + toParent, toLeft, err := t.FindTreeNodesWithSplitText(to, editedAt) if err != nil { return err } - // 02. style the nodes. - return t.nodesBetween(fromRight, toRight, func(node *TreeNode) { - if node.IsText() { - return - } + err = t.traverseInPosRange(fromParent.Value, fromLeft.Value, toParent.Value, toLeft.Value, + func(node *TreeNode, contain index.TagContained) { + if !node.IsRemoved() && !node.IsText() && len(attributes) > 0 { + if node.Attrs == nil { + node.Attrs = NewRHT() + } - for key, value := range attributes { - if node.Attrs == nil { - node.Attrs = NewRHT() + for key, value := range attributes { + node.Attrs.Set(key, value, editedAt) + } } - node.Attrs.Set(key, value, editedAt) - } - }) + }) + if err != nil { + return err + } + + return nil } -// findTreePos returns TreePos and the right node of the given index in postorder. 
-func (t *Tree) findTreePos(pos *TreePos, editedAt *time.Ticket) (*index.TreePos[*TreeNode], *TreeNode, error) { - treePos := t.toTreePos(pos) - if treePos == nil { +// FindTreeNodesWithSplitText finds TreeNode of the given crdt.TreePos and +// splits the text node if necessary. +// crdt.TreePos is a position in the CRDT perspective. This is different +// from indexTree.TreePos which is a position of the tree in the local perspective. +func (t *Tree) FindTreeNodesWithSplitText(pos *TreePos, editedAt *time.Ticket) ( + *index.Node[*TreeNode], *index.Node[*TreeNode], error, +) { + parentNode, leftSiblingNode := t.toTreeNodes(pos) + if parentNode == nil || leftSiblingNode == nil { return nil, nil, fmt.Errorf("%p: %w", pos, ErrNodeNotFound) } // Find the appropriate position. This logic is similar to the logical to // handle the same position insertion of RGA. - current := treePos - for current.Node.Value.Next != nil && current.Node.Value.Next.Pos.CreatedAt.After(editedAt) && - current.Node.Value.IndexTreeNode.Parent == current.Node.Value.Next.IndexTreeNode.Parent { + if leftSiblingNode.IsText() { + absOffset := leftSiblingNode.ID.Offset + split, err := leftSiblingNode.Split(pos.LeftSiblingID.Offset-absOffset, absOffset) + if err != nil { + return nil, nil, err + } + + if split != nil { + split.InsPrevID = leftSiblingNode.ID + t.NodeMapByID.Put(split.ID, split) - current = &index.TreePos[*TreeNode]{ - Node: current.Node.Value.Next.IndexTreeNode, - Offset: current.Node.Value.Next.Len(), + if leftSiblingNode.InsNextID != nil { + insNext := t.findFloorNode(leftSiblingNode.InsNextID) + insNext.InsPrevID = split.ID + split.InsNextID = leftSiblingNode.InsNextID + } + leftSiblingNode.InsNextID = split.ID } } - // TODO(hackerwins): Consider to use current instead of treePos. 
- right, err := t.IndexTree.FindPostorderRight(treePos) - if err != nil { - return nil, nil, err + idx := 0 + if parentNode != leftSiblingNode { + idx = parentNode.IndexTreeNode.OffsetOfChild(leftSiblingNode.IndexTreeNode) + 1 } - return current, right, nil + parentChildren := parentNode.IndexTreeNode.Children(true) + for i := idx; i < len(parentChildren); i++ { + next := parentChildren[i].Value + if !next.ID.CreatedAt.After(editedAt) { + break + } + leftSiblingNode = next + } + + return parentNode.IndexTreeNode, leftSiblingNode.IndexTreeNode, nil } -// findTreePosWithSplitText finds the right node of the given index in postorder. -func (t *Tree) findTreePosWithSplitText(pos *TreePos, editedAt *time.Ticket) ( - *index.TreePos[*TreeNode], *TreeNode, error, -) { - treePos := t.toTreePos(pos) - if treePos == nil { - return nil, nil, fmt.Errorf("%p: %w", pos, ErrNodeNotFound) +// toTreePos converts the given crdt.TreePos to local index.TreePos. +func (t *Tree) toTreePos(parentNode, leftSiblingNode *TreeNode) (*index.TreePos[*TreeNode], error) { + if parentNode == nil || leftSiblingNode == nil { + return nil, nil } - // Find the appropriate position. This logic is similar to the logical to - // handle the same position insertion of RGA. - current := treePos - for current.Node.Value.Next != nil && current.Node.Value.Next.Pos.CreatedAt.After(editedAt) && - current.Node.Value.IndexTreeNode.Parent == current.Node.Value.Next.IndexTreeNode.Parent { + var treePos *index.TreePos[*TreeNode] - current = &index.TreePos[*TreeNode]{ - Node: current.Node.Value.Next.IndexTreeNode, - Offset: current.Node.Value.Next.Len(), + if parentNode.IsRemoved() { + // If parentNode is removed, treePos is the position of its least alive ancestor. 
+ var childNode *TreeNode + for parentNode.IsRemoved() { + childNode = parentNode + parentNode = childNode.IndexTreeNode.Parent.Value } - } - if current.Node.IsText() { - split, err := current.Node.Value.Split(current.Offset) + childOffset, err := parentNode.IndexTreeNode.FindOffset(childNode.IndexTreeNode) if err != nil { - return nil, nil, err + return nil, nil } - if split != nil { - t.InsertAfter(current.Node.Value, split) - split.InsPrev = current.Node.Value + treePos = &index.TreePos[*TreeNode]{ + Node: parentNode.IndexTreeNode, + Offset: childOffset, } - } - - right, err := t.IndexTree.FindPostorderRight(treePos) - if err != nil { - return nil, nil, err - } + } else { + if parentNode == leftSiblingNode { + treePos = &index.TreePos[*TreeNode]{ + Node: leftSiblingNode.IndexTreeNode, + Offset: 0, + } + } else { + // Find the closest existing leftSibling node. + offset, err := parentNode.IndexTreeNode.FindOffset(leftSiblingNode.IndexTreeNode) + if err != nil { + return nil, nil + } - return current, right, nil -} + if !leftSiblingNode.IsRemoved() { + if leftSiblingNode.IsText() { + treePos = &index.TreePos[*TreeNode]{ + Node: leftSiblingNode.IndexTreeNode, + Offset: leftSiblingNode.IndexTreeNode.PaddedLength(), + } + return treePos, nil + } + offset++ + } -// toTreePos converts the given crdt.TreePos to index.TreePos. -func (t *Tree) toTreePos(pos *TreePos) *index.TreePos[*TreeNode] { - key, node := t.NodeMapByPos.Floor(pos) - if node == nil || key.CreatedAt.Compare(pos.CreatedAt) != 0 { - return nil - } + treePos = &index.TreePos[*TreeNode]{ + Node: parentNode.IndexTreeNode, + Offset: offset, + } - // Choose the left node if the position is on the boundary of the split nodes. 
- if pos.Offset > 0 && pos.Offset == node.Pos.Offset && node.InsPrev != nil { - node = node.InsPrev + } } - return &index.TreePos[*TreeNode]{ - Node: node.IndexTreeNode, - Offset: pos.Offset - node.Pos.Offset, - } + return treePos, nil } -// toIndex converts the given CRDTTreePos to the index of the tree. -func (t *Tree) toIndex(pos *TreePos) (int, error) { - treePos := t.toTreePos(pos) +// ToIndex converts the given CRDTTreePos to the index of the tree. +func (t *Tree) ToIndex(parentNode, leftSiblingNode *TreeNode) (int, error) { + treePos, err := t.toTreePos(parentNode, leftSiblingNode) if treePos == nil { return -1, nil } - idx, err := t.IndexTree.IndexOf(treePos.Node) if err != nil { return 0, err } - return idx + treePos.Offset, nil + idx, err := t.IndexTree.IndexOf(treePos) + if err != nil { + return 0, err + } + + return idx, nil } -// nodesBetween returns the nodes between the given range. -// This method includes the given left node but excludes the given right node. -func (t *Tree) nodesBetween(left *TreeNode, right *TreeNode, callback func(*TreeNode)) error { - current := left - for current != right { - if current == nil { - return errors.New("left and right are not in the same list") - } +// findFloorNode returns node from given id. 
+func (t *Tree) findFloorNode(id *TreeNodeID) *TreeNode { + key, node := t.NodeMapByID.Floor(id) - callback(current) - current = current.Next + if node == nil || key.CreatedAt.Compare(id.CreatedAt) != 0 { + return nil } - return nil + return node +} + +func (t *Tree) toTreeNodes(pos *TreePos) (*TreeNode, *TreeNode) { + parentNode := t.findFloorNode(pos.ParentID) + leftSiblingNode := t.findFloorNode(pos.LeftSiblingID) + + if parentNode == nil || leftSiblingNode == nil { + return nil, nil + } + + if pos.LeftSiblingID.Offset > 0 && + pos.LeftSiblingID.Offset == leftSiblingNode.ID.Offset && + leftSiblingNode.InsPrevID != nil { + return parentNode, t.findFloorNode(leftSiblingNode.InsPrevID) + } + + return parentNode, leftSiblingNode } // Structure returns the structure of this tree. @@ -756,15 +902,17 @@ func (t *Tree) Structure() TreeNodeForTest { // PathToPos returns the position of the given path func (t *Tree) PathToPos(path []int) (*TreePos, error) { - treePos, err := t.IndexTree.PathToTreePos(path) + idx, err := t.IndexTree.PathToIndex(path) if err != nil { return nil, err } - return &TreePos{ - CreatedAt: treePos.Node.Value.Pos.CreatedAt, - Offset: treePos.Node.Value.Pos.Offset + treePos.Offset, - }, nil + pos, err := t.FindPos(idx) + if err != nil { + return nil, err + } + + return pos, nil } // ToStructure returns the JSON of this tree for debugging. 
diff --git a/pkg/document/crdt/tree_test.go b/pkg/document/crdt/tree_test.go index a7e028f53..5b8e6a411 100644 --- a/pkg/document/crdt/tree_test.go +++ b/pkg/document/crdt/tree_test.go @@ -26,10 +26,17 @@ import ( "github.com/yorkie-team/yorkie/test/helper" ) +var ( + dummyTreeNodeID = &crdt.TreeNodeID{ + CreatedAt: time.InitialTicket, + Offset: 0, + } +) + func TestTreeNode(t *testing.T) { t.Run("text node test", func(t *testing.T) { - node := crdt.NewTreeNode(crdt.DummyTreePos, "text", nil, "hello") - assert.Equal(t, crdt.DummyTreePos, node.Pos) + node := crdt.NewTreeNode(dummyTreeNodeID, "text", nil, "hello") + assert.Equal(t, dummyTreeNodeID, node.ID) assert.Equal(t, "text", node.Type()) assert.Equal(t, "hello", node.Value) assert.Equal(t, 5, node.Len()) @@ -38,8 +45,8 @@ func TestTreeNode(t *testing.T) { }) t.Run("element node test", func(t *testing.T) { - para := crdt.NewTreeNode(crdt.DummyTreePos, "p", nil) - err := para.Append(crdt.NewTreeNode(crdt.DummyTreePos, "text", nil, "helloyorkie")) + para := crdt.NewTreeNode(dummyTreeNodeID, "p", nil) + err := para.Append(crdt.NewTreeNode(dummyTreeNodeID, "text", nil, "helloyorkie")) assert.NoError(t, err) assert.Equal(t, "

helloyorkie

", crdt.ToXML(para)) assert.Equal(t, 11, para.Len()) @@ -47,22 +54,22 @@ func TestTreeNode(t *testing.T) { left, err := para.Child(0) assert.NoError(t, err) - right, err := left.Split(5) + right, err := left.Split(5, 0) assert.NoError(t, err) assert.Equal(t, "

helloyorkie

", crdt.ToXML(para)) assert.Equal(t, 11, para.Len()) assert.Equal(t, "hello", left.Value) assert.Equal(t, "yorkie", right.Value) - assert.Equal(t, &crdt.TreePos{CreatedAt: time.InitialTicket, Offset: 0}, left.Pos) - assert.Equal(t, &crdt.TreePos{CreatedAt: time.InitialTicket, Offset: 5}, right.Pos) + assert.Equal(t, &crdt.TreeNodeID{CreatedAt: time.InitialTicket, Offset: 0}, left.ID) + assert.Equal(t, &crdt.TreeNodeID{CreatedAt: time.InitialTicket, Offset: 5}, right.ID) }) t.Run("element node with attributes test", func(t *testing.T) { attrs := crdt.NewRHT() attrs.Set("font-weight", "bold", time.InitialTicket) - node := crdt.NewTreeNode(crdt.DummyTreePos, "span", attrs) - err := node.Append(crdt.NewTreeNode(crdt.DummyTreePos, "text", nil, "helloyorkie")) + node := crdt.NewTreeNode(dummyTreeNodeID, "span", attrs) + err := node.Append(crdt.NewTreeNode(dummyTreeNodeID, "text", nil, "helloyorkie")) assert.NoError(t, err) assert.Equal(t, `helloyorkie`, crdt.ToXML(node)) }) @@ -79,13 +86,13 @@ func TestTreeNode(t *testing.T) { {12, "🌷🎁💩😜👍🏳"}, } for _, test := range tests { - para := crdt.NewTreeNode(crdt.DummyTreePos, "p", nil) - err := para.Append(crdt.NewTreeNode(crdt.DummyTreePos, "text", nil, test.value)) + para := crdt.NewTreeNode(dummyTreeNodeID, "p", nil) + err := para.Append(crdt.NewTreeNode(dummyTreeNodeID, "text", nil, test.value)) assert.NoError(t, err) left, err := para.Child(0) assert.NoError(t, err) assert.Equal(t, test.length, left.Len()) - right, err := left.Split(2) + right, err := left.Split(2, 0) assert.NoError(t, err) assert.Equal(t, test.length-2, right.Len()) } @@ -101,27 +108,24 @@ func TestTree(t *testing.T) { tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "r", nil), helper.IssueTime(ctx)) assert.Equal(t, 0, tree.Root().Len()) assert.Equal(t, "", tree.ToXML()) - helper.ListEqual(t, tree, []string{"r"}) // 1 //

- err := tree.EditByIndex(0, 0, + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) assert.Equal(t, "

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"p", "r"}) assert.Equal(t, 2, tree.Root().Len()) // 1 //

h e l l o

- err = tree.EditByIndex( - 1, 1, + _, err = tree.EditByIndex( + 1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "hello")}, helper.IssueTime(ctx), ) assert.NoError(t, err) assert.Equal(t, "

hello

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.hello", "p", "r"}) assert.Equal(t, 7, tree.Root().Len()) // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 @@ -129,22 +133,20 @@ func TestTree(t *testing.T) { p := crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil) err = p.InsertAt(crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "world"), 0) assert.NoError(t, err) - err = tree.EditByIndex(7, 7, []*crdt.TreeNode{p}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(7, 7, nil, []*crdt.TreeNode{p}, helper.IssueTime(ctx)) assert.NoError(t, err) assert.Equal(t, "

hello

world

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.hello", "p", "text.world", "p", "r"}) assert.Equal(t, 14, tree.Root().Len()) // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 //

h e l l o !

w o r l d

- err = tree.EditByIndex( - 6, 6, + _, err = tree.EditByIndex( + 6, 6, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "!")}, helper.IssueTime(ctx), ) assert.NoError(t, err) assert.Equal(t, "

hello!

world

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.hello", "text.!", "p", "text.world", "p", "r"}) assert.Equal(t, crdt.TreeNodeForTest{ Type: "r", Children: []crdt.TreeNodeForTest{ @@ -172,14 +174,13 @@ func TestTree(t *testing.T) { // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 //

h e l l o ~ !

w o r l d

- err = tree.EditByIndex( - 6, 6, + _, err = tree.EditByIndex( + 6, 6, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "~")}, helper.IssueTime(ctx), ) assert.NoError(t, err) assert.Equal(t, "

hello~!

world

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.hello", "text.~", "text.!", "p", "text.world", "p", "r"}) }) t.Run("delete text nodes with Edit test", func(t *testing.T) { @@ -190,20 +191,19 @@ func TestTree(t *testing.T) { ctx := helper.TextChangeContext(helper.TestRoot()) tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err := tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "ab")}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(4, 4, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(4, 4, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(5, 5, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(5, 5, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "cd")}, helper.IssueTime(ctx)) assert.NoError(t, err) assert.Equal(t, "

ab

cd

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.ab", "p", "text.cd", "p", "root"}) structure := tree.Structure() assert.Equal(t, 8, structure.Size) @@ -213,10 +213,9 @@ func TestTree(t *testing.T) { // 02. Delete b from the first paragraph. // 0 1 2 3 4 5 6 7 //

a

c d

- err = tree.EditByIndex(2, 3, nil, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(2, 3, nil, nil, helper.IssueTime(ctx)) assert.NoError(t, err) assert.Equal(t, "

a

cd

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.a", "p", "text.cd", "p", "root"}) structure = tree.Structure() assert.Equal(t, 7, structure.Size) @@ -231,219 +230,182 @@ func TestTree(t *testing.T) { ctx := helper.TextChangeContext(helper.TestRoot()) tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err := tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "ab")}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(4, 4, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(4, 4, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(5, 5, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(5, 5, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "cd")}, helper.IssueTime(ctx)) assert.NoError(t, err) assert.Equal(t, "

ab

cd

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.ab", "p", "text.cd", "p", "root"}) // 02. delete b, c and first paragraph. // 0 1 2 3 4 //

a d

- err = tree.EditByIndex(2, 6, nil, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(2, 6, nil, nil, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ad

", tree.ToXML()) - helper.ListEqual(t, tree, []string{"text.a", "text.d", "p", "root"}) + assert.Equal(t, "

a

d

", tree.ToXML()) - structure := tree.Structure() - assert.Equal(t, 4, structure.Size) - assert.Equal(t, 2, structure.Children[0].Size) - assert.Equal(t, 1, structure.Children[0].Children[0].Size) - assert.Equal(t, 1, structure.Children[0].Children[1].Size) + // TODO(sejongk): Use the below assertions after implementing Tree.Move. + // assert.Equal(t, "

ad

", tree.ToXML()) - // 03. insert a new text node at the start of the first paragraph. - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "@")}, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, "

@ad

", tree.ToXML()) + // structure := tree.Structure() + // assert.Equal(t, 4, structure.Size) + // assert.Equal(t, 2, structure.Children[0].Size) + // assert.Equal(t, 1, structure.Children[0].Children[0].Size) + // assert.Equal(t, 1, structure.Children[0].Children[1].Size) + + // // 03. insert a new text node at the start of the first paragraph. + // _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + // "text", nil, "@")}, helper.IssueTime(ctx)) + // assert.NoError(t, err) + // assert.Equal(t, "

@ad

", tree.ToXML()) }) - t.Run("merge different levels with Edit", func(t *testing.T) { - // 01. Edit between two element nodes in the same hierarchy. - // 0 1 2 3 4 5 6 7 8 - //

a b

+ t.Run("style node with element attributes test", func(t *testing.T) { + // 01. style attributes to an element node. ctx := helper.TextChangeContext(helper.TestRoot()) tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err := tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "ab")}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(5, 6, nil, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - - // 02. Edit between two element nodes in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(4, 4, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(6, 7, nil, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(5, 5, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "text", nil, "cd")}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) + assert.Equal(t, "

ab

cd

", tree.ToXML()) - // 03. Edit between text and element node in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "p", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) + // style attributes with opening tag + err = tree.StyleByIndex(0, 1, map[string]string{"weight": "bold"}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(4, 6, nil, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, "

a

", tree.ToXML()) + assert.Equal(t, `

ab

cd

`, tree.ToXML()) - // 04. Edit between text and element node in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "p", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) + // style attributes with closing tag + err = tree.StyleByIndex(3, 4, map[string]string{"color": "red"}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(5, 7, nil, helper.IssueTime(ctx)) + assert.Equal(t, `

ab

cd

`, tree.ToXML()) + + // style attributes with the whole + err = tree.StyleByIndex(0, 4, map[string]string{"size": "small"}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) + assert.Equal(t, `

ab

cd

`, tree.ToXML()) - // 05. Edit between text and element node in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "p", nil)}, helper.IssueTime(ctx)) + // 02. style attributes to elements. + err = tree.StyleByIndex(0, 5, map[string]string{"style": "italic"}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) + assert.Equal(t, `

ab

`+ + `

cd

`, tree.ToXML()) + + // 03. Ignore styling attributes to text nodes. + err = tree.StyleByIndex(1, 3, map[string]string{"bold": "true"}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) + assert.Equal(t, `

ab

`+ + `

cd

`, tree.ToXML()) + }) + + t.Run("can find the closest TreePos when parentNode or leftSiblingNode does not exist", func(t *testing.T) { + root := helper.TestRoot() + ctx := helper.TextChangeContext(root) + // 0 + // + tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "r", nil), helper.IssueTime(ctx)) + assert.Equal(t, 0, tree.Root().Len()) + assert.Equal(t, "", tree.ToXML()) + + // 0 1 2 3 4 + //

a b

+ pNode := crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil) + textNode := crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "ab") + + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{pNode}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{textNode}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(4, 7, nil, helper.IssueTime(ctx)) + assert.Equal(t, "

ab

", tree.ToXML()) + + // Find the closest index.TreePos when leftSiblingNode in crdt.TreePos is removed. + // 0 1 2 + //

+ _, err = tree.EditByIndex(1, 3, nil, nil, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

a

", tree.ToXML()) + assert.Equal(t, "

", tree.ToXML()) - // 06. Edit between text and element node in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "p", nil)}, helper.IssueTime(ctx)) + treePos := crdt.NewTreePos(pNode.ID, textNode.ID) + + parent, leftSibling, err := tree.FindTreeNodesWithSplitText(treePos, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) + idx, err := tree.ToIndex(parent.Value, leftSibling.Value) assert.NoError(t, err) - err = tree.EditByIndex(2, 2, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "i", nil)}, helper.IssueTime(ctx)) + assert.Equal(t, 1, idx) + + // Find the closest index.TreePos when parentNode in crdt.TreePos is removed. + // 0 + // + _, err = tree.EditByIndex(0, 2, nil, nil, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(3, 3, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) + assert.Equal(t, "", tree.ToXML()) + + treePos = crdt.NewTreePos(pNode.ID, textNode.ID) + parent, leftSibling, err = tree.FindTreeNodesWithSplitText(treePos, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

", tree.ToXML()) - err = tree.EditByIndex(3, 7, nil, helper.IssueTime(ctx)) + idx, err = tree.ToIndex(parent.Value, leftSibling.Value) assert.NoError(t, err) - assert.Equal(t, "

", tree.ToXML()) + assert.Equal(t, 0, idx) + }) - // 07. Edit between text and element node in same hierarchy. - tree = crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err = tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + t.Run("delete nodes in a multi-level range test", func(t *testing.T) { + ctx := helper.TextChangeContext(helper.TestRoot()) + tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) + _, err := tree.EditByIndex(0, 0, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(1, 1, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "text", nil, "ab")}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(4, 4, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(3, 3, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(5, 5, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "b", nil)}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(6, 6, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "cd")}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(4, 4, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "text", nil, "x")}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(10, 10, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(7, 7, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(11, 11, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ef")}, helper.IssueTime(ctx)) + _, err = 
tree.EditByIndex(8, 8, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

cd

ef

", tree.ToXML()) - err = tree.EditByIndex(9, 10, nil, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(9, 9, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "text", nil, "cd")}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

cd

ef

", tree.ToXML()) - }) - - t.Run("style node with attributes test", func(t *testing.T) { - // 01. style attributes to an element node. - ctx := helper.TextChangeContext(helper.TestRoot()) - tree := crdt.NewTree(crdt.NewTreeNode(helper.IssuePos(ctx), "root", nil), helper.IssueTime(ctx)) - err := tree.EditByIndex(0, 0, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(13, 13, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(1, 1, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "ab")}, helper.IssueTime(ctx)) - assert.NoError(t, err) - err = tree.EditByIndex(4, 4, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + _, err = tree.EditByIndex(14, 14, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), "p", nil)}, helper.IssueTime(ctx)) assert.NoError(t, err) - err = tree.EditByIndex(5, 5, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), - "text", nil, "cd")}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(15, 15, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "text", nil, "y")}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, "

ab

cd

", tree.ToXML()) - - err = tree.StyleByIndex(3, 4, map[string]string{"weight": "bold"}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(17, 17, nil, []*crdt.TreeNode{crdt.NewTreeNode(helper.IssuePos(ctx), + "text", nil, "ef")}, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, `

ab

cd

`, tree.ToXML()) + assert.Equal(t, "

ab

x

cd

y

ef

", tree.ToXML()) - // 02. style attributes to elements. - err = tree.StyleByIndex(3, 8, map[string]string{"style": "italic"}, helper.IssueTime(ctx)) + _, err = tree.EditByIndex(2, 18, nil, nil, helper.IssueTime(ctx)) assert.NoError(t, err) - assert.Equal(t, `

ab

cd

`, tree.ToXML()) + assert.Equal(t, "

a

f

", tree.ToXML()) - // 03. style attributes to text nodes. - err = tree.StyleByIndex(1, 3, map[string]string{"style": "italic"}, helper.IssueTime(ctx)) - assert.NoError(t, err) - assert.Equal(t, `

ab

cd

`, tree.ToXML()) + // TODO(sejongk): Use the below assertion after implementing Tree.Move. + // assert.Equal(t, "

af

", tree.ToXML()) }) } diff --git a/pkg/document/document.go b/pkg/document/document.go index 985dd2f6d..d9ca6b2dd 100644 --- a/pkg/document/document.go +++ b/pkg/document/document.go @@ -43,6 +43,10 @@ const ( // enabling real-time synchronization. WatchedEvent DocEventType = "watched" + // UnwatchedEvent means that the client has disconnected from the server, + // disabling real-time synchronization. + UnwatchedEvent DocEventType = "unwatched" + // PresenceChangedEvent means that the presences of the clients who are editing // the document have changed. PresenceChangedEvent DocEventType = "presence-changed" @@ -282,48 +286,50 @@ func (d *Document) ensureClone() error { return nil } -// Presences returns the presence map of this document. -func (d *Document) Presences() map[string]innerpresence.Presence { - // TODO(hackerwins): We need to use client key instead of actor ID for exposing presence. - presences := make(map[string]innerpresence.Presence) - d.doc.presences.Range(func(key string, value innerpresence.Presence) bool { - presences[key] = value - return true - }) - return presences +// MyPresence returns the presence of the actor. +func (d *Document) MyPresence() innerpresence.Presence { + return d.doc.MyPresence() } // Presence returns the presence of the given client. +// If the client is not online, it returns nil. func (d *Document) Presence(clientID string) innerpresence.Presence { return d.doc.Presence(clientID) } -// MyPresence returns the presence of the actor. -func (d *Document) MyPresence() innerpresence.Presence { - return d.doc.MyPresence() +// PresenceForTest returns the presence of the given client +// regardless of whether the client is online or not. +func (d *Document) PresenceForTest(clientID string) innerpresence.Presence { + return d.doc.PresenceForTest(clientID) } -// SetOnlineClientSet sets the online client set. -func (d *Document) SetOnlineClientSet(clientIDs ...string) { - d.doc.SetOnlineClientSet(clientIDs...) 
+// Presences returns the presence map of online clients. +func (d *Document) Presences() map[string]innerpresence.Presence { + // TODO(hackerwins): We need to use client key instead of actor ID for exposing presence. + return d.doc.Presences() +} + +// AllPresences returns the presence map of all clients +// regardless of whether the client is online or not. +func (d *Document) AllPresences() map[string]innerpresence.Presence { + return d.doc.AllPresences() } -// AddOnlineClient adds the given client to the online client set. +// SetOnlineClients sets the online clients. +func (d *Document) SetOnlineClients(clientIDs ...string) { + d.doc.SetOnlineClients(clientIDs...) +} + +// AddOnlineClient adds the given client to the online clients. func (d *Document) AddOnlineClient(clientID string) { d.doc.AddOnlineClient(clientID) } -// RemoveOnlineClient removes the given client from the online client set. +// RemoveOnlineClient removes the given client from the online clients. func (d *Document) RemoveOnlineClient(clientID string) { d.doc.RemoveOnlineClient(clientID) } -// OnlinePresence returns the presence of the given client. If the client is not -// online, it returns nil. -func (d *Document) OnlinePresence(clientID string) innerpresence.Presence { - return d.doc.OnlinePresence(clientID) -} - // Events returns the events of this document. func (d *Document) Events() <-chan DocEvent { return d.events diff --git a/pkg/document/innerpresence/presence.go b/pkg/document/innerpresence/presence.go index 7376d0f02..fb1168a0b 100644 --- a/pkg/document/innerpresence/presence.go +++ b/pkg/document/innerpresence/presence.go @@ -50,13 +50,13 @@ func (m *Map) Range(f func(clientID string, presence Presence) bool) { } // Load returns the presence for the given clientID. 
-func (m *Map) Load(clientID string) (Presence, bool) { +func (m *Map) Load(clientID string) Presence { presence, ok := m.presences.Load(clientID) if !ok { - return nil, false + return nil } - return presence.(Presence), true + return presence.(Presence) } // LoadOrStore returns the existing presence if exists. @@ -140,6 +140,9 @@ func (p Presence) Clear() { // DeepCopy copies itself deeply. func (p Presence) DeepCopy() Presence { + if p == nil { + return nil + } clone := make(map[string]string) for k, v := range p { clone[k] = v diff --git a/pkg/document/internal_document.go b/pkg/document/internal_document.go index e4ce0d8fc..4f1cdd1ad 100644 --- a/pkg/document/internal_document.go +++ b/pkg/document/internal_document.go @@ -257,17 +257,37 @@ func (d *InternalDocument) ApplyChanges(changes ...*change.Change) ([]DocEvent, if c.PresenceChange() != nil { clientID := c.ID().ActorID().String() if _, ok := d.onlineClients.Load(clientID); ok { - event := DocEvent{ - Type: PresenceChangedEvent, - Presences: map[string]innerpresence.Presence{ - clientID: c.PresenceChange().Presence, - }, + switch c.PresenceChange().ChangeType { + case innerpresence.Put: + // NOTE(chacha912): When the user exists in onlineClients, but + // their presence was initially absent, we can consider that we have + // received their initial presence, so trigger the 'watched' event. + eventType := PresenceChangedEvent + if !d.presences.Has(clientID) { + eventType = WatchedEvent + } + event := DocEvent{ + Type: eventType, + Presences: map[string]innerpresence.Presence{ + clientID: c.PresenceChange().Presence, + }, + } + events = append(events, event) + case innerpresence.Clear: + // NOTE(chacha912): When the user exists in onlineClients, but + // PresenceChange(clear) is received, we can consider it as detachment + // occurring before unwatching. + // Detached user is no longer participating in the document, we remove + // them from the online clients and trigger the 'unwatched' event. 
+ event := DocEvent{ + Type: UnwatchedEvent, + Presences: map[string]innerpresence.Presence{ + clientID: d.Presence(clientID), + }, + } + events = append(events, event) + d.RemoveOnlineClient(clientID) } - - if !d.presences.Has(clientID) { - event.Type = WatchedEvent - } - events = append(events, event) } } @@ -283,34 +303,56 @@ func (d *InternalDocument) ApplyChanges(changes ...*change.Change) ([]DocEvent, // MyPresence returns the presence of the actor currently editing the document. func (d *InternalDocument) MyPresence() innerpresence.Presence { - p := d.presences.LoadOrStore(d.changeID.ActorID().String(), innerpresence.NewPresence()) + if d.status != StatusAttached { + return innerpresence.NewPresence() + } + p := d.presences.Load(d.changeID.ActorID().String()) return p.DeepCopy() } -// Presences returns the map of presences of the actors currently editing the document. -func (d *InternalDocument) Presences() *innerpresence.Map { - return d.presences -} - -// OnlinePresence returns the presence of the given client. If the client is not -// online, it returns nil. -func (d *InternalDocument) OnlinePresence(clientID string) innerpresence.Presence { +// Presence returns the presence of the given client. +// If the client is not online, it returns nil. +func (d *InternalDocument) Presence(clientID string) innerpresence.Presence { if _, ok := d.onlineClients.Load(clientID); !ok { return nil } - presence, _ := d.presences.Load(clientID) - return presence + return d.presences.Load(clientID).DeepCopy() } -// Presence returns the presence of the given client. -func (d *InternalDocument) Presence(clientID string) innerpresence.Presence { - presence, _ := d.presences.Load(clientID) - return presence +// PresenceForTest returns the presence of the given client +// regardless of whether the client is online or not. 
+func (d *InternalDocument) PresenceForTest(clientID string) innerpresence.Presence { + return d.presences.Load(clientID).DeepCopy() +} + +// Presences returns the presence map of online clients. +func (d *InternalDocument) Presences() map[string]innerpresence.Presence { + presences := make(map[string]innerpresence.Presence) + d.onlineClients.Range(func(key, value interface{}) bool { + p := d.presences.Load(key.(string)) + if p == nil { + return true + } + presences[key.(string)] = p.DeepCopy() + return true + }) + return presences +} + +// AllPresences returns the presence map of all clients +// regardless of whether the client is online or not. +func (d *InternalDocument) AllPresences() map[string]innerpresence.Presence { + presences := make(map[string]innerpresence.Presence) + d.presences.Range(func(key string, value innerpresence.Presence) bool { + presences[key] = value.DeepCopy() + return true + }) + return presences } -// SetOnlineClientSet sets the online client set. -func (d *InternalDocument) SetOnlineClientSet(ids ...string) { +// SetOnlineClients sets the online clients. +func (d *InternalDocument) SetOnlineClients(ids ...string) { d.onlineClients.Range(func(key, value interface{}) bool { d.onlineClients.Delete(key) return true @@ -321,12 +363,12 @@ func (d *InternalDocument) SetOnlineClientSet(ids ...string) { } } -// AddOnlineClient adds the given client to the online client set. +// AddOnlineClient adds the given client to the online clients. func (d *InternalDocument) AddOnlineClient(clientID string) { d.onlineClients.Store(clientID, true) } -// RemoveOnlineClient removes the given client from the online client set. +// RemoveOnlineClient removes the given client from the online clients. 
func (d *InternalDocument) RemoveOnlineClient(clientID string) { d.onlineClients.Delete(clientID) } diff --git a/pkg/document/json/array.go b/pkg/document/json/array.go index 7f7c0bca9..5d13de46d 100644 --- a/pkg/document/json/array.go +++ b/pkg/document/json/array.go @@ -214,7 +214,9 @@ func (p *Array) insertAfterInternal( ticket, )) - p.InsertAfter(prevCreatedAt, value) + if err = p.InsertAfter(prevCreatedAt, value); err != nil { + panic(err) + } p.context.RegisterElement(value) return elem @@ -223,7 +225,10 @@ func (p *Array) insertAfterInternal( func (p *Array) moveBeforeInternal(nextCreatedAt, createdAt *time.Ticket) { ticket := p.context.IssueTimeTicket() - prevCreatedAt := p.FindPrevCreatedAt(nextCreatedAt) + prevCreatedAt, err := p.FindPrevCreatedAt(nextCreatedAt) + if err != nil { + panic(err) + } p.context.Push(operations.NewMove( p.Array.CreatedAt(), @@ -232,5 +237,7 @@ func (p *Array) moveBeforeInternal(nextCreatedAt, createdAt *time.Ticket) { ticket, )) - p.MoveAfter(prevCreatedAt, createdAt, ticket) + if err = p.MoveAfter(prevCreatedAt, createdAt, ticket); err != nil { + panic(err) + } } diff --git a/pkg/document/json/counter.go b/pkg/document/json/counter.go index 668ebd23c..0ae72809a 100644 --- a/pkg/document/json/counter.go +++ b/pkg/document/json/counter.go @@ -71,7 +71,9 @@ func (p *Counter) Increase(v interface{}) *Counter { panic("unsupported type") } - p.Counter.Increase(primitive) + if _, err := p.Counter.Increase(primitive); err != nil { + panic(err) + } p.context.Push(operations.NewIncrease( p.CreatedAt(), diff --git a/pkg/document/json/object.go b/pkg/document/json/object.go index 2cf157480..11b44bf24 100644 --- a/pkg/document/json/object.go +++ b/pkg/document/json/object.go @@ -75,14 +75,22 @@ func (p *Object) SetNewCounter(k string, t crdt.CounterType, n interface{}) *Cou v := p.setInternal(k, func(ticket *time.Ticket) crdt.Element { switch t { case crdt.IntegerCnt: + counter, err := crdt.NewCounter(crdt.IntegerCnt, n, ticket) + if err 
!= nil { + panic(err) + } return NewCounter( p.context, - crdt.NewCounter(crdt.IntegerCnt, n, ticket), + counter, ) case crdt.LongCnt: + counter, err := crdt.NewCounter(crdt.LongCnt, n, ticket) + if err != nil { + panic(err) + } return NewCounter( p.context, - crdt.NewCounter(crdt.LongCnt, n, ticket), + counter, ) default: panic("unsupported type") diff --git a/pkg/document/json/text.go b/pkg/document/json/text.go index 7129ba82c..abac1becc 100644 --- a/pkg/document/json/text.go +++ b/pkg/document/json/text.go @@ -122,29 +122,3 @@ func (p *Text) Style(from, to int, attributes map[string]string) *Text { return p } - -// Select stores that the given range has been selected. -func (p *Text) Select(from, to int) *Text { - if from > to { - panic("from should be less than or equal to to") - } - fromPos, toPos, err := p.Text.CreateRange(from, to) - if err != nil { - panic(err) - } - - ticket := p.context.IssueTimeTicket() - p.Text.Select( - fromPos, - toPos, - ticket, - ) - - p.context.Push(operations.NewSelect( - p.CreatedAt(), - fromPos, - toPos, - ticket, - )) - return p -} diff --git a/pkg/document/json/tree.go b/pkg/document/json/tree.go index 78e33b228..7437c000a 100644 --- a/pkg/document/json/tree.go +++ b/pkg/document/json/tree.go @@ -153,7 +153,7 @@ func (t *Tree) edit(fromPos, toPos *crdt.TreePos, contents []*TreeNode) bool { value += content.Value } - nodes = append(nodes, crdt.NewTreeNode(crdt.NewTreePos(ticket, 0), index.DefaultTextType, nil, value)) + nodes = append(nodes, crdt.NewTreeNode(crdt.NewTreeNodeID(ticket, 0), index.DefaultTextType, nil, value)) } else { for _, content := range contents { var attributes *crdt.RHT @@ -165,7 +165,7 @@ func (t *Tree) edit(fromPos, toPos *crdt.TreePos, contents []*TreeNode) bool { } var node *crdt.TreeNode - node = crdt.NewTreeNode(crdt.NewTreePos(ticket, 0), content.Type, attributes, content.Value) + node = crdt.NewTreeNode(crdt.NewTreeNodeID(ticket, 0), content.Type, attributes, content.Value) for _, child := range 
content.Children { if err := buildDescendants(t.context, child, node); err != nil { @@ -193,7 +193,8 @@ func (t *Tree) edit(fromPos, toPos *crdt.TreePos, contents []*TreeNode) bool { } ticket = t.context.LastTimeTicket() - if err := t.Tree.Edit(fromPos, toPos, clones, ticket); err != nil { + maxCreationMapByActor, err := t.Tree.Edit(fromPos, toPos, nil, clones, ticket) + if err != nil { panic(err) } @@ -201,11 +202,12 @@ func (t *Tree) edit(fromPos, toPos *crdt.TreePos, contents []*TreeNode) bool { t.CreatedAt(), fromPos, toPos, + maxCreationMapByActor, nodes, ticket, )) - if fromPos.CreatedAt.Compare(toPos.CreatedAt) != 0 || fromPos.Offset != toPos.Offset { + if !fromPos.Equals(toPos) { t.context.RegisterElementHasRemovedNodes(t.Tree) } @@ -269,10 +271,10 @@ func (t *Tree) Style(fromIdx, toIdx int, attributes map[string]string) bool { // node is nil, it creates a default root node. func buildRoot(ctx *change.Context, node *TreeNode, createdAt *time.Ticket) *crdt.TreeNode { if node == nil { - return crdt.NewTreeNode(crdt.NewTreePos(createdAt, 0), DefaultRootNodeType, nil) + return crdt.NewTreeNode(crdt.NewTreeNodeID(createdAt, 0), DefaultRootNodeType, nil) } - root := crdt.NewTreeNode(crdt.NewTreePos(createdAt, 0), node.Type, nil) + root := crdt.NewTreeNode(crdt.NewTreeNodeID(createdAt, 0), node.Type, nil) for _, child := range node.Children { if err := buildDescendants(ctx, child, root); err != nil { panic(err) @@ -290,7 +292,7 @@ func buildDescendants(ctx *change.Context, n TreeNode, parent *crdt.TreeNode) er return err } - treeNode := crdt.NewTreeNode(crdt.NewTreePos(ctx.IssueTimeTicket(), 0), n.Type, nil, n.Value) + treeNode := crdt.NewTreeNode(crdt.NewTreeNodeID(ctx.IssueTimeTicket(), 0), n.Type, nil, n.Value) return parent.Append(treeNode) } @@ -304,7 +306,7 @@ func buildDescendants(ctx *change.Context, n TreeNode, parent *crdt.TreeNode) er } } - treeNode := crdt.NewTreeNode(crdt.NewTreePos(ticket, 0), n.Type, attributes) + treeNode := 
crdt.NewTreeNode(crdt.NewTreeNodeID(ticket, 0), n.Type, attributes) if err := parent.Append(treeNode); err != nil { return err } diff --git a/pkg/document/operations/add.go b/pkg/document/operations/add.go index d2e1ec58a..9ea18d5d8 100644 --- a/pkg/document/operations/add.go +++ b/pkg/document/operations/add.go @@ -64,7 +64,10 @@ func (o *Add) Execute(root *crdt.Root) error { if err != nil { return err } - obj.InsertAfter(o.prevCreatedAt, value) + + if err = obj.InsertAfter(o.prevCreatedAt, value); err != nil { + return err + } root.RegisterElement(value) return nil diff --git a/pkg/document/operations/increase.go b/pkg/document/operations/increase.go index cff99aeea..5b3d126a6 100644 --- a/pkg/document/operations/increase.go +++ b/pkg/document/operations/increase.go @@ -51,7 +51,9 @@ func (o *Increase) Execute(root *crdt.Root) error { } value := o.value.(*crdt.Primitive) - cnt.Increase(value) + if _, err := cnt.Increase(value); err != nil { + return err + } return nil } diff --git a/pkg/document/operations/move.go b/pkg/document/operations/move.go index 265006292..5207b8ecf 100644 --- a/pkg/document/operations/move.go +++ b/pkg/document/operations/move.go @@ -60,9 +60,7 @@ func (o *Move) Execute(root *crdt.Root) error { return ErrNotApplicableDataType } - obj.MoveAfter(o.prevCreatedAt, o.createdAt, o.executedAt) - - return nil + return obj.MoveAfter(o.prevCreatedAt, o.createdAt, o.executedAt) } // CreatedAt returns the creation time of the target element. 
diff --git a/pkg/document/operations/remove.go b/pkg/document/operations/remove.go index dce83903c..4abf3cdcc 100644 --- a/pkg/document/operations/remove.go +++ b/pkg/document/operations/remove.go @@ -53,7 +53,10 @@ func (o *Remove) Execute(root *crdt.Root) error { switch parent := parentElem.(type) { case crdt.Container: - elem := parent.DeleteByCreatedAt(o.createdAt, o.executedAt) + elem, err := parent.DeleteByCreatedAt(o.createdAt, o.executedAt) + if err != nil { + return err + } if elem != nil { root.RegisterRemovedElementPair(parent, elem) } diff --git a/pkg/document/operations/select.go b/pkg/document/operations/select.go deleted file mode 100644 index 0825dcbd1..000000000 --- a/pkg/document/operations/select.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2020 The Yorkie Authors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package operations - -import ( - "github.com/yorkie-team/yorkie/pkg/document/crdt" - "github.com/yorkie-team/yorkie/pkg/document/time" -) - -// Select represents an operation that selects an area in the text. -type Select struct { - // parentCreatedAt is the creation time of the Text that executes Select. - parentCreatedAt *time.Ticket - - // from represents the start point of the selection. - from *crdt.RGATreeSplitNodePos - - // to represents the end point of the selection. - to *crdt.RGATreeSplitNodePos - - // executedAt is the time the operation was executed. 
- executedAt *time.Ticket -} - -// NewSelect creates a new instance of Select. -func NewSelect( - parentCreatedAt *time.Ticket, - from *crdt.RGATreeSplitNodePos, - to *crdt.RGATreeSplitNodePos, - executedAt *time.Ticket, -) *Select { - return &Select{ - parentCreatedAt: parentCreatedAt, - from: from, - to: to, - executedAt: executedAt, - } -} - -// Execute executes this operation on the given document(`root`). -func (s *Select) Execute(root *crdt.Root) error { - parent := root.FindByCreatedAt(s.parentCreatedAt) - - switch obj := parent.(type) { - case *crdt.Text: - obj.Select(s.from, s.to, s.executedAt) - default: - return ErrNotApplicableDataType - } - - return nil -} - -// From returns the start point of the selection. -func (s *Select) From() *crdt.RGATreeSplitNodePos { - return s.from -} - -// To returns the end point of the selection. -func (s *Select) To() *crdt.RGATreeSplitNodePos { - return s.to -} - -// ExecutedAt returns execution time of this operation. -func (s *Select) ExecutedAt() *time.Ticket { - return s.executedAt -} - -// SetActor sets the given actor to this operation. -func (s *Select) SetActor(actorID *time.ActorID) { - s.executedAt = s.executedAt.SetActorID(actorID) -} - -// ParentCreatedAt returns the creation time of the Text. -func (s *Select) ParentCreatedAt() *time.Ticket { - return s.parentCreatedAt -} diff --git a/pkg/document/operations/tree_edit.go b/pkg/document/operations/tree_edit.go index 845c02451..400ffd094 100644 --- a/pkg/document/operations/tree_edit.go +++ b/pkg/document/operations/tree_edit.go @@ -33,6 +33,10 @@ type TreeEdit struct { // toPos represents the end point of the editing range. to *crdt.TreePos + // latestCreatedAtMapByActor is a map that stores the latest creation time + // by actor for the nodes included in the editing range. + latestCreatedAtMapByActor map[string]*time.Ticket + // contents is the content of tree added when editing. 
contents []*crdt.TreeNode @@ -45,15 +49,17 @@ func NewTreeEdit( parentCreatedAt *time.Ticket, from *crdt.TreePos, to *crdt.TreePos, + latestCreatedAtMapByActor map[string]*time.Ticket, contents []*crdt.TreeNode, executedAt *time.Ticket, ) *TreeEdit { return &TreeEdit{ - parentCreatedAt: parentCreatedAt, - from: from, - to: to, - contents: contents, - executedAt: executedAt, + parentCreatedAt: parentCreatedAt, + from: from, + to: to, + latestCreatedAtMapByActor: latestCreatedAtMapByActor, + contents: contents, + executedAt: executedAt, } } @@ -78,11 +84,11 @@ func (e *TreeEdit) Execute(root *crdt.Root) error { } } - if err = obj.Edit(e.from, e.to, contents, e.executedAt); err != nil { + if _, err = obj.Edit(e.from, e.to, e.latestCreatedAtMapByActor, contents, e.executedAt); err != nil { return err } - if e.from.CreatedAt.Compare(e.to.CreatedAt) != 0 || e.from.Offset != e.to.Offset { + if !e.from.Equals(e.to) { root.RegisterElementHasRemovedNodes(obj) } default: @@ -121,3 +127,9 @@ func (e *TreeEdit) ParentCreatedAt() *time.Ticket { func (e *TreeEdit) Contents() []*crdt.TreeNode { return e.contents } + +// CreatedAtMapByActor returns the map that stores the latest creation time +// by actor for the nodes included in the editing range. +func (e *TreeEdit) CreatedAtMapByActor() map[string]*time.Ticket { + return e.latestCreatedAtMapByActor +} diff --git a/pkg/index/tree.go b/pkg/index/tree.go index d0b29ab63..2f10154df 100644 --- a/pkg/index/tree.go +++ b/pkg/index/tree.go @@ -62,6 +62,18 @@ import ( * * In this case, index of TreePos(p, 0) is 0, index of TreePos(p, 1) is 2. * Index 1 can be converted to TreePos(i, 0). + * + * `path` of crdt.IndexTree represents a position like `index` in crdt.IndexTree. + * It contains offsets of each node from the root node as elements except the last. + * The last element of the path represents the position in the parent node. + * + * Let's say we have a tree like this: + * 0 1 2 + *
<p> <i> a b </i> <b> c d </b> </p>
+ *
+ * The path of the position between 'c' and 'd' is [1, 1]. The first element of the
+ * path is the offset of the <b> in <p>
and the second element represents the position + * between 'c' and 'd' in . */ var ( @@ -101,31 +113,43 @@ func postorderTraversal[V Value](node *Node[V], callback func(node *Node[V], dep return } - for _, child := range node.Children() { + for _, child := range node.Children(true) { postorderTraversal(child, callback, depth+1) } callback(node, depth) } -// postorderTraversalAll traverses the whole tree (include tombstones) with postorder traversal. -func postorderTraversalAll[V Value](node *Node[V], callback func(node *Node[V], depth int) error, depth int) error { - if node == nil { - return nil - } +// TagContained represents whether the opening or closing tag of a element is selected. +type TagContained int - for _, child := range node.children { - if err := postorderTraversalAll(child, callback, depth+1); err != nil { - return err - } - } +const ( + // AllContained represents that both opening and closing tag of a element are selected. + AllContained TagContained = 1 + iota + // OpeningContained represents that only the opening tag is selected. + OpeningContained + // ClosingContained represents that only the closing tag is selected. + ClosingContained +) - return callback(node, depth) +// ToString returns the string of TagContain. +func (c TagContained) ToString() string { + var str string + switch c { + case AllContained: + str = "All" + case OpeningContained: + str = "Opening" + case ClosingContained: + str = "Closing" + } + return str } // nodesBetween iterates the nodes between the given range. // If the given range is collapsed, the callback is not called. // It traverses the tree with postorder traversal. -func nodesBetween[V Value](root *Node[V], from, to int, callback func(node V)) error { +// NOTE(sejongk): Nodes should not be removed in callback, because it leads wrong behaviors. 
+func nodesBetween[V Value](root *Node[V], from, to int, callback func(node V, contain TagContained)) error { if from > to { return fmt.Errorf("from cannot be greater than to %d > %d", from, to) } @@ -153,6 +177,7 @@ func nodesBetween[V Value](root *Node[V], from, to int, callback func(node V)) e if child.IsText() { toChild = to - pos } + if err := nodesBetween( child, int(math.Max(0, float64(fromChild))), @@ -163,7 +188,15 @@ func nodesBetween[V Value](root *Node[V], from, to int, callback func(node V)) e } if fromChild < 0 || toChild > child.Length || child.IsText() { - callback(child.Value) + var contain TagContained + if (fromChild < 0 && toChild > child.Length) || child.IsText() { + contain = AllContained + } else if fromChild < 0 { + contain = OpeningContained + } else { + contain = ClosingContained + } + callback(child.Value, contain) } } pos += child.PaddedLength() @@ -193,11 +226,6 @@ func Traverse[V Value](tree *Tree[V], callback func(node *Node[V], depth int)) { postorderTraversal(tree.root, callback, 0) } -// TraverseAll traverses the whole tree (include tombstones) with postorder traversal. -func TraverseAll[V Value](tree *Tree[V], callback func(node *Node[V], depth int) error) error { - return postorderTraversalAll(tree.root, callback, 0) -} - // Value represents the data stored in the nodes of Tree. type Value interface { IsRemoved() bool @@ -344,7 +372,7 @@ func (n *Node[V]) InsertAfterInternal(newNode, prevNode *Node[V]) error { // nextSibling returns the next sibling of the node. func (n *Node[V]) nextSibling() (*Node[V], error) { - offset, err := n.Parent.findOffset(n) + offset, err := n.Parent.FindOffset(n) if err != nil { return nil, err } @@ -362,19 +390,25 @@ func (n *Node[V]) nextSibling() (*Node[V], error) { return nil, nil } -// findOffset returns the offset of the given node in the children. -func (n *Node[V]) findOffset(node *Node[V]) (int, error) { +// FindOffset returns the offset of the given node in the children. 
+func (n *Node[V]) FindOffset(node *Node[V]) (int, error) { if n.IsText() { return 0, ErrInvalidMethodCallForTextNode } - for i, child := range n.Children() { + // If nodes are removed, the offset of the removed node is the number of + // nodes before the node excluding the removed nodes. + offset := 0 + for _, child := range n.Children(true) { if child == node { - return i, nil + return offset, nil + } + if !child.Value.IsRemoved() { + offset++ } } - return -1, nil + return -1, ErrChildNotFound } // IsAncestorOf returns true if the node is an ancestor of the given node. @@ -462,7 +496,10 @@ func (n *Node[V]) Prepend(children ...*Node[V]) error { n.children = append(children, n.children...) for _, node := range children { node.Parent = n - node.UpdateAncestorsSize() + + if !node.Value.IsRemoved() { + node.UpdateAncestorsSize() + } } return nil @@ -555,7 +592,7 @@ func (n *Node[V]) OffsetOfChild(node *Node[V]) int { } // NodesBetween returns the nodes between the given range. -func (t *Tree[V]) NodesBetween(from int, to int, callback func(node V)) error { +func (t *Tree[V]) NodesBetween(from int, to int, callback func(node V, contain TagContained)) error { return nodesBetween(t.root, from, to, callback) } @@ -660,9 +697,9 @@ func (t *Tree[V]) TreePosToPath(treePos *TreePos[V]) ([]int, error) { return nil, ErrInvalidTreePos } - leftSiblingsSize := 0 - for _, child := range node.Parent.Children()[:offset] { - leftSiblingsSize += child.Length + leftSiblingsSize, err := t.LeftSiblingsSize(node.Parent, offset) + if err != nil { + return nil, err } node = node.Parent @@ -698,6 +735,20 @@ func (t *Tree[V]) TreePosToPath(treePos *TreePos[V]) ([]int, error) { return reversePath, nil } +// LeftSiblingsSize returns the size of left siblings of the given node +func (t *Tree[V]) LeftSiblingsSize(parent *Node[V], offset int) (int, error) { + leftSiblingsSize := 0 + children := parent.Children() + for i := 0; i < offset; i++ { + if children[i] == nil || 
children[i].Value.IsRemoved() { + continue + } + leftSiblingsSize += children[i].PaddedLength() + } + + return leftSiblingsSize, nil +} + // PathToTreePos returns treePos from given path func (t *Tree[V]) PathToTreePos(path []int) (*TreePos[V], error) { if len(path) == 0 { @@ -727,6 +778,21 @@ func (t *Tree[V]) PathToTreePos(path []int) (*TreePos[V], error) { }, nil } +// PathToIndex converts the given path to index. +func (t *Tree[V]) PathToIndex(path []int) (int, error) { + treePos, err := t.PathToTreePos(path) + if err != nil { + return -1, err + } + + idx, err := t.IndexOf(treePos) + if err != nil { + return 0, err + } + + return idx, nil +} + // findTextPos returns the tree position of the given path element. func findTextPos[V Value](node *Node[V], pathElement int) (*TreePos[V], error) { if node.Length < pathElement { @@ -822,35 +888,53 @@ func (t *Tree[V]) FindLeftmost(node *Node[V]) V { return t.FindLeftmost(node.Children()[0]) } -// IndexOf returns the index of the given node. -func (t *Tree[V]) IndexOf(node *Node[V]) (int, error) { - index := 0 - current := node +// IndexOf returns the index of the given tree position. 
+func (t *Tree[V]) IndexOf(pos *TreePos[V]) (int, error) { + node, offset := pos.Node, pos.Offset - for current != t.root { - parent := current.Parent - if parent == nil { - return 0, errors.New("parent is not found") + size := 0 + depth := 1 + + if node.IsText() { + size += offset + + parent := node.Parent + offsetOfNode, err := parent.FindOffset(node) + if err != nil { + return 0, err } - offset, err := parent.findOffset(current) + leftSiblingsSize, err := t.LeftSiblingsSize(parent, offsetOfNode) if err != nil { return 0, err } + size += leftSiblingsSize - childrenSlice := parent.Children()[:offset] - for _, previous := range childrenSlice { - index += previous.PaddedLength() + node = node.Parent + } else { + leftSiblingsSize, err := t.LeftSiblingsSize(node, offset) + if err != nil { + return 0, err } + size += leftSiblingsSize + } - // If this step escape from element node, we should add 1 to the index, - // because the element node has open tag. - if current != t.root && current != node && !current.IsText() { - index++ + for node.Parent != nil { + parent := node.Parent + offsetOfNode, err := parent.FindOffset(node) + if err != nil { + return 0, err + } + + leftSiblingsSize, err := t.LeftSiblingsSize(parent, offsetOfNode) + if err != nil { + return 0, err } - current = parent + size += leftSiblingsSize + depth++ + node = node.Parent } - return index, nil + return size + depth - 1, nil } diff --git a/pkg/index/tree_test.go b/pkg/index/tree_test.go index d2d87614c..e1e3b0d80 100644 --- a/pkg/index/tree_test.go +++ b/pkg/index/tree_test.go @@ -83,7 +83,7 @@ func TestIndexTree(t *testing.T) { }, }) - // postorder traversal: "ab", , "cd",

, + // postorder traversal: "ab",

, "cd",

, treePos, posErr := tree.FindTreePos(0) assert.NoError(t, posErr) @@ -166,12 +166,15 @@ func TestIndexTree(t *testing.T) { {Type: "p", Children: []json.TreeNode{{Type: "text", Value: "fg"}}}, }, }) - - helper.NodesBetweenEqual(t, tree, 2, 11, []string{"text.b", "p", "text.cde", "p", "text.fg", "p"}) - helper.NodesBetweenEqual(t, tree, 2, 6, []string{"text.b", "p", "text.cde", "p"}) - helper.NodesBetweenEqual(t, tree, 0, 1, []string{"p"}) - helper.NodesBetweenEqual(t, tree, 3, 4, []string{"p"}) - helper.NodesBetweenEqual(t, tree, 3, 5, []string{"p", "p"}) + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 + // `
<root> <p> a b </p> <p> c d e </p> <p> f g </p> </root>
` + + helper.NodesBetweenEqual(t, tree, 2, 11, []string{"text.b:All", "p:Closing", + "text.cde:All", "p:All", "text.fg:All", "p:Opening"}) + helper.NodesBetweenEqual(t, tree, 2, 6, []string{"text.b:All", "p:Closing", "text.cde:All", "p:Opening"}) + helper.NodesBetweenEqual(t, tree, 0, 1, []string{"p:Opening"}) + helper.NodesBetweenEqual(t, tree, 3, 4, []string{"p:Closing"}) + helper.NodesBetweenEqual(t, tree, 3, 5, []string{"p:Closing", "p:Opening"}) }) t.Run("find index of the given node test", func(t *testing.T) { @@ -194,7 +197,7 @@ func TestIndexTree(t *testing.T) { assert.NoError(t, posErr) assert.Equal(t, "root", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - index, indexErr := tree.IndexOf(pos.Node) + index, indexErr := tree.IndexOf(pos) assert.NoError(t, indexErr) assert.Equal(t, 0, index) @@ -202,7 +205,7 @@ func TestIndexTree(t *testing.T) { assert.NoError(t, posErr) assert.Equal(t, "text.a", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - index, indexErr = tree.IndexOf(pos.Node) + index, indexErr = tree.IndexOf(pos) assert.NoError(t, indexErr) assert.Equal(t, 1, index) @@ -210,48 +213,28 @@ func TestIndexTree(t *testing.T) { assert.NoError(t, posErr) assert.Equal(t, "text.b", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) - index, indexErr = tree.IndexOf(pos.Node) + index, indexErr = tree.IndexOf(pos) assert.NoError(t, indexErr) - assert.Equal(t, 2, index) + assert.Equal(t, 3, index) pos, posErr = tree.FindTreePos(4, true) assert.NoError(t, posErr) assert.Equal(t, "root", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) - index, indexErr = tree.IndexOf(pos.Node) + index, indexErr = tree.IndexOf(pos) assert.NoError(t, indexErr) - assert.Equal(t, 0, index) + assert.Equal(t, 4, index) pos, posErr = tree.FindTreePos(10, true) assert.NoError(t, posErr) assert.Equal(t, "text.fg", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - index, indexErr = 
tree.IndexOf(pos.Node) + index, indexErr = tree.IndexOf(pos) assert.NoError(t, indexErr) assert.Equal(t, 10, index) - - firstP := tree.Root().Children()[0] - index, indexErr = tree.IndexOf(firstP) - assert.NoError(t, indexErr) - assert.Equal(t, "p", helper.ToDiagnostic(firstP.Value)) - assert.Equal(t, 0, index) - - secondP := tree.Root().Children()[1] - index, indexErr = tree.IndexOf(secondP) - assert.NoError(t, indexErr) - assert.Equal(t, "p", helper.ToDiagnostic(secondP.Value)) - assert.Equal(t, 4, index) - - thirdP := tree.Root().Children()[2] - index, indexErr = tree.IndexOf(thirdP) - assert.NoError(t, indexErr) - assert.Equal(t, "p", helper.ToDiagnostic(thirdP.Value)) - assert.Equal(t, 9, index) }) t.Run("find treePos from given path test", func(t *testing.T) { - t.Skip("TODO(hackerwins): remove skip") - // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 //
<root> <p> a b </p> <p> c d e </p> <p> f g </p> </root>
tree := helper.BuildIndexTree(&json.TreeNode{ @@ -274,83 +257,68 @@ func TestIndexTree(t *testing.T) { pos, err = tree.PathToTreePos([]int{0, 0}) assert.NoError(t, err) - assert.Equal(t, "p", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 0, pos.Offset) - - pos, err = tree.PathToTreePos([]int{0, 0, 0}) - assert.NoError(t, err) assert.Equal(t, "text.a", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - pos, err = tree.PathToTreePos([]int{0, 0, 1}) + pos, err = tree.PathToTreePos([]int{0, 1}) assert.NoError(t, err) assert.Equal(t, "text.a", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) - pos, err = tree.PathToTreePos([]int{0, 1, 0}) - assert.NoError(t, err) - assert.Equal(t, "text.b", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 0, pos.Offset) - - pos, err = tree.PathToTreePos([]int{0, 1, 1}) + pos, err = tree.PathToTreePos([]int{0, 2}) assert.NoError(t, err) assert.Equal(t, "text.b", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) pos, err = tree.PathToTreePos([]int{1}) assert.NoError(t, err) - assert.Equal(t, "p", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 0, pos.Offset) + assert.Equal(t, "root", helper.ToDiagnostic(pos.Node.Value)) + assert.Equal(t, 1, pos.Offset) pos, err = tree.PathToTreePos([]int{1, 0}) assert.NoError(t, err) - assert.Equal(t, "p", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 0, pos.Offset) - - pos, err = tree.PathToTreePos([]int{1, 0, 0}) - assert.NoError(t, err) assert.Equal(t, "text.cde", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - pos, err = tree.PathToTreePos([]int{1, 0, 1}) + pos, err = tree.PathToTreePos([]int{1, 1}) assert.NoError(t, err) assert.Equal(t, "text.cde", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) - pos, err = tree.PathToTreePos([]int{1, 0, 2}) + pos, err = tree.PathToTreePos([]int{1, 2}) assert.NoError(t, err) assert.Equal(t, "text.cde", helper.ToDiagnostic(pos.Node.Value)) 
assert.Equal(t, 2, pos.Offset) - pos, err = tree.PathToTreePos([]int{1, 0, 3}) + pos, err = tree.PathToTreePos([]int{1, 3}) assert.NoError(t, err) assert.Equal(t, "text.cde", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 3, pos.Offset) pos, err = tree.PathToTreePos([]int{2}) assert.NoError(t, err) - assert.Equal(t, "p", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 1, pos.Offset) + assert.Equal(t, "root", helper.ToDiagnostic(pos.Node.Value)) + assert.Equal(t, 2, pos.Offset) - pos, err = tree.PathToTreePos([]int{2, 0, 0}) + pos, err = tree.PathToTreePos([]int{2, 0}) assert.NoError(t, err) assert.Equal(t, "text.fg", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 0, pos.Offset) - pos, err = tree.PathToTreePos([]int{2, 0, 1}) + pos, err = tree.PathToTreePos([]int{2, 1}) assert.NoError(t, err) assert.Equal(t, "text.fg", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 1, pos.Offset) - pos, err = tree.PathToTreePos([]int{2, 0, 2}) + pos, err = tree.PathToTreePos([]int{2, 2}) assert.NoError(t, err) assert.Equal(t, "text.fg", helper.ToDiagnostic(pos.Node.Value)) assert.Equal(t, 2, pos.Offset) pos, err = tree.PathToTreePos([]int{3}) assert.NoError(t, err) - assert.Equal(t, "p", helper.ToDiagnostic(pos.Node.Value)) - assert.Equal(t, 2, pos.Offset) + assert.Equal(t, "root", helper.ToDiagnostic(pos.Node.Value)) + assert.Equal(t, 3, pos.Offset) }) t.Run("find path from given treePos test", func(t *testing.T) { diff --git a/server/backend/backend.go b/server/backend/backend.go index 19c5b19b3..55eeac5e9 100644 --- a/server/backend/backend.go +++ b/server/backend/backend.go @@ -163,7 +163,7 @@ func (b *Backend) Shutdown() error { logging.DefaultLogger().Error(err) } - logging.DefaultLogger().Infof("backend stoped: id: %s", b.serverInfo.ID) + logging.DefaultLogger().Infof("backend stopped: id: %s", b.serverInfo.ID) return nil } diff --git a/server/backend/config.go b/server/backend/config.go index 038b861c9..1201b4faf 100644 --- a/server/backend/config.go 
+++ b/server/backend/config.go @@ -70,6 +70,12 @@ type Config struct { // AuthWebhookCacheUnauthTTL is the TTL value to set when caching the unauthorized result. AuthWebhookCacheUnauthTTL string `yaml:"AuthWebhookCacheUnauthTTL"` + // ProjectInfoCacheSize is the cache size of the project info. + ProjectInfoCacheSize int `yaml:"ProjectInfoCacheSize"` + + // ProjectInfoCacheTTL is the TTL value to set when caching the project info. + ProjectInfoCacheTTL string `yaml:"ProjectInfoCacheTTL"` + // Hostname is yorkie server hostname. hostname is used by metrics. Hostname string `yaml:"Hostname"` } @@ -108,6 +114,14 @@ func (c *Config) Validate() error { ) } + if _, err := time.ParseDuration(c.ProjectInfoCacheTTL); err != nil { + return fmt.Errorf( + `invalid argument "%s" for "--project-info-cache-ttl" flag: %w`, + c.ProjectInfoCacheTTL, + err, + ) + } + return nil } @@ -154,3 +168,14 @@ func (c *Config) ParseAuthWebhookCacheUnauthTTL() time.Duration { return result } + +// ParseProjectInfoCacheTTL returns TTL for project info cache. 
+func (c *Config) ParseProjectInfoCacheTTL() time.Duration { + result, err := time.ParseDuration(c.ProjectInfoCacheTTL) + if err != nil { + fmt.Fprintf(os.Stderr, "parse project info cache ttl: %v\n", err) + os.Exit(1) + } + + return result +} diff --git a/server/backend/config_test.go b/server/backend/config_test.go index cbe447458..0a334212c 100644 --- a/server/backend/config_test.go +++ b/server/backend/config_test.go @@ -31,6 +31,7 @@ func TestConfig(t *testing.T) { AuthWebhookMaxWaitInterval: "0ms", AuthWebhookCacheAuthTTL: "10s", AuthWebhookCacheUnauthTTL: "10s", + ProjectInfoCacheTTL: "10m", } assert.NoError(t, validConf.Validate()) @@ -49,5 +50,9 @@ func TestConfig(t *testing.T) { conf4 := validConf conf4.AuthWebhookCacheUnauthTTL = "s" assert.Error(t, conf4.Validate()) + + conf5 := validConf + conf5.ProjectInfoCacheTTL = "10 minutes" + assert.Error(t, conf5.Validate()) } diff --git a/server/backend/database/database.go b/server/backend/database/database.go index 2d4922fee..be0118e2f 100644 --- a/server/backend/database/database.go +++ b/server/backend/database/database.go @@ -47,8 +47,8 @@ var ( // ErrDocumentNotFound is returned when the document could not be found. ErrDocumentNotFound = errors.New("document not found") - // ErrClientDocNotFound is returned when mapping between client and document could not be found. - ErrClientDocNotFound = errors.New("client document not found") + // ErrSnapshotNotFound is returned when the snapshot could not be found. + ErrSnapshotNotFound = errors.New("snapshot not found") // ErrConflictOnUpdate is returned when a conflict occurs during update. ErrConflictOnUpdate = errors.New("conflict on update") @@ -138,7 +138,9 @@ type Database interface { FindDeactivateCandidates( ctx context.Context, candidatesLimitPerProject int, - ) ([]*ClientInfo, error) + projectFetchSize int, + lastProjectID types.ID, + ) (types.ID, []*ClientInfo, error) // FindDocInfoByKey finds the document of the given key. 
FindDocInfoByKey( @@ -208,8 +210,16 @@ type Database interface { // CreateSnapshotInfo stores the snapshot of the given document. CreateSnapshotInfo(ctx context.Context, docID types.ID, doc *document.InternalDocument) error + // FindSnapshotInfoByID returns the snapshot by the given id. + FindSnapshotInfoByID(ctx context.Context, id types.ID) (*SnapshotInfo, error) + // FindClosestSnapshotInfo finds the closest snapshot info in a given serverSeq. - FindClosestSnapshotInfo(ctx context.Context, docID types.ID, serverSeq int64) (*SnapshotInfo, error) + FindClosestSnapshotInfo( + ctx context.Context, + docID types.ID, + serverSeq int64, + includeSnapshot bool, + ) (*SnapshotInfo, error) // FindMinSyncedSeqInfo finds the minimum synced sequence info. FindMinSyncedSeqInfo(ctx context.Context, docID types.ID) (*SyncedSeqInfo, error) diff --git a/server/backend/database/memory/database.go b/server/backend/database/memory/database.go index 93882c313..aa46dcfef 100644 --- a/server/backend/database/memory/database.go +++ b/server/backend/database/memory/database.go @@ -224,27 +224,38 @@ func (d *DB) CreateProjectInfo( return info, nil } -// ListAllProjectInfos returns all project infos. -func (d *DB) listAllProjectInfos( +// listProjectInfos returns all project infos rotationally. +func (d *DB) listProjectInfos( ctx context.Context, + pageSize int, + housekeepingLastProjectID types.ID, ) ([]*database.ProjectInfo, error) { txn := d.db.Txn(false) defer txn.Abort() - // TODO(krapie): txn.Get() loads all projects in memory, - // which will cause performance issue as number of projects in DB grows. - // Therefore, pagination of projects is needed to avoid this issue. 
- iter, err := txn.Get( + iter, err := txn.LowerBound( tblProjects, "id", + housekeepingLastProjectID.String(), ) if err != nil { - return nil, fmt.Errorf("fetch all projects: %w", err) + return nil, fmt.Errorf("fetch projects: %w", err) } var infos []*database.ProjectInfo - for raw := iter.Next(); raw != nil; raw = iter.Next() { + + for i := 0; i < pageSize; i++ { + raw := iter.Next() + if raw == nil { + break + } info := raw.(*database.ProjectInfo).DeepCopy() + + if i == 0 && info.ID == housekeepingLastProjectID { + pageSize++ + continue + } + infos = append(infos, info) } @@ -599,23 +610,32 @@ func (d *DB) findDeactivateCandidatesPerProject( func (d *DB) FindDeactivateCandidates( ctx context.Context, candidatesLimitPerProject int, -) ([]*database.ClientInfo, error) { - projects, err := d.listAllProjectInfos(ctx) + projectFetchSize int, + lastProjectID types.ID, +) (types.ID, []*database.ClientInfo, error) { + projects, err := d.listProjectInfos(ctx, projectFetchSize, lastProjectID) if err != nil { - return nil, err + return database.DefaultProjectID, nil, err } var candidates []*database.ClientInfo for _, project := range projects { infos, err := d.findDeactivateCandidatesPerProject(ctx, project, candidatesLimitPerProject) if err != nil { - return nil, err + return database.DefaultProjectID, nil, err } candidates = append(candidates, infos...) } - return candidates, nil + var topProjectID types.ID + if len(projects) < projectFetchSize { + topProjectID = database.DefaultProjectID + } else { + topProjectID = projects[len(projects)-1].ID + } + + return topProjectID, candidates, nil } // FindDocInfoByKeyAndOwner finds the document of the given key. 
If the @@ -947,7 +967,7 @@ func (d *DB) CreateSnapshotInfo( docID types.ID, doc *document.InternalDocument, ) error { - snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.Presences()) + snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.AllPresences()) if err != nil { return err } @@ -969,11 +989,27 @@ func (d *DB) CreateSnapshotInfo( return nil } +// FindSnapshotInfoByID returns the snapshot by the given id. +func (d *DB) FindSnapshotInfoByID(ctx context.Context, id types.ID) (*database.SnapshotInfo, error) { + txn := d.db.Txn(false) + defer txn.Abort() + raw, err := txn.First(tblSnapshots, "id", id.String()) + if err != nil { + return nil, fmt.Errorf("find snapshot by id: %w", err) + } + if raw == nil { + return nil, fmt.Errorf("%s: %w", id, database.ErrSnapshotNotFound) + } + + return raw.(*database.SnapshotInfo).DeepCopy(), nil +} + // FindClosestSnapshotInfo finds the last snapshot of the given document. func (d *DB) FindClosestSnapshotInfo( ctx context.Context, docID types.ID, serverSeq int64, + includeSnapshot bool, ) (*database.SnapshotInfo, error) { txn := d.db.Txn(false) defer txn.Abort() @@ -992,7 +1028,16 @@ func (d *DB) FindClosestSnapshotInfo( for raw := iterator.Next(); raw != nil; raw = iterator.Next() { info := raw.(*database.SnapshotInfo) if info.DocID == docID { - snapshotInfo = info + snapshotInfo = &database.SnapshotInfo{ + ID: info.ID, + DocID: info.DocID, + ServerSeq: info.ServerSeq, + Lamport: info.Lamport, + CreatedAt: info.CreatedAt, + } + if includeSnapshot { + snapshotInfo.Snapshot = info.Snapshot + } break } } diff --git a/server/backend/database/memory/housekeeping_test.go b/server/backend/database/memory/housekeeping_test.go index f71a91943..5c1aeaeb0 100644 --- a/server/backend/database/memory/housekeeping_test.go +++ b/server/backend/database/memory/housekeeping_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/assert" monkey "github.com/undefinedlabs/go-mpatch" + 
"github.com/yorkie-team/yorkie/server/backend/database" "github.com/yorkie-team/yorkie/server/backend/database/memory" ) @@ -39,7 +40,10 @@ func TestHousekeeping(t *testing.T) { ctx := context.Background() clientDeactivateThreshold := "23h" - _, project, err := memdb.EnsureDefaultUserAndProject(ctx, "test", "test", clientDeactivateThreshold) + + userInfo, err := memdb.CreateUserInfo(ctx, "test", "test") + assert.NoError(t, err) + project, err := memdb.CreateProjectInfo(ctx, database.DefaultProjectName, userInfo.ID, clientDeactivateThreshold) assert.NoError(t, err) yesterday := gotime.Now().Add(-24 * gotime.Hour) @@ -59,9 +63,11 @@ func TestHousekeeping(t *testing.T) { clientC, err := memdb.ActivateClient(ctx, project.ID, fmt.Sprintf("%s-C", t.Name())) assert.NoError(t, err) - candidates, err := memdb.FindDeactivateCandidates( + _, candidates, err := memdb.FindDeactivateCandidates( ctx, 10, + 10, + database.DefaultProjectID, ) assert.NoError(t, err) assert.Len(t, candidates, 2) @@ -69,4 +75,59 @@ func TestHousekeeping(t *testing.T) { assert.Contains(t, candidates, clientB) assert.NotContains(t, candidates, clientC) }) + + t.Run("housekeeping pagination test", func(t *testing.T) { + ctx := context.Background() + memdb, projects := createDBandProjects(t) + + fetchSize := 4 + lastProjectID, _, err := memdb.FindDeactivateCandidates( + ctx, + 0, + fetchSize, + database.DefaultProjectID, + ) + assert.NoError(t, err) + assert.Equal(t, projects[fetchSize-1].ID, lastProjectID) + + lastProjectID, _, err = memdb.FindDeactivateCandidates( + ctx, + 0, + fetchSize, + lastProjectID, + ) + assert.NoError(t, err) + assert.Equal(t, projects[fetchSize*2-1].ID, lastProjectID) + + lastProjectID, _, err = memdb.FindDeactivateCandidates( + ctx, + 0, + fetchSize, + lastProjectID, + ) + assert.NoError(t, err) + assert.Equal(t, database.DefaultProjectID, lastProjectID) + }) +} + +func createDBandProjects(t *testing.T) (*memory.DB, []*database.ProjectInfo) { + t.Helper() + + ctx := 
context.Background() + memdb, err := memory.New() + assert.NoError(t, err) + + clientDeactivateThreshold := "23h" + userInfo, err := memdb.CreateUserInfo(ctx, "test", "test") + assert.NoError(t, err) + + projects := make([]*database.ProjectInfo, 0) + for i := 0; i < 10; i++ { + p, err := memdb.CreateProjectInfo(ctx, fmt.Sprintf("%d project", i), userInfo.ID, clientDeactivateThreshold) + assert.NoError(t, err) + + projects = append(projects, p) + } + + return memdb, projects } diff --git a/server/backend/database/mongo/client.go b/server/backend/database/mongo/client.go index bdcdc460a..65c319ef8 100644 --- a/server/backend/database/mongo/client.go +++ b/server/backend/database/mongo/client.go @@ -235,21 +235,32 @@ func (c *Client) CreateProjectInfo( return info, nil } -// ListAllProjectInfos returns all project infos. -func (c *Client) listAllProjectInfos( +// listProjectInfos returns all project infos rotationally. +func (c *Client) listProjectInfos( ctx context.Context, + pageSize int, + housekeepingLastProjectID types.ID, ) ([]*database.ProjectInfo, error) { - // TODO(krapie): Find(ctx, bson.D{{}}) loads all projects in memory, - // which will cause performance issue as number of projects in DB grows. - // Therefore, pagination of projects is needed to avoid this issue. 
- cursor, err := c.collection(colProjects).Find(ctx, bson.D{{}}) + encodedID, err := encodeID(housekeepingLastProjectID) + if err != nil { + return nil, err + } + + opts := options.Find() + opts.SetLimit(int64(pageSize)) + + cursor, err := c.collection(colProjects).Find(ctx, bson.M{ + "_id": bson.M{ + "$gt": encodedID, + }, + }, opts) if err != nil { - return nil, fmt.Errorf("fetch all project infos: %w", err) + return nil, fmt.Errorf("find project infos: %w", err) } var infos []*database.ProjectInfo if err := cursor.All(ctx, &infos); err != nil { - return nil, fmt.Errorf("fetch all project infos: %w", err) + return nil, fmt.Errorf("fetch project infos: %w", err) } return infos, nil @@ -657,23 +668,31 @@ func (c *Client) findDeactivateCandidatesPerProject( func (c *Client) FindDeactivateCandidates( ctx context.Context, candidatesLimitPerProject int, -) ([]*database.ClientInfo, error) { - projects, err := c.listAllProjectInfos(ctx) + projectFetchSize int, + lastProjectID types.ID, +) (types.ID, []*database.ClientInfo, error) { + projects, err := c.listProjectInfos(ctx, projectFetchSize, lastProjectID) if err != nil { - return nil, err + return database.DefaultProjectID, nil, err } var candidates []*database.ClientInfo for _, project := range projects { clientInfos, err := c.findDeactivateCandidatesPerProject(ctx, project, candidatesLimitPerProject) if err != nil { - return nil, err + return database.DefaultProjectID, nil, err } candidates = append(candidates, clientInfos...) } - return candidates, nil + var topProjectID types.ID + if len(projects) < projectFetchSize { + topProjectID = database.DefaultProjectID + } else { + topProjectID = projects[len(projects)-1].ID + } + return topProjectID, candidates, nil } // FindDocInfoByKeyAndOwner finds the document of the given key. 
If the @@ -1031,7 +1050,7 @@ func (c *Client) CreateSnapshotInfo( if err != nil { return err } - snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.Presences()) + snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.AllPresences()) if err != nil { return err } @@ -1049,25 +1068,61 @@ func (c *Client) CreateSnapshotInfo( return nil } +// FindSnapshotInfoByID returns the snapshot by the given id. +func (c *Client) FindSnapshotInfoByID( + ctx context.Context, + id types.ID, +) (*database.SnapshotInfo, error) { + encodedID, err := encodeID(id) + if err != nil { + return nil, err + } + + result := c.collection(colSnapshots).FindOne(ctx, bson.M{ + "_id": encodedID, + }) + + snapshotInfo := &database.SnapshotInfo{} + if result.Err() == mongo.ErrNoDocuments { + return snapshotInfo, nil + } + if result.Err() != nil { + return nil, fmt.Errorf("find snapshot: %w", result.Err()) + } + + if err := result.Decode(snapshotInfo); err != nil { + return nil, fmt.Errorf("decode snapshot: %w", err) + } + + return snapshotInfo, nil +} + // FindClosestSnapshotInfo finds the last snapshot of the given document. 
func (c *Client) FindClosestSnapshotInfo( ctx context.Context, docID types.ID, serverSeq int64, + includeSnapshot bool, ) (*database.SnapshotInfo, error) { encodedDocID, err := encodeID(docID) if err != nil { return nil, err } + option := options.FindOne().SetSort(bson.M{ + "server_seq": -1, + }) + + if !includeSnapshot { + option.SetProjection(bson.M{"snapshot": 0}) + } + result := c.collection(colSnapshots).FindOne(ctx, bson.M{ "doc_id": encodedDocID, "server_seq": bson.M{ "$lte": serverSeq, }, - }, options.FindOne().SetSort(bson.M{ - "server_seq": -1, - })) + }, option) snapshotInfo := &database.SnapshotInfo{} if result.Err() == mongo.ErrNoDocuments { diff --git a/server/backend/database/mongo/client_test.go b/server/backend/database/mongo/client_test.go index ab1719d8d..9eaf04034 100644 --- a/server/backend/database/mongo/client_test.go +++ b/server/backend/database/mongo/client_test.go @@ -56,7 +56,7 @@ func TestClient(t *testing.T) { }) t.Run("RunFindDocInfosByQuery test", func(t *testing.T) { - t.Skip("TODO: the order of docInfos is different with memDB") + t.Skip("TODO(hackerwins): the order of docInfos is different with memDB") testcases.RunFindDocInfosByQueryTest(t, cli, projectOneID) }) @@ -69,7 +69,7 @@ func TestClient(t *testing.T) { }) t.Run("ListUserInfos test", func(t *testing.T) { - t.Skip("TODO: time is returned as Local") + t.Skip("TODO(hackerwins): time is returned as Local") testcases.RunListUserInfosTest(t, cli) }) @@ -100,4 +100,8 @@ func TestClient(t *testing.T) { t.Run("IsDocumentAttached test", func(t *testing.T) { testcases.RunIsDocumentAttachedTest(t, cli, dummyProjectID) }) + + t.Run("FindDeactivateCandidates test", func(t *testing.T) { + testcases.RunFindDeactivateCandidates(t, cli) + }) } diff --git a/server/backend/database/project_info.go b/server/backend/database/project_info.go index 2be73d0d6..8c5fe7eca 100644 --- a/server/backend/database/project_info.go +++ b/server/backend/database/project_info.go @@ -81,22 +81,6 @@ func 
NewProjectInfo(name string, owner types.ID, clientDeactivateThreshold strin } } -// ToProjectInfo converts the given types.Project to ProjectInfo. -func ToProjectInfo(project *types.Project) *ProjectInfo { - return &ProjectInfo{ - ID: project.ID, - Name: project.Name, - Owner: project.Owner, - PublicKey: project.PublicKey, - SecretKey: project.SecretKey, - AuthWebhookURL: project.AuthWebhookURL, - AuthWebhookMethods: project.AuthWebhookMethods, - ClientDeactivateThreshold: project.ClientDeactivateThreshold, - CreatedAt: project.CreatedAt, - UpdatedAt: project.UpdatedAt, - } -} - // DeepCopy returns a deep copy of the ProjectInfo. func (i *ProjectInfo) DeepCopy() *ProjectInfo { if i == nil { diff --git a/server/backend/database/snapshot_info.go b/server/backend/database/snapshot_info.go index f7bd17870..efa88ea29 100644 --- a/server/backend/database/snapshot_info.go +++ b/server/backend/database/snapshot_info.go @@ -42,3 +42,19 @@ type SnapshotInfo struct { // CreatedAt is the time when the snapshot is created. CreatedAt time.Time `bson:"created_at"` } + +// DeepCopy returns a deep copy of the SnapshotInfo. 
+func (i *SnapshotInfo) DeepCopy() *SnapshotInfo { + if i == nil { + return nil + } + + return &SnapshotInfo{ + ID: i.ID, + DocID: i.DocID, + ServerSeq: i.ServerSeq, + Lamport: i.Lamport, + Snapshot: i.Snapshot, + CreatedAt: i.CreatedAt, + } +} diff --git a/server/backend/database/testcases/testcases.go b/server/backend/database/testcases/testcases.go index e1fcbca7a..3cc19121c 100644 --- a/server/backend/database/testcases/testcases.go +++ b/server/backend/database/testcases/testcases.go @@ -19,8 +19,10 @@ package testcases import ( + "bytes" "context" "fmt" + "sort" "strconv" "testing" gotime "time" @@ -227,25 +229,25 @@ func RunFindClosestSnapshotInfoTest(t *testing.T, db database.Database, projectI })) assert.NoError(t, db.CreateSnapshotInfo(ctx, docInfo.ID, doc.InternalDocument())) - snapshot, err := db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq) + snapshot, err := db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq, true) assert.NoError(t, err) assert.Equal(t, int64(0), snapshot.ServerSeq) pack := change.NewPack(doc.Key(), doc.Checkpoint().NextServerSeq(1), nil, nil) assert.NoError(t, doc.ApplyChangePack(pack)) assert.NoError(t, db.CreateSnapshotInfo(ctx, docInfo.ID, doc.InternalDocument())) - snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq) + snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq, true) assert.NoError(t, err) assert.Equal(t, int64(1), snapshot.ServerSeq) pack = change.NewPack(doc.Key(), doc.Checkpoint().NextServerSeq(2), nil, nil) assert.NoError(t, doc.ApplyChangePack(pack)) assert.NoError(t, db.CreateSnapshotInfo(ctx, docInfo.ID, doc.InternalDocument())) - snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq) + snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, change.MaxCheckpoint.ServerSeq, true) assert.NoError(t, err) assert.Equal(t, int64(2), snapshot.ServerSeq) 
- snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, 1) + snapshot, err = db.FindClosestSnapshotInfo(ctx, docInfo.ID, 1, true) assert.NoError(t, err) assert.Equal(t, int64(1), snapshot.ServerSeq) }) @@ -622,6 +624,52 @@ func RunFindDocInfosByPagingTest(t *testing.T, db database.Database, projectID t }) } +// RunFindDeactivateCandidates runs the FindDeactivateCandidates tests for the given db. +func RunFindDeactivateCandidates(t *testing.T, db database.Database) { + t.Run("housekeeping pagination test", func(t *testing.T) { + ctx := context.Background() + + // Lists all projects of the dummyOwnerID and otherOwnerID. + projects, err := db.ListProjectInfos(ctx, dummyOwnerID) + assert.NoError(t, err) + otherProjects, err := db.ListProjectInfos(ctx, otherOwnerID) + assert.NoError(t, err) + + projects = append(projects, otherProjects...) + + sort.Slice(projects, func(i, j int) bool { + iBytes, err := projects[i].ID.Bytes() + assert.NoError(t, err) + jBytes, err := projects[j].ID.Bytes() + assert.NoError(t, err) + return bytes.Compare(iBytes, jBytes) < 0 + }) + + fetchSize := 3 + lastProjectID := database.DefaultProjectID + + for i := 0; i < len(projects)/fetchSize; i++ { + lastProjectID, _, err = db.FindDeactivateCandidates( + ctx, + 0, + fetchSize, + lastProjectID, + ) + assert.NoError(t, err) + assert.Equal(t, projects[((i+1)*fetchSize)-1].ID, lastProjectID) + } + + lastProjectID, _, err = db.FindDeactivateCandidates( + ctx, + 0, + fetchSize, + lastProjectID, + ) + assert.NoError(t, err) + assert.Equal(t, database.DefaultProjectID, lastProjectID) + }) +} + // RunCreateChangeInfosTest runs the CreateChangeInfos tests for the given db. 
func RunCreateChangeInfosTest(t *testing.T, db database.Database, projectID types.ID) { t.Run("set RemovedAt in docInfo test", func(t *testing.T) { diff --git a/server/backend/housekeeping/config.go b/server/backend/housekeeping/config.go new file mode 100644 index 000000000..513bbd7a5 --- /dev/null +++ b/server/backend/housekeeping/config.go @@ -0,0 +1,61 @@ +/* + * Copyright 2023 The Yorkie Authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package housekeeping + +import ( + "fmt" + "time" +) + +// Config is the configuration for the housekeeping service. +type Config struct { + // Interval is the time between housekeeping runs. + Interval string `yaml:"Interval"` + + // CandidatesLimitPerProject is the maximum number of candidates to be returned per project. + CandidatesLimitPerProject int `yaml:"CandidatesLimitPerProject"` + + // ProjectFetchSize is the maximum number of projects to be returned to deactivate candidates. + ProjectFetchSize int `yaml:"HousekeepingProjectFetchSize"` +} + +// Validate validates the configuration. 
+func (c *Config) Validate() error { + if _, err := time.ParseDuration(c.Interval); err != nil { + return fmt.Errorf( + `invalid argument %s for "--housekeeping-interval" flag: %w`, + c.Interval, + err, + ) + } + + if c.CandidatesLimitPerProject <= 0 { + return fmt.Errorf( + `invalid argument %d for "--housekeeping-candidates-limit-per-project" flag`, + c.CandidatesLimitPerProject, + ) + } + + if c.ProjectFetchSize <= 0 { + return fmt.Errorf( + `invalid argument %d for "--housekeeping-project-fetch-size" flag`, + c.ProjectFetchSize, + ) + } + + return nil +} diff --git a/server/backend/housekeeping/config_test.go b/server/backend/housekeeping/config_test.go new file mode 100644 index 000000000..876904135 --- /dev/null +++ b/server/backend/housekeeping/config_test.go @@ -0,0 +1,48 @@ +/* + * Copyright 2023 The Yorkie Authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package housekeeping_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/yorkie-team/yorkie/server/backend/housekeeping" +) + +func TestConfig(t *testing.T) { + t.Run("validate test", func(t *testing.T) { + validConf := housekeeping.Config{ + Interval: "1m", + CandidatesLimitPerProject: 100, + ProjectFetchSize: 100, + } + assert.NoError(t, validConf.Validate()) + + conf1 := validConf + conf1.Interval = "hour" + assert.Error(t, conf1.Validate()) + + conf2 := validConf + conf2.CandidatesLimitPerProject = 0 + assert.Error(t, conf2.Validate()) + + conf3 := validConf + conf3.ProjectFetchSize = -1 + assert.Error(t, conf3.Validate()) + }) +} diff --git a/server/backend/housekeeping/housekeeping.go b/server/backend/housekeeping/housekeeping.go index d08abcbdc..2672ab182 100644 --- a/server/backend/housekeeping/housekeeping.go +++ b/server/backend/housekeeping/housekeeping.go @@ -24,6 +24,7 @@ import ( "fmt" "time" + "github.com/yorkie-team/yorkie/api/types" "github.com/yorkie-team/yorkie/server/backend/database" "github.com/yorkie-team/yorkie/server/backend/sync" "github.com/yorkie-team/yorkie/server/clients" @@ -34,28 +35,6 @@ const ( deactivateCandidatesKey = "housekeeping/deactivateCandidates" ) -// Config is the configuration for the housekeeping service. -type Config struct { - // Interval is the time between housekeeping runs. - Interval string `yaml:"Interval"` - - // CandidatesLimitPerProject is the maximum number of candidates to be returned per project. - CandidatesLimitPerProject int `yaml:"CandidatesLimitPerProject"` -} - -// Validate validates the configuration. -func (c *Config) Validate() error { - if _, err := time.ParseDuration(c.Interval); err != nil { - return fmt.Errorf( - `invalid argument %s for "--housekeeping-interval" flag: %w`, - c.Interval, - err, - ) - } - - return nil -} - // Housekeeping is the housekeeping service. It periodically runs housekeeping // tasks. 
It is responsible for deactivating clients that have not been active // for a long time. @@ -65,6 +44,7 @@ type Housekeeping struct { interval time.Duration candidatesLimitPerProject int + projectFetchSize int ctx context.Context cancelFunc context.CancelFunc @@ -106,6 +86,7 @@ func New( interval: interval, candidatesLimitPerProject: conf.CandidatesLimitPerProject, + projectFetchSize: conf.ProjectFetchSize, ctx: ctx, cancelFunc: cancelFunc, @@ -127,11 +108,16 @@ func (h *Housekeeping) Stop() error { // run is the housekeeping loop. func (h *Housekeeping) run() { + housekeepingLastProjectID := database.DefaultProjectID + for { ctx := context.Background() - if err := h.deactivateCandidates(ctx); err != nil { + lastProjectID, err := h.deactivateCandidates(ctx, housekeepingLastProjectID) + if err != nil { + logging.From(ctx).Error(err) continue } + housekeepingLastProjectID = lastProjectID select { case <-time.After(h.interval): @@ -142,15 +128,18 @@ func (h *Housekeeping) run() { } // deactivateCandidates deactivates candidates. 
-func (h *Housekeeping) deactivateCandidates(ctx context.Context) error { +func (h *Housekeeping) deactivateCandidates( + ctx context.Context, + housekeepingLastProjectID types.ID, +) (types.ID, error) { start := time.Now() locker, err := h.coordinator.NewLocker(ctx, deactivateCandidatesKey) if err != nil { - return err + return database.DefaultProjectID, err } if err := locker.Lock(ctx); err != nil { - return err + return database.DefaultProjectID, err } defer func() { @@ -159,12 +148,14 @@ func (h *Housekeeping) deactivateCandidates(ctx context.Context) error { } }() - candidates, err := h.database.FindDeactivateCandidates( + lastProjectID, candidates, err := h.database.FindDeactivateCandidates( ctx, h.candidatesLimitPerProject, + h.projectFetchSize, + housekeepingLastProjectID, ) if err != nil { - return err + return database.DefaultProjectID, err } deactivatedCount := 0 @@ -175,7 +166,7 @@ func (h *Housekeeping) deactivateCandidates(ctx context.Context) error { clientInfo.ProjectID, clientInfo.ID, ); err != nil { - return err + return database.DefaultProjectID, err } deactivatedCount++ @@ -190,5 +181,5 @@ func (h *Housekeeping) deactivateCandidates(ctx context.Context) error { ) } - return nil + return lastProjectID, nil } diff --git a/server/backend/sync/memory/pubsub_test.go b/server/backend/sync/memory/pubsub_test.go index 138ec680c..f1152bfcc 100644 --- a/server/backend/sync/memory/pubsub_test.go +++ b/server/backend/sync/memory/pubsub_test.go @@ -39,7 +39,7 @@ func TestPubSub(t *testing.T) { pubSub := memory.NewPubSub() id := types.ID(t.Name() + "id") docEvent := sync.DocEvent{ - Type: types.DocumentsWatchedEvent, + Type: types.DocumentWatchedEvent, Publisher: idB, DocumentID: id, } diff --git a/server/config.go b/server/config.go index c875102af..63d51af41 100644 --- a/server/config.go +++ b/server/config.go @@ -42,6 +42,7 @@ const ( DefaultHousekeepingInterval = 30 * time.Second DefaultHousekeepingCandidatesLimitPerProject = 500 + 
DefaultHousekeepingProjectFetchSize = 100 DefaultMongoConnectionURI = "mongodb://localhost:27017" DefaultMongoConnectionTimeout = 5 * time.Second @@ -63,6 +64,8 @@ const ( DefaultAuthWebhookCacheSize = 5000 DefaultAuthWebhookCacheAuthTTL = 10 * time.Second DefaultAuthWebhookCacheUnauthTTL = 10 * time.Second + DefaultProjectInfoCacheSize = 256 + DefaultProjectInfoCacheTTL = 10 * time.Minute DefaultHostname = "" ) @@ -201,6 +204,14 @@ func (c *Config) ensureDefaultValue() { c.Backend.AuthWebhookCacheUnauthTTL = DefaultAuthWebhookCacheUnauthTTL.String() } + if c.Backend.ProjectInfoCacheSize == 0 { + c.Backend.ProjectInfoCacheSize = DefaultProjectInfoCacheSize + } + + if c.Backend.ProjectInfoCacheTTL == "" { + c.Backend.ProjectInfoCacheTTL = DefaultProjectInfoCacheTTL.String() + } + if c.Mongo != nil { if c.Mongo.ConnectionURI == "" { c.Mongo.ConnectionURI = DefaultMongoConnectionURI @@ -231,6 +242,7 @@ func newConfig(port int, profilingPort int) *Config { Housekeeping: &housekeeping.Config{ Interval: DefaultHousekeepingInterval.String(), CandidatesLimitPerProject: DefaultHousekeepingCandidatesLimitPerProject, + ProjectFetchSize: DefaultHousekeepingProjectFetchSize, }, Backend: &backend.Config{ ClientDeactivateThreshold: DefaultClientDeactivateThreshold, diff --git a/server/config.sample.yml b/server/config.sample.yml index 4c48b7128..e682b3fc3 100644 --- a/server/config.sample.yml +++ b/server/config.sample.yml @@ -36,6 +36,9 @@ Housekeeping: # CandidatesLimitPerProject is the maximum number of candidates to be returned per project (default: 100). CandidatesLimitPerProject: 100 + # ProjectFetchSize is the maximum number of projects to be returned to deactivate candidates. (default: 100). + ProjectFetchSize: 100 + # Backend is the configuration for the backend of Yorkie. Backend: # UseDefaultProject is whether to use the default project (default: true). @@ -74,6 +77,12 @@ Backend: # AuthWebhookCacheUnauthTTL is the TTL value to set when caching the unauthorized result. 
AuthWebhookCacheUnauthTTL: "10s" + # ProjectInfoCacheSize is the size of the project info cache. + ProjectInfoCacheSize: 256 + + # ProjectInfoCacheTTL is the TTL value to set when caching the project info. + ProjectInfoCacheTTL: "10m" + # Hostname is the hostname of the server. If not provided, the hostname will be # determined automatically by the OS (Optional, default: os.Hostname()). Hostname: "" diff --git a/server/config_test.go b/server/config_test.go index eb0cb9661..43d2103a7 100644 --- a/server/config_test.go +++ b/server/config_test.go @@ -77,5 +77,9 @@ func TestNewConfigFromFile(t *testing.T) { authWebhookCacheUnauthTTL, err := time.ParseDuration(conf.Backend.AuthWebhookCacheUnauthTTL) assert.NoError(t, err) assert.Equal(t, authWebhookCacheUnauthTTL, server.DefaultAuthWebhookCacheUnauthTTL) + + projectInfoCacheTTL, err := time.ParseDuration(conf.Backend.ProjectInfoCacheTTL) + assert.NoError(t, err) + assert.Equal(t, projectInfoCacheTTL, server.DefaultProjectInfoCacheTTL) }) } diff --git a/server/packs/history.go b/server/packs/history.go index 2295d08ea..84b0a402e 100644 --- a/server/packs/history.go +++ b/server/packs/history.go @@ -41,6 +41,7 @@ func FindChanges( snapshotInfo, err := be.DB.FindClosestSnapshotInfo( ctx, docInfo.ID, minSyncedSeqInfo.ServerSeq+be.Config.SnapshotInterval, + false, ) if err != nil { return nil, err diff --git a/server/packs/packs.go b/server/packs/packs.go index 159abf1e5..180644891 100644 --- a/server/packs/packs.go +++ b/server/packs/packs.go @@ -132,7 +132,7 @@ func PushPull( ctx, publisherID, sync.DocEvent{ - Type: types.DocumentsChangedEvent, + Type: types.DocumentChangedEvent, Publisher: publisherID, DocumentID: docInfo.ID, }, @@ -181,7 +181,7 @@ func BuildDocumentForServerSeq( docInfo *database.DocInfo, serverSeq int64, ) (*document.InternalDocument, error) { - snapshotInfo, err := be.DB.FindClosestSnapshotInfo(ctx, docInfo.ID, serverSeq) + snapshotInfo, err := be.DB.FindClosestSnapshotInfo(ctx, docInfo.ID, 
serverSeq, true) if err != nil { return nil, err } diff --git a/server/packs/pushpull.go b/server/packs/pushpull.go index 2eb0b95ae..e3c5151e2 100644 --- a/server/packs/pushpull.go +++ b/server/packs/pushpull.go @@ -157,7 +157,7 @@ func pullSnapshot( } cpAfterPull := cpAfterPush.NextServerSeq(docInfo.ServerSeq) - snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.Presences()) + snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.AllPresences()) if err != nil { return nil, err } diff --git a/server/packs/snapshots.go b/server/packs/snapshots.go index bcdde64e1..ac56be161 100644 --- a/server/packs/snapshots.go +++ b/server/packs/snapshots.go @@ -33,16 +33,15 @@ func storeSnapshot( docInfo *database.DocInfo, minSyncedTicket *time.Ticket, ) error { - // 01. get the closest snapshot of this docInfo - // TODO: For performance issue, we only need to read the snapshot's metadata. - snapshotInfo, err := be.DB.FindClosestSnapshotInfo(ctx, docInfo.ID, docInfo.ServerSeq) + // 01. get the closest snapshot's metadata of this docInfo + snapshotMetadata, err := be.DB.FindClosestSnapshotInfo(ctx, docInfo.ID, docInfo.ServerSeq, false) if err != nil { return err } - if snapshotInfo.ServerSeq == docInfo.ServerSeq { + if snapshotMetadata.ServerSeq == docInfo.ServerSeq { return nil } - if docInfo.ServerSeq-snapshotInfo.ServerSeq < be.Config.SnapshotInterval { + if docInfo.ServerSeq-snapshotMetadata.ServerSeq < be.Config.SnapshotInterval { return nil } @@ -50,7 +49,7 @@ func storeSnapshot( changes, err := be.DB.FindChangesBetweenServerSeqs( ctx, docInfo.ID, - snapshotInfo.ServerSeq+1, + snapshotMetadata.ServerSeq+1, docInfo.ServerSeq, ) if err != nil { @@ -58,6 +57,14 @@ func storeSnapshot( } // 03. 
create document instance of the docInfo + snapshotInfo := snapshotMetadata + if snapshotMetadata.ID != "" { + snapshotInfo, err = be.DB.FindSnapshotInfoByID(ctx, snapshotInfo.ID) + if err != nil { + return err + } + } + doc, err := document.NewInternalDocumentFromSnapshot( docInfo.Key, snapshotInfo.ServerSeq, diff --git a/server/rpc/admin_server.go b/server/rpc/admin_server.go index 3487b1ee9..2e754ff25 100644 --- a/server/rpc/admin_server.go +++ b/server/rpc/admin_server.go @@ -254,7 +254,7 @@ func (s *adminServer) GetSnapshotMeta( return nil, err } - snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.Presences()) + snapshot, err := converter.SnapshotToBytes(doc.RootObject(), doc.AllPresences()) if err != nil { return nil, err } @@ -375,7 +375,7 @@ func (s *adminServer) RemoveDocumentByAdmin( ctx, publisherID, sync.DocEvent{ - Type: types.DocumentsChangedEvent, + Type: types.DocumentChangedEvent, Publisher: publisherID, DocumentID: docInfo.ID, }, diff --git a/server/rpc/interceptors/context.go b/server/rpc/interceptors/context.go index 62cb393c0..8f75d9fdf 100644 --- a/server/rpc/interceptors/context.go +++ b/server/rpc/interceptors/context.go @@ -28,7 +28,9 @@ import ( grpcstatus "google.golang.org/grpc/status" "github.com/yorkie-team/yorkie/api/types" + "github.com/yorkie-team/yorkie/pkg/cache" "github.com/yorkie-team/yorkie/server/backend" + "github.com/yorkie-team/yorkie/server/logging" "github.com/yorkie-team/yorkie/server/projects" "github.com/yorkie-team/yorkie/server/rpc/grpchelper" "github.com/yorkie-team/yorkie/server/rpc/metadata" @@ -36,13 +38,19 @@ import ( // ContextInterceptor is an interceptor for building additional context. type ContextInterceptor struct { - backend *backend.Backend + backend *backend.Backend + projectInfoCache *cache.LRUExpireCache[string, *types.Project] } // NewContextInterceptor creates a new instance of ContextInterceptor. 
func NewContextInterceptor(be *backend.Backend) *ContextInterceptor { + projectInfoCache, err := cache.NewLRUExpireCache[string, *types.Project](be.Config.ProjectInfoCacheSize) + if err != nil { + logging.DefaultLogger().Fatal("Failed to create project info cache: %v", err) + } return &ContextInterceptor{ - backend: be, + backend: be, + projectInfoCache: projectInfoCache, } } @@ -146,15 +154,19 @@ func (i *ContextInterceptor) buildContext(ctx context.Context) (context.Context, md.Authorization = authorization[0] } ctx = metadata.With(ctx, md) + cacheKey := md.APIKey // 02. building project - // TODO(hackerwins): Improve the performance of this function. - // Consider using a cache to store the info. - project, err := projects.GetProjectFromAPIKey(ctx, i.backend, md.APIKey) - if err != nil { - return nil, grpchelper.ToStatusError(err) + if cachedProjectInfo, ok := i.projectInfoCache.Get(cacheKey); ok { + ctx = projects.With(ctx, cachedProjectInfo) + } else { + project, err := projects.GetProjectFromAPIKey(ctx, i.backend, md.APIKey) + if err != nil { + return nil, grpchelper.ToStatusError(err) + } + i.projectInfoCache.Add(cacheKey, project, i.backend.Config.ParseProjectInfoCacheTTL()) + ctx = projects.With(ctx, project) } - ctx = projects.With(ctx, project) return ctx, nil } diff --git a/server/rpc/server_test.go b/server/rpc/server_test.go index e883d0e91..faad247e8 100644 --- a/server/rpc/server_test.go +++ b/server/rpc/server_test.go @@ -48,9 +48,9 @@ var ( defaultProjectName = "default" invalidSlugName = "@#$%^&*()_+" - nilClientID, _ = hex.DecodeString("000000000000000000000000") - emptyClientID, _ = hex.DecodeString("") - invalidClientID, _ = hex.DecodeString("invalid") + nilClientID = "000000000000000000000000" + emptyClientID = "" + invalidClientID = "invalid" testRPCServer *rpc.Server testRPCAddr = fmt.Sprintf("localhost:%d", helper.RPCPort) @@ -76,6 +76,8 @@ func TestMain(m *testing.M) { ClientDeactivateThreshold: helper.ClientDeactivateThreshold, 
SnapshotThreshold: helper.SnapshotThreshold, AuthWebhookCacheSize: helper.AuthWebhookSize, + ProjectInfoCacheSize: helper.ProjectInfoCacheSize, + ProjectInfoCacheTTL: helper.ProjectInfoCacheTTL.String(), AdminTokenDuration: helper.AdminTokenDuration, }, &mongo.Config{ ConnectionURI: helper.MongoConnectionURI, @@ -85,6 +87,7 @@ func TestMain(m *testing.M) { }, &housekeeping.Config{ Interval: helper.HousekeepingInterval.String(), CandidatesLimitPerProject: helper.HousekeepingCandidatesLimitPerProject, + ProjectFetchSize: helper.HousekeepingProjectFetchSize, }, met) if err != nil { log.Fatal(err) @@ -384,6 +387,7 @@ func TestSDKRPCServerBackend(t *testing.T) { ) assert.NoError(t, err) + actorID, _ := hex.DecodeString(activateResp.ClientId) resPack, err := testClient.AttachDocument( context.Background(), &api.AttachDocumentRequest{ @@ -395,7 +399,7 @@ func TestSDKRPCServerBackend(t *testing.T) { Id: &api.ChangeID{ ClientSeq: 1, Lamport: 1, - ActorId: activateResp.ClientId, + ActorId: actorID, }, }}, }, @@ -415,7 +419,7 @@ func TestSDKRPCServerBackend(t *testing.T) { Id: &api.ChangeID{ ClientSeq: 2, Lamport: 2, - ActorId: activateResp.ClientId, + ActorId: actorID, }, }}, }, @@ -435,7 +439,7 @@ func TestSDKRPCServerBackend(t *testing.T) { Id: &api.ChangeID{ ClientSeq: 3, Lamport: 3, - ActorId: activateResp.ClientId, + ActorId: actorID, }, }}, }, diff --git a/server/rpc/yorkie_server.go b/server/rpc/yorkie_server.go index 5bf8f918d..c0c2aef09 100644 --- a/server/rpc/yorkie_server.go +++ b/server/rpc/yorkie_server.go @@ -69,14 +69,8 @@ func (s *yorkieServer) ActivateClient( return nil, err } - pbClientID, err := cli.ID.Bytes() - if err != nil { - return nil, err - } - return &api.ActivateClientResponse{ - ClientKey: cli.Key, - ClientId: pbClientID, + ClientId: cli.ID.String(), }, nil } @@ -85,7 +79,7 @@ func (s *yorkieServer) DeactivateClient( ctx context.Context, req *api.DeactivateClientRequest, ) (*api.DeactivateClientResponse, error) { - actorID, err := 
time.ActorIDFromBytes(req.ClientId) + actorID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return nil, err } @@ -97,19 +91,12 @@ func (s *yorkieServer) DeactivateClient( } project := projects.From(ctx) - cli, err := clients.Deactivate(ctx, s.backend.DB, project.ID, types.IDFromActorID(actorID)) + _, err = clients.Deactivate(ctx, s.backend.DB, project.ID, types.IDFromActorID(actorID)) if err != nil { return nil, err } - pbClientID, err := cli.ID.Bytes() - if err != nil { - return nil, err - } - - return &api.DeactivateClientResponse{ - ClientId: pbClientID, - }, nil + return &api.DeactivateClientResponse{}, nil } // AttachDocument attaches the given document to the client. @@ -117,7 +104,7 @@ func (s *yorkieServer) AttachDocument( ctx context.Context, req *api.AttachDocumentRequest, ) (*api.AttachDocumentResponse, error) { - actorID, err := time.ActorIDFromBytes(req.ClientId) + actorID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return nil, err } @@ -186,7 +173,7 @@ func (s *yorkieServer) DetachDocument( ctx context.Context, req *api.DetachDocumentRequest, ) (*api.DetachDocumentResponse, error) { - actorID, err := time.ActorIDFromBytes(req.ClientId) + actorID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return nil, err } @@ -268,7 +255,7 @@ func (s *yorkieServer) PushPullChanges( ctx context.Context, req *api.PushPullChangesRequest, ) (*api.PushPullChangesResponse, error) { - actorID, err := time.ActorIDFromBytes(req.ClientId) + actorID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return nil, err } @@ -348,7 +335,7 @@ func (s *yorkieServer) WatchDocument( req *api.WatchDocumentRequest, stream api.YorkieService_WatchDocumentServer, ) error { - clientID, err := time.ActorIDFromBytes(req.ClientId) + clientID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return err } @@ -404,9 +391,9 @@ func (s *yorkieServer) WatchDocument( s.unwatchDoc(subscription, docID) }() - var pbClientIDs [][]byte + var pbClientIDs 
[]string for _, id := range clientIDs { - pbClientIDs = append(pbClientIDs, id.Bytes()) + pbClientIDs = append(pbClientIDs, id.String()) } if err := stream.Send(&api.WatchDocumentResponse{ Body: &api.WatchDocumentResponse_Initialization_{ @@ -434,7 +421,7 @@ func (s *yorkieServer) WatchDocument( Body: &api.WatchDocumentResponse_Event{ Event: &api.DocEvent{ Type: eventType, - Publisher: event.Publisher.Bytes(), + Publisher: event.Publisher.String(), }, }, }); err != nil { @@ -449,7 +436,7 @@ func (s *yorkieServer) RemoveDocument( ctx context.Context, req *api.RemoveDocumentRequest, ) (*api.RemoveDocumentResponse, error) { - actorID, err := time.ActorIDFromBytes(req.ClientId) + actorID, err := time.ActorIDFromHex(req.ClientId) if err != nil { return nil, err } @@ -530,7 +517,7 @@ func (s *yorkieServer) watchDoc( ctx, subscription.Subscriber(), sync.DocEvent{ - Type: types.DocumentsWatchedEvent, + Type: types.DocumentWatchedEvent, Publisher: subscription.Subscriber(), DocumentID: documentID, }, @@ -549,7 +536,7 @@ func (s *yorkieServer) unwatchDoc( ctx, subscription.Subscriber(), sync.DocEvent{ - Type: types.DocumentsUnwatchedEvent, + Type: types.DocumentUnwatchedEvent, Publisher: subscription.Subscriber(), DocumentID: documentID, }, diff --git a/server/users/users.go b/server/users/users.go index cf158a0ae..e24d383f0 100644 --- a/server/users/users.go +++ b/server/users/users.go @@ -38,7 +38,7 @@ func SignUp( return nil, fmt.Errorf("cannot hash password: %w", err) } - info, err := be.DB.CreateUserInfo(ctx, username, string(hashed)) + info, err := be.DB.CreateUserInfo(ctx, username, hashed) if err != nil { return nil, err } diff --git a/test/bench/grpc_bench_test.go b/test/bench/grpc_bench_test.go index 05650a643..254bed7f1 100644 --- a/test/bench/grpc_bench_test.go +++ b/test/bench/grpc_bench_test.go @@ -33,7 +33,6 @@ import ( "github.com/yorkie-team/yorkie/client" "github.com/yorkie-team/yorkie/pkg/document" "github.com/yorkie-team/yorkie/pkg/document/json" - 
"github.com/yorkie-team/yorkie/pkg/document/key" "github.com/yorkie-team/yorkie/pkg/document/presence" "github.com/yorkie-team/yorkie/server" "github.com/yorkie-team/yorkie/server/backend/database" @@ -105,7 +104,7 @@ func benchmarkUpdateProject(ctx context.Context, b *testing.B, cnt int, adminCli for i := 0; i < cnt; i++ { name := fmt.Sprintf("name%d", i) authWebhookURL := fmt.Sprintf("http://authWebhookURL%d", i) - authWebhookMethods := []string{} + var authWebhookMethods []string for _, m := range types.AuthMethods() { authWebhookMethods = append(authWebhookMethods, string(m)) } @@ -203,7 +202,7 @@ func BenchmarkRPC(b *testing.B) { ctx := context.Background() - d1 := document.New(key.Key(helper.TestDocKey(b))) + d1 := document.New(helper.TestDocKey(b)) err := c1.Attach(ctx, d1) assert.NoError(b, err) testKey1 := "testKey1" @@ -213,7 +212,7 @@ func BenchmarkRPC(b *testing.B) { }) assert.NoError(b, err) - d2 := document.New(key.Key(helper.TestDocKey(b))) + d2 := document.New(helper.TestDocKey(b)) err = c2.Attach(ctx, d2) assert.NoError(b, err) testKey2 := "testKey2" @@ -268,8 +267,8 @@ func BenchmarkRPC(b *testing.B) { defer cleanupClients(b, clients) ctx := context.Background() - doc1 := document.New(key.Key(helper.TestDocKey(b))) - doc2 := document.New(key.Key(helper.TestDocKey(b))) + doc1 := document.New(helper.TestDocKey(b)) + doc2 := document.New(helper.TestDocKey(b)) err := doc1.Update(func(root *json.Object, p *presence.Presence) error { text := root.SetNewText("k1") diff --git a/test/helper/helper.go b/test/helper/helper.go index ba6c2da1f..745b5bb06 100644 --- a/test/helper/helper.go +++ b/test/helper/helper.go @@ -60,6 +60,7 @@ var ( AdminPassword = server.DefaultAdminPassword HousekeepingInterval = 10 * gotime.Second HousekeepingCandidatesLimitPerProject = 10 + HousekeepingProjectFetchSize = 10 AdminTokenDuration = "10s" ClientDeactivateThreshold = "10s" @@ -69,6 +70,8 @@ var ( AuthWebhookSize = 100 AuthWebhookCacheAuthTTL = 10 * gotime.Second 
AuthWebhookCacheUnauthTTL = 10 * gotime.Second + ProjectInfoCacheSize = 256 + ProjectInfoCacheTTL = 5 * gotime.Second MongoConnectionURI = "mongodb://localhost:27017" MongoConnectionTimeout = "5s" @@ -111,9 +114,9 @@ func TextChangeContext(root *crdt.Root) *change.Context { ) } -// IssuePos is a helper function that issues a new CRDTTreePos. -func IssuePos(change *change.Context, offset ...int) *crdt.TreePos { - pos := &crdt.TreePos{ +// IssuePos is a helper function that issues a new CRDTTreeNodeID. +func IssuePos(change *change.Context, offset ...int) *crdt.TreeNodeID { + pos := &crdt.TreeNodeID{ CreatedAt: change.IssueTimeTicket(), Offset: 0, } @@ -130,35 +133,20 @@ func IssueTime(change *change.Context) *time.Ticket { return change.IssueTimeTicket() } -// ListEqual is a helper function that checks the nodes in the RGA in Tree. -func ListEqual(t assert.TestingT, tree *crdt.Tree, expected []string) bool { - var nodes []*crdt.TreeNode - for _, node := range tree.Nodes() { - nodes = append(nodes, node) - } - - var actual []string - for _, node := range nodes { - actual = append(actual, ToDiagnostic(node)) - } - - assert.Equal(t, expected, actual) - - return true -} - // NodesBetweenEqual is a helper function that checks the nodes between the given // indexes. 
func NodesBetweenEqual(t assert.TestingT, tree *index.Tree[*crdt.TreeNode], from, to int, expected []string) bool { var nodes []*crdt.TreeNode - err := tree.NodesBetween(from, to, func(node *crdt.TreeNode) { + var contains []index.TagContained + err := tree.NodesBetween(from, to, func(node *crdt.TreeNode, contain index.TagContained) { nodes = append(nodes, node) + contains = append(contains, contain) }) assert.NoError(t, err) var actual []string - for _, node := range nodes { - actual = append(actual, ToDiagnostic(node)) + for i := 0; i < len(nodes); i++ { + actual = append(actual, fmt.Sprintf("%s:%s", ToDiagnostic(nodes[i]), contains[i].ToString())) } assert.Equal(t, expected, actual) @@ -223,6 +211,7 @@ func TestConfig() *server.Config { Housekeeping: &housekeeping.Config{ Interval: HousekeepingInterval.String(), CandidatesLimitPerProject: HousekeepingCandidatesLimitPerProject, + ProjectFetchSize: HousekeepingProjectFetchSize, }, Backend: &backend.Config{ AdminUser: server.DefaultAdminUser, @@ -238,6 +227,8 @@ func TestConfig() *server.Config { AuthWebhookCacheSize: AuthWebhookSize, AuthWebhookCacheAuthTTL: AuthWebhookCacheAuthTTL.String(), AuthWebhookCacheUnauthTTL: AuthWebhookCacheUnauthTTL.String(), + ProjectInfoCacheSize: ProjectInfoCacheSize, + ProjectInfoCacheTTL: ProjectInfoCacheTTL.String(), }, Mongo: &mongo.Config{ ConnectionURI: MongoConnectionURI, @@ -264,8 +255,8 @@ func TestDocKey(t testing.TB) key.Key { return key.Key(name) } - if len(name) > 60 { - name = name[:60] + if len(name) > 100 { + name = name[:100] } sb := strings.Builder{} diff --git a/test/integration/auth_webhook_test.go b/test/integration/auth_webhook_test.go index f5bb1aa5b..73b05ec38 100644 --- a/test/integration/auth_webhook_test.go +++ b/test/integration/auth_webhook_test.go @@ -161,6 +161,8 @@ func TestProjectAuthWebhook(t *testing.T) { ) assert.NoError(t, err) + projectInfoCacheTTL := 5 * time.Second + time.Sleep(projectInfoCacheTTL) cli, err := client.Dial( svr.RPCAddr(), 
client.WithAPIKey(project.PublicKey), diff --git a/test/integration/document_test.go b/test/integration/document_test.go index 2a5981c3d..c190fe9ae 100644 --- a/test/integration/document_test.go +++ b/test/integration/document_test.go @@ -30,7 +30,6 @@ import ( "github.com/yorkie-team/yorkie/pkg/document" "github.com/yorkie-team/yorkie/pkg/document/innerpresence" "github.com/yorkie-team/yorkie/pkg/document/json" - "github.com/yorkie-team/yorkie/pkg/document/key" "github.com/yorkie-team/yorkie/pkg/document/presence" "github.com/yorkie-team/yorkie/test/helper" ) @@ -68,7 +67,7 @@ func TestDocument(t *testing.T) { assert.True(t, doc2.IsAttached()) assert.Equal(t, `{"k1":"v2"}`, doc2.Marshal()) - doc3 := document.New(key.Key("invalid$key")) + doc3 := document.New("invalid$key") err = c1.Attach(ctx, doc3) assert.Error(t, err) }) diff --git a/test/integration/presence_test.go b/test/integration/presence_test.go index 1ff65809c..e2b37aa69 100644 --- a/test/integration/presence_test.go +++ b/test/integration/presence_test.go @@ -58,14 +58,14 @@ func TestPresence(t *testing.T) { p.Set("updated", "true") return nil })) - encoded, err := gojson.Marshal(d1.Presences()) + encoded, err := gojson.Marshal(d1.AllPresences()) assert.NoError(t, err) assert.Equal(t, fmt.Sprintf(`{"%s":{"updated":"true"}}`, c1.ID()), string(encoded)) // 03 Sync documents and check that the presence is updated on the other client assert.NoError(t, c1.Sync(ctx)) assert.NoError(t, c2.Sync(ctx)) - encoded, err = gojson.Marshal(d2.Presences()) + encoded, err = gojson.Marshal(d2.AllPresences()) assert.NoError(t, err) assert.Equal(t, fmt.Sprintf(`{"%s":{"updated":"true"},"%s":{}}`, c1.ID(), c2.ID()), string(encoded)) }) @@ -88,14 +88,14 @@ func TestPresence(t *testing.T) { return nil })) } - encoded, err := gojson.Marshal(d1.Presences()) + encoded, err := gojson.Marshal(d1.AllPresences()) assert.NoError(t, err) assert.Equal(t, fmt.Sprintf(`{"%s":{"updated":"9"}}`, c1.ID()), string(encoded)) // 03 Sync 
documents and check that the presence is updated on the other client assert.NoError(t, c1.Sync(ctx)) assert.NoError(t, c2.Sync(ctx)) - encoded, err = gojson.Marshal(d2.Presences()) + encoded, err = gojson.Marshal(d2.AllPresences()) assert.NoError(t, err) assert.Equal(t, fmt.Sprintf(`{"%s":{"updated":"9"},"%s":{}}`, c1.ID(), c2.ID()), string(encoded)) }) @@ -112,15 +112,15 @@ func TestPresence(t *testing.T) { // 02. Check that the presence is updated on the other client. assert.NoError(t, c1.Sync(ctx)) assert.Equal(t, innerpresence.Presence{"key": c1.Key()}, d1.MyPresence()) - assert.Equal(t, innerpresence.Presence{"key": c2.Key()}, d1.Presence(c2.ID().String())) + assert.Equal(t, innerpresence.Presence{"key": c2.Key()}, d1.PresenceForTest(c2.ID().String())) assert.Equal(t, innerpresence.Presence{"key": c2.Key()}, d2.MyPresence()) - assert.Equal(t, innerpresence.Presence{"key": c1.Key()}, d2.Presence(c1.ID().String())) + assert.Equal(t, innerpresence.Presence{"key": c1.Key()}, d2.PresenceForTest(c1.ID().String())) // 03. The first client detaches the document and check that the presence is updated on the other client. assert.NoError(t, c1.Detach(ctx, d1)) assert.NoError(t, c2.Sync(ctx)) assert.Equal(t, innerpresence.Presence{"key": c2.Key()}, d2.MyPresence()) - assert.Nil(t, d2.Presence(c1.ID().String())) + assert.Nil(t, d2.PresenceForTest(c1.ID().String())) }) t.Run("presence-related events test", func(t *testing.T) { @@ -249,7 +249,7 @@ func TestPresence(t *testing.T) { }) } - if len(responsePairs) == 4 { + if len(responsePairs) == 3 { return } } @@ -285,23 +285,16 @@ func TestPresence(t *testing.T) { // 05. Unwatch the second client's document. 
expected = append(expected, watchResponsePair{ - Type: client.PresenceChanged, + Type: client.DocumentUnwatched, Presences: map[string]innerpresence.Presence{ - c2.ID().String(): nil, + c2.ID().String(): d2.MyPresence(), }, }) assert.NoError(t, c2.Detach(ctx, d2)) assert.NoError(t, c1.Sync(ctx, client.WithDocKey(helper.TestDocKey(t)))) + wgEvents.Wait() - expected = append(expected, watchResponsePair{ - Type: client.DocumentUnwatched, - Presences: map[string]innerpresence.Presence{ - c2.ID().String(): nil, - }, - }) cancel2() - - wgEvents.Wait() assert.Equal(t, expected, responsePairs) }) diff --git a/test/integration/tree_test.go b/test/integration/tree_test.go index 2d6d36edb..ea62d13e9 100644 --- a/test/integration/tree_test.go +++ b/test/integration/tree_test.go @@ -27,6 +27,7 @@ import ( "github.com/yorkie-team/yorkie/pkg/document" "github.com/yorkie-team/yorkie/pkg/document/json" "github.com/yorkie-team/yorkie/pkg/document/presence" + "github.com/yorkie-team/yorkie/pkg/index" "github.com/yorkie-team/yorkie/test/helper" ) @@ -112,7 +113,7 @@ func TestTree(t *testing.T) { }) assert.Equal(t, "

ab

cdefgh
", root.GetTree("t").ToXML()) assert.Equal(t, 18, root.GetTree("t").Len()) - // TODO(krapie): add listEqual test later + return nil }) assert.NoError(t, err) @@ -173,6 +174,30 @@ func TestTree(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "

ab

", doc.Root().GetTree("t").ToXML()) + err = doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "X", + }) + assert.Equal(t, "

aXb

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 4, nil) + assert.Equal(t, "

", root.GetTree("t").ToXML()) + + return nil + }) + assert.NoError(t, err) + assert.Equal(t, "

", doc.Root().GetTree("t").ToXML()) + err = doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ Type: "doc", @@ -200,7 +225,7 @@ func TestTree(t *testing.T) { assert.NoError(t, err) }) - t.Run("edit its content with path", func(t *testing.T) { + t.Run("edit content with path test", func(t *testing.T) { doc := document.New(helper.TestDocKey(t)) err := doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ @@ -231,16 +256,48 @@ func TestTree(t *testing.T) { assert.Equal(t, "

aXb!

", root.GetTree("t").ToXML()) root.GetTree("t").EditByPath([]int{0, 0, 1}, []int{0, 0, 1}, &json.TreeNode{ - Type: "tn", - Children: []json.TreeNode{}, + Type: "tn", + Children: []json.TreeNode{{ + Type: "text", Value: "cd", + }}, + }) + assert.Equal(t, "

aXb!cd

", root.GetTree("t").ToXML()) + + root.GetTree("t").EditByPath([]int{0, 1}, []int{0, 1}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{{ + Type: "text", Value: "q", + }}, + }}, + }) + assert.Equal(t, "

aXb!cd

q

", root.GetTree("t").ToXML()) + + root.GetTree("t").EditByPath([]int{0, 1, 0, 0}, []int{0, 1, 0, 0}, &json.TreeNode{ + Type: "text", + Value: "a", + }) + assert.Equal(t, "

aXb!cd

aq

", root.GetTree("t").ToXML()) + + root.GetTree("t").EditByPath([]int{0, 1, 0, 2}, []int{0, 1, 0, 2}, &json.TreeNode{ + Type: "text", + Value: "B", }) - assert.Equal(t, "

aXb!

", root.GetTree("t").ToXML()) + assert.Equal(t, "

aXb!cd

aqB

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").EditByPath([]int{0, 0, 4}, []int{0, 0, 4}, &json.TreeNode{ + Type: "tn", + Children: []json.TreeNode{}, + }) + return nil + })}, index.ErrUnreachablePath) return nil }) assert.NoError(t, err) }) - t.Run("edit its content when multi tree nodes passed", func(t *testing.T) { + t.Run("edit content with path test 2", func(t *testing.T) { doc := document.New(helper.TestDocKey(t)) err := doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ @@ -249,86 +306,87 @@ func TestTree(t *testing.T) { Type: "tc", Children: []json.TreeNode{{ Type: "p", Children: []json.TreeNode{{ - Type: "tn", Children: []json.TreeNode{{ - Type: "text", Value: "ab", - }}, + Type: "tn", Children: []json.TreeNode{}, }}, }}, }}, }) - assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + assert.Equal(t, "

", root.GetTree("t").ToXML()) - root.GetTree("t").EditByPath([]int{0, 0, 0, 1}, []int{0, 0, 0, 1}, &json.TreeNode{ - Type: "text", - Value: "X"}, &json.TreeNode{ + root.GetTree("t").EditByPath([]int{0, 0, 0, 0}, []int{0, 0, 0, 0}, &json.TreeNode{ Type: "text", - Value: "X", + Value: "a", }) - assert.Equal(t, "

aXXb

", root.GetTree("t").ToXML()) + assert.Equal(t, "

a

", root.GetTree("t").ToXML()) root.GetTree("t").EditByPath([]int{0, 1}, []int{0, 1}, &json.TreeNode{ - Type: "p", - Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "st"}}}}}, - &json.TreeNode{ - Type: "p", - Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "xt"}}}}, - }) - assert.Equal(t, "

aXXb

test

text

", root.GetTree("t").ToXML()) - - root.GetTree("t").EditByPath([]int{0, 3}, []int{0, 3}, &json.TreeNode{ - Type: "p", - Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "st"}}}}}, - &json.TreeNode{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "xt"}}}) - assert.Equal(t, "

aXXb

test

text

test

text
", root.GetTree("t").ToXML()) + Type: "p", + Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{}, + }}, + }) + assert.Equal(t, "

a

", root.GetTree("t").ToXML()) - return nil - }) - assert.NoError(t, err) - }) + root.GetTree("t").EditByPath([]int{0, 1, 0, 0}, []int{0, 1, 0, 0}, &json.TreeNode{ + Type: "text", + Value: "b", + }) + assert.Equal(t, "

a

b

", root.GetTree("t").ToXML()) - t.Run("edit its content with attributes test", func(t *testing.T) { - doc := document.New(helper.TestDocKey(t)) - err := doc.Update(func(root *json.Object, p *presence.Presence) error { - root.SetNewTree("t", &json.TreeNode{Type: "doc"}) - assert.Equal(t, "", root.GetTree("t").ToXML()) + root.GetTree("t").EditByPath([]int{0, 2}, []int{0, 2}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{}, + }}, + }) + assert.Equal(t, "

a

b

", root.GetTree("t").ToXML()) - root.GetTree("t").Edit(0, 0, &json.TreeNode{ - Type: "p", - Attributes: map[string]string{"bold": "true"}, - Children: []json.TreeNode{{Type: "text", Value: "ab"}}, + root.GetTree("t").EditByPath([]int{0, 2, 0, 0}, []int{0, 2, 0, 0}, &json.TreeNode{ + Type: "text", + Value: "c", }) - assert.Equal(t, `

ab

`, root.GetTree("t").ToXML()) + assert.Equal(t, "

a

b

c

", root.GetTree("t").ToXML()) - root.GetTree("t").Edit(4, 4, &json.TreeNode{ - Type: "p", - Attributes: map[string]string{"italic": "true"}, - Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + root.GetTree("t").EditByPath([]int{0, 3}, []int{0, 3}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{}, + }}, }) - assert.Equal(t, `

ab

cd

`, root.GetTree("t").ToXML()) + assert.Equal(t, "

a

b

c

", root.GetTree("t").ToXML()) - root.GetTree("t").Edit(2, 6, nil) - assert.Equal(t, `

ad

`, root.GetTree("t").ToXML()) + root.GetTree("t").EditByPath([]int{0, 3, 0, 0}, []int{0, 3, 0, 0}, &json.TreeNode{ + Type: "text", + Value: "d", + }) + assert.Equal(t, "

a

b

c

d

", root.GetTree("t").ToXML()) + root.GetTree("t").EditByPath([]int{0, 3}, []int{0, 3}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{}, + }}, + }) + assert.Equal(t, "

a

b

c

d

", root.GetTree("t").ToXML()) return nil }) assert.NoError(t, err) - assert.Equal(t, `

ad

`, doc.Root().GetTree("t").ToXML()) }) - t.Run("sync with other clients test", func(t *testing.T) { + t.Run("sync its content with other clients test", func(t *testing.T) { ctx := context.Background() d1 := document.New(helper.TestDocKey(t)) assert.NoError(t, c1.Attach(ctx, d1)) assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ - Type: "root", + Type: "doc", Children: []json.TreeNode{{ Type: "p", - Children: []json.TreeNode{{Type: "text", Value: "Hello"}}, + Children: []json.TreeNode{{Type: "text", Value: "hello"}}, }}, }) - assert.Equal(t, "

Hello

", root.GetTree("t").ToXML()) + assert.Equal(t, "

hello

", root.GetTree("t").ToXML()) return nil })) @@ -336,147 +394,1916 @@ func TestTree(t *testing.T) { d2 := document.New(helper.TestDocKey(t)) assert.NoError(t, c2.Attach(ctx, d2)) - assert.Equal(t, "

Hello

", d1.Root().GetTree("t").ToXML()) - assert.Equal(t, "

Hello

", d2.Root().GetTree("t").ToXML()) - }) - - t.Run("set attributes test", func(t *testing.T) { - ctx := context.Background() - d1 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c1.Attach(ctx, d1)) + assert.Equal(t, "

hello

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

hello

", d2.Root().GetTree("t").ToXML()) assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { - root.SetNewTree("t", &json.TreeNode{ - Type: "root", - Children: []json.TreeNode{ - {Type: "p", Children: []json.TreeNode{{Type: "text", Value: "ab"}}}, - {Type: "p", Attributes: map[string]string{"italic": "true"}, Children: []json.TreeNode{{Type: "text", Value: "cd"}}}, - }, + root.GetTree("t").Edit(7, 7, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "yorkie"}}, }) return nil })) - assert.NoError(t, c1.Sync(ctx)) - assert.Equal(t, `

ab

cd

`, d1.Root().GetTree("t").ToXML()) - - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Style(3, 4, map[string]string{"bold": "true"}) - return nil - })) - - assert.NoError(t, c1.Sync(ctx)) - d2 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c2.Attach(ctx, d2)) - - assert.Equal(t, `

ab

cd

`, d1.Root().GetTree("t").ToXML()) - assert.Equal(t, `

ab

cd

`, d2.Root().GetTree("t").ToXML()) - - assert.Equal(t, `{"type":"root","children":[{"type":"p","children":[{"type":"text","value":"ab"}],"attributes":{"bold":"true"}},{"type":"p","children":[{"type":"text","value":"cd"}],"attributes":{"italic":"true"}}]}`, d1.Root().GetTree("t").Marshal()) - assert.Equal(t, `{"type":"root","children":[{"type":"p","children":[{"type":"text","value":"ab"}],"attributes":{"bold":"true"}},{"type":"p","children":[{"type":"text","value":"cd"}],"attributes":{"italic":"true"}}]}`, d2.Root().GetTree("t").Marshal()) + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

hello

yorkie

", d1.Root().GetTree("t").ToXML()) }) - t.Run("insert inline content to the same position(left) concurrently test", func(t *testing.T) { - ctx := context.Background() - d1 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c1.Attach(ctx, d1)) - - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + t.Run("insert multiple text nodes test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ - Type: "root", + Type: "doc", Children: []json.TreeNode{{ Type: "p", - Children: []json.TreeNode{{Type: "text", Value: "12"}}, + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, }}, }) - return nil - })) - assert.NoError(t, c1.Sync(ctx)) - assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) - - d2 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c2.Attach(ctx, d2)) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "text", Value: "A"}) - return nil - })) - assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "text", Value: "B"}) + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "text", + Value: "c", + }, &json.TreeNode{ + Type: "text", + Value: "d", + }) + assert.Equal(t, "

abcd

", root.GetTree("t").ToXML()) + return nil - })) - assert.Equal(t, "

A12

", d1.Root().GetTree("t").ToXML()) - assert.Equal(t, "

B12

", d2.Root().GetTree("t").ToXML()) - - t.Skip("TODO(krapie): find bug on concurrent insert inline content to the same position(left)") - syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + }) + assert.NoError(t, err) }) - t.Run("insert inline content to the same position(middle) concurrently", func(t *testing.T) { - ctx := context.Background() - d1 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c1.Attach(ctx, d1)) - - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + t.Run("insert multiple element nodes test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ - Type: "root", + Type: "doc", Children: []json.TreeNode{{ Type: "p", - Children: []json.TreeNode{{Type: "text", Value: "12"}}, + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, }}, }) - return nil - })) - assert.NoError(t, c1.Sync(ctx)) - assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) - - d2 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c2.Attach(ctx, d2)) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "text", Value: "A"}) - return nil - })) - assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "text", Value: "B"}) + root.GetTree("t").Edit(4, 4, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + }, &json.TreeNode{ + Type: "i", + Children: []json.TreeNode{{Type: "text", Value: "fg"}}, + }) + assert.Equal(t, "

ab

cd

fg
", root.GetTree("t").ToXML()) + return nil - })) - assert.Equal(t, "

1A2

", d1.Root().GetTree("t").ToXML()) - assert.Equal(t, "

1B2

", d2.Root().GetTree("t").ToXML()) - - syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + }) + assert.NoError(t, err) }) - t.Run("insert inline content to the same position(right) concurrently", func(t *testing.T) { - ctx := context.Background() - d1 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c1.Attach(ctx, d1)) - - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + t.Run("edit its content with path when multi tree nodes passed", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { root.SetNewTree("t", &json.TreeNode{ - Type: "root", + Type: "doc", Children: []json.TreeNode{{ - Type: "p", - Children: []json.TreeNode{{Type: "text", Value: "12"}}, + Type: "tc", + Children: []json.TreeNode{{ + Type: "p", Children: []json.TreeNode{{ + Type: "tn", Children: []json.TreeNode{{ + Type: "text", Value: "ab", + }}, + }}, + }}, }}, }) - return nil - })) - assert.NoError(t, c1.Sync(ctx)) - assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) - d2 := document.New(helper.TestDocKey(t)) - assert.NoError(t, c2.Attach(ctx, d2)) + root.GetTree("t").EditByPath([]int{0, 0, 0, 1}, []int{0, 0, 0, 1}, &json.TreeNode{ + Type: "text", + Value: "X"}, &json.TreeNode{ + Type: "text", + Value: "X", + }) + assert.Equal(t, "

aXXb

", root.GetTree("t").ToXML()) - assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "A"}) - return nil - })) - assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { - root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "B"}) - return nil - })) - assert.Equal(t, "

12A

", d1.Root().GetTree("t").ToXML()) - assert.Equal(t, "

12B

", d2.Root().GetTree("t").ToXML()) + root.GetTree("t").EditByPath([]int{0, 1}, []int{0, 1}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "st"}}}}}, + &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "xt"}}}}, + }) + assert.Equal(t, "

aXXb

test

text

", root.GetTree("t").ToXML()) - syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + root.GetTree("t").EditByPath([]int{0, 3}, []int{0, 3}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "st"}}}}}, + &json.TreeNode{Type: "tn", Children: []json.TreeNode{{Type: "text", Value: "te"}, {Type: "text", Value: "xt"}}}) + assert.Equal(t, "

aXXb

test

text

test

text
", root.GetTree("t").ToXML()) + + return nil + }) + assert.NoError(t, err) + }) + + t.Run("detecting error for empty text test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "ab", + }}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "text", + Value: "c"}, &json.TreeNode{ + Type: "text", + Value: "", + }) + return nil + })}, json.ErrEmptyTextNode) + return nil + }) + assert.NoError(t, err) + }) + + t.Run("detecting error for mixed type insertion test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "ab", + }}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{}}, &json.TreeNode{ + Type: "text", + Value: "d", + }) + return nil + })}, json.ErrMixedNodeType) + return nil + }) + assert.NoError(t, err) + }) + + t.Run("detecting correct error order test 1", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "ab", + }}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "c", + },{ + Type: "text", + Value: "", + }}}, &json.TreeNode{ + Type: "text", + Value: "d", + }) + return nil + })}, json.ErrMixedNodeType) + return nil + }) + assert.NoError(t, err) + }) + + t.Run("detecting correct error order test 2", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "ab", + }}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "c", + }}}, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "", + }}, + }) + return nil + })}, json.ErrEmptyTextNode) + return nil + }) + assert.NoError(t, err) + }) + + t.Run("detecting correct error order test 3", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "doc", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "ab", + }}, + }}, + }) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + assert.Panics(t, func() {doc.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "text", + Value: "d", + }, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{ + Type: "text", + Value: "c", + }}, + }) + return nil + })}, json.ErrMixedNodeType) + return nil + }) + assert.NoError(t, err) + }) + + t.Run("edit its content with attributes test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{Type: "doc"}) + assert.Equal(t, "", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(0, 0, &json.TreeNode{ + Type: "p", + Attributes: map[string]string{"bold": "true"}, + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, + }) + assert.Equal(t, `

ab

`, root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(4, 4, &json.TreeNode{ + Type: "p", + Attributes: map[string]string{"italic": "true"}, + Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + }) + assert.Equal(t, `

ab

cd

`, root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 6, nil) + assert.Equal(t, `

a

d

`, root.GetTree("t").ToXML()) + // TODO(sejongk): Use the below assertion after implementing Tree.Move. + // assert.Equal(t, `

ad

`, root.GetTree("t").ToXML()) + + return nil + }) + assert.NoError(t, err) + assert.Equal(t, `

a

d

`, doc.Root().GetTree("t").ToXML()) + // TODO(sejongk): Use the below assertion after implementing Tree.Move. + // assert.Equal(t, `

ad

`, doc.Root().GetTree("t").ToXML()) + }) + + t.Run("set attributes test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{ + {Type: "p", Children: []json.TreeNode{{Type: "text", Value: "ab"}}}, + {Type: "p", Attributes: map[string]string{"italic": "true"}, Children: []json.TreeNode{{Type: "text", Value: "cd"}}}, + }, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, `

ab

cd

`, d1.Root().GetTree("t").ToXML()) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + // NOTE(sejongk): 0, 4 -> 0,1 / 3,4 + root.GetTree("t").Style(0, 4, map[string]string{"bold": "true"}) + return nil + })) + + assert.NoError(t, c1.Sync(ctx)) + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.Equal(t, `

ab

cd

`, d1.Root().GetTree("t").ToXML()) + assert.Equal(t, `

ab

cd

`, d2.Root().GetTree("t").ToXML()) + + assert.Equal(t, `{"type":"root","children":[{"type":"p","children":[{"type":"text","value":"ab"}],"attributes":{"bold":"true"}},{"type":"p","children":[{"type":"text","value":"cd"}],"attributes":{"italic":"true"}}]}`, d1.Root().GetTree("t").Marshal()) + assert.Equal(t, `{"type":"root","children":[{"type":"p","children":[{"type":"text","value":"ab"}],"attributes":{"bold":"true"}},{"type":"p","children":[{"type":"text","value":"cd"}],"attributes":{"italic":"true"}}]}`, d2.Root().GetTree("t").Marshal()) + }) + + // Concurrent editing, overlapping range test + t.Run("concurrently delete overlapping elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{}, + }, { + Type: "i", + Children: []json.TreeNode{}, + }, { + Type: "b", + Children: []json.TreeNode{}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 4, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 6, nil) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete overlapping text test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "abcd"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

abcd

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 4, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 5, nil) + return nil + })) + assert.Equal(t, "

d

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

a

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + // Concurrent editing, contained range test + t.Run("concurrently insert and delete contained elements of the same depth test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }, { + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "abcd"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

abcd

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(6, 6, &json.TreeNode{Type: "p", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 12, nil) + return nil + })) + assert.Equal(t, "

1234

abcd

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently multiple insert and delete contained elements of the same depth test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }, { + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "abcd"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

abcd

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(6, 6, &json.TreeNode{Type: "p", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(8, 8, &json.TreeNode{Type: "p", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(10, 10, &json.TreeNode{Type: "p", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 12, nil) + return nil + })) + assert.Equal(t, "

1234

abcd

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + }) + + t.Run("detecting error when inserting and deleting contained elements at different depths test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "i", + Children: []json.TreeNode{}, + }}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete contained elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "i", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 8, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 7, nil) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert and delete contained text test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 5, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "a"}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12a34

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

a

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete contained text test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 5, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 4, nil) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

14

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert and delete contained text and elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 6, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "a"}) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12a34

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete contained text and elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 6, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 5, nil) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + }) + + // Concurrent editing, side by side range test + t.Run("concurrently insert side by side elements (left) test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 0, &json.TreeNode{Type: "b", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 0, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert side by side elements (middle) test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "b", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert side by side elements (right) test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "b", Children: []json.TreeNode{}}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert and delete side by side elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "b", + Children: []json.TreeNode{}, + }}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete and insert side by side elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "b", + Children: []json.TreeNode{}, + }}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "i", Children: []json.TreeNode{}}) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete side by side elements test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{ + Type: "b", + Children: []json.TreeNode{}, + }, { + Type: "i", + Children: []json.TreeNode{}, + }}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 5, nil) + return nil + })) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("insert text to the same position(left) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "text", Value: "B"}) + return nil + })) + assert.Equal(t, "

A12

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

B12

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

BA12

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("insert text to the same position(middle) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "text", Value: "B"}) + return nil + })) + assert.Equal(t, "

1A2

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

1B2

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

1BA2

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("insert text content to the same position(right) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "B"}) + return nil + })) + assert.Equal(t, "

12A

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12B

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

12BA

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently insert and delete side by side text test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "a"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 5, nil) + return nil + })) + assert.Equal(t, "

12a34

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

12a

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete and insert side by side text test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "a"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.Equal(t, "

12a34

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

34

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

a34

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("concurrently delete side by side text blocks test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 5, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

34

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("delete text content at the same position(left) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "123"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

123

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 2, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 2, nil) + return nil + })) + assert.Equal(t, "

23

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

23

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

23

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("delete text content at the same position(middle) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "123"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

123

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 3, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 3, nil) + return nil + })) + assert.Equal(t, "

13

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

13

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

13

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("delete text content at the same position(right) concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "123"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

123

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 4, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 4, nil) + return nil + })) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + }) + + // Concurrent editing, complex cases test + t.Run("delete text content anchored to another concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "123"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

123

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 2, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 3, nil) + return nil + })) + assert.Equal(t, "

23

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

13

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

3

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("produce complete deletion concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "123"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

123

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 2, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 4, nil) + return nil + })) + assert.Equal(t, "

23

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

1

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle block delete concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12345"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12345

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(4, 6, nil) + return nil + })) + assert.Equal(t, "

345

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

123

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

3

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle insert within block delete concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12345"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12345

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 5, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "B"}) + return nil + })) + assert.Equal(t, "

15

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12B345

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

1B5

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle insert within block delete concurrently test 2", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12345"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12345

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 6, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, []*json.TreeNode{{Type: "text", Value: "a"}, {Type: "text", Value: "bc"}}...) + return nil + })) + assert.Equal(t, "

1

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12abc345

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

1abc

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle block element insertion within delete test 2", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "1234"}}, + }, { + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "5678"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

1234

5678

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 12, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(6, 6, []*json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + }, { + Type: "i", + Children: []json.TreeNode{{Type: "text", Value: "fg"}}, + }}...) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

1234

cd

fg

5678

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

cd

fg
", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle concurrent element insert/deletion (left) test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12345"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12345

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 7, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 0, []*json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + }, { + Type: "i", + Children: []json.TreeNode{{Type: "text", Value: "fg"}}, + }}...) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

cd

fg

12345

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

cd

fg
", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle concurrent element insert/deletion (right) test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12345"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12345

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(0, 7, nil) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(7, 7, []*json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "cd"}}, + }, { + Type: "i", + Children: []json.TreeNode{{Type: "text", Value: "fg"}}, + }}...) + return nil + })) + assert.Equal(t, "", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

12345

cd

fg
", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

cd

fg
", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle deletion of insertion anchor concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 2, nil) + return nil + })) + assert.Equal(t, "

1A2

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

2

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

A2

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle deletion after insertion concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 1, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.Equal(t, "

A12

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

A

", d1.Root().GetTree("t").ToXML()) + }) + + t.Run("handle deletion before insertion concurrently test", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "12"}}, + }}, + }) + return nil + })) + assert.NoError(t, c1.Sync(ctx)) + assert.Equal(t, "

12

", d1.Root().GetTree("t").ToXML()) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{Type: "text", Value: "A"}) + return nil + })) + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(1, 3, nil) + return nil + })) + assert.Equal(t, "

12A

", d1.Root().GetTree("t").ToXML()) + assert.Equal(t, "

", d2.Root().GetTree("t").ToXML()) + + syncClientsThenAssertEqual(t, []clientAndDocPair{{c1, d1}, {c2, d2}}) + assert.Equal(t, "

A

", d1.Root().GetTree("t").ToXML()) + }) + + // Edge cases test + t.Run("delete very first text when there is tombstone in front of target text test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + // 01. Create a tree and insert a paragraph. + root.SetNewTree("t").Edit(0, 0, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "abcdefghi"}}}) + assert.Equal(t, "

abcdefghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 1, &json.TreeNode{ + Type: "text", + Value: "12345", + }) + assert.Equal(t, "

12345abcdefghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 5) + assert.Equal(t, "

15abcdefghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(3, 5) + assert.Equal(t, "

15cdefghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 4) + assert.Equal(t, "

1defghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 3) + assert.Equal(t, "

efghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 2) + assert.Equal(t, "

fghi

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 5) + assert.Equal(t, "

f

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 2) + assert.Equal(t, "

", root.GetTree("t").ToXML()) + + return nil + }) + assert.NoError(t, err) + }) + + t.Run("delete node when there is more than one text node in front which has size bigger than 1 test", func(t *testing.T) { + doc := document.New(helper.TestDocKey(t)) + err := doc.Update(func(root *json.Object, p *presence.Presence) error { + // 01. Create a tree and insert a paragraph. + root.SetNewTree("t").Edit(0, 0, &json.TreeNode{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "abcde"}}}) + assert.Equal(t, "

abcde

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(6, 6, &json.TreeNode{ + Type: "text", + Value: "f", + }) + assert.Equal(t, "

abcdef

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(7, 7, &json.TreeNode{ + Type: "text", + Value: "g", + }) + assert.Equal(t, "

abcdefg

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(7, 8) + assert.Equal(t, "

abcdef

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(6, 7) + assert.Equal(t, "

abcde

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(5, 6) + assert.Equal(t, "

abcd

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(4, 5) + assert.Equal(t, "

abc

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(3, 4) + assert.Equal(t, "

ab

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(2, 3) + assert.Equal(t, "

a

", root.GetTree("t").ToXML()) + + root.GetTree("t").Edit(1, 2) + assert.Equal(t, "

", root.GetTree("t").ToXML()) + + return nil + }) + assert.NoError(t, err) + }) + + t.Run("split link can transmitted through rpc", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, + }}, + }) + return nil + })) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "1", + }) + return nil + })) + + assert.NoError(t, c1.Sync(ctx)) + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(3, 3, &json.TreeNode{ + Type: "text", + Value: "1", + }) + return nil + })) + assert.Equal(t, "

a11b

", d2.Root().GetTree("t").ToXML()) + + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 3, &json.TreeNode{ + Type: "text", + Value: "12", + }) + return nil + })) + + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(4, 5, &json.TreeNode{ + Type: "text", + Value: "21", + }) + return nil + })) + + assert.Equal(t, "

a1221b

", d2.Root().GetTree("t").ToXML()) + + assert.NoError(t, d2.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 4, &json.TreeNode{ + Type: "text", + Value: "123", + }) + return nil + })) + + assert.Equal(t, "

a12321b

", d2.Root().GetTree("t").ToXML()) + }) + + t.Run("can calculate size of index tree correctly", func(t *testing.T) { + ctx := context.Background() + d1 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c1.Attach(ctx, d1)) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.SetNewTree("t", &json.TreeNode{ + Type: "root", + Children: []json.TreeNode{{ + Type: "p", + Children: []json.TreeNode{{Type: "text", Value: "ab"}}, + }}, + }) + return nil + })) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "123", + }) + return nil + })) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "456", + }) + return nil + })) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "789", + }) + return nil + })) + + assert.NoError(t, d1.Update(func(root *json.Object, p *presence.Presence) error { + root.GetTree("t").Edit(2, 2, &json.TreeNode{ + Type: "text", + Value: "0123", + }) + return nil + })) + + assert.Equal(t, "

a0123789456123b

", d1.Root().GetTree("t").ToXML()) + assert.NoError(t, c1.Sync(ctx)) + + d2 := document.New(helper.TestDocKey(t)) + assert.NoError(t, c2.Attach(ctx, d2)) + size := d1.Root().GetTree("t").IndexTree.Root().Len() + assert.Equal(t, size, d2.Root().GetTree("t").IndexTree.Root().Len()) }) }