From 7069f400ace72e7f7acfae87331673815124e162 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Thu, 5 Sep 2024 21:22:57 -0700 Subject: [PATCH 01/19] SQL refactor POC --- Dockerfile | 1 + command/command.go | 127 ++- command/item_count.go | 117 +++ command/server_defined_unique_entity.go | 4 +- controller/controller.go | 8 +- datastore/datastore.go | 8 +- datastore/datastoretest/dynamo.go | 4 +- datastore/datastoretest/mock_datastore.go | 6 +- datastore/instrumented_datastore.go | 8 +- .../{item_count.go => item_count_dynamo.go} | 16 +- ...ount_test.go => item_count_dynamo_test.go} | 8 +- datastore/item_count_sql.go | 28 + datastore/sql.go | 47 ++ datastore/sync_entity.go | 720 +----------------- datastore/sync_entity_dynamo.go | 698 +++++++++++++++++ datastore/sync_entity_sql.go | 158 ++++ docker-compose.yml | 11 + go.mod | 11 +- go.sum | 90 +++ middleware/disabled_chain.go | 2 +- migrations/20240904202925_init.down.postgres | 3 + migrations/20240904202925_init.up.postgres | 38 + server/server.go | 14 +- 23 files changed, 1320 insertions(+), 807 deletions(-) create mode 100644 command/item_count.go rename datastore/{item_count.go => item_count_dynamo.go} (93%) rename datastore/{item_count_test.go => item_count_dynamo_test.go} (93%) create mode 100644 datastore/item_count_sql.go create mode 100644 datastore/sql.go create mode 100644 datastore/sync_entity_dynamo.go create mode 100644 datastore/sync_entity_sql.go create mode 100644 migrations/20240904202925_init.down.postgres create mode 100644 migrations/20240904202925_init.up.postgres diff --git a/Dockerfile b/Dockerfile index 3010b51d..368231a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,7 @@ RUN CGO_ENABLED=0 GOOS=linux go build \ FROM alpine:3.20 as artifact RUN apk add --update ca-certificates # Certificates for SSL COPY --from=builder /src/main main +COPY ./migrations/ ./migrations EXPOSE 8295 diff --git a/command/command.go b/command/command.go index 4faa4d20..159c3cd8 100644 --- 
a/command/command.go +++ b/command/command.go @@ -19,6 +19,11 @@ var ( maxClientHistoryObjectQuota = 30000 ) +var allowedSQLDataTypes = map[int]struct{}{ + // Sessions + 50119: {}, +} + const ( storeBirthday string = "1" maxCommitBatchSize int32 = 90 @@ -33,7 +38,7 @@ const ( // handleGetUpdatesRequest handles GetUpdatesMessage and fills // GetUpdatesResponse. Target sync entities in the database will be updated or // deleted based on the client's requests. -func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, db datastore.Datastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, db datastore.DynamoDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS // default value, might be changed later isNewClient := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_NEW_CLIENT isPoll := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_PERIODIC @@ -194,35 +199,11 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag return &errCode, nil } -func getItemCounts(cache *cache.Cache, db datastore.Datastore, clientID string) (*datastore.ClientItemCounts, int, int, error) { - itemCounts, err := db.GetClientItemCount(clientID) - if err != nil { - return nil, 0, 0, err - } - newNormalCount, newHistoryCount, err := getInterimItemCounts(cache, clientID, false) - if err != nil { - return nil, 0, 0, err - } - return itemCounts, newNormalCount, newHistoryCount, nil -} - -func getInterimItemCounts(cache *cache.Cache, clientID string, clear bool) (int, int, error) { - newNormalCount, err := cache.GetInterimCount(context.Background(), clientID, normalCountTypeStr, clear) - if err != nil { - return 0, 0, err - } - newHistoryCount, err := 
cache.GetInterimCount(context.Background(), clientID, historyCountTypeStr, clear) - if err != nil { - return 0, 0, err - } - return newNormalCount, newHistoryCount, nil -} - // handleCommitRequest handles the commit message and fills the commit response. // For each commit entry: // - new sync entity is created and inserted into the database if version is 0. // - existed sync entity will be updated if version is greater than 0. -func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, db datastore.Datastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { if commitMsg == nil { return nil, fmt.Errorf("nil commitMsg is received") } @@ -232,24 +213,23 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c return &errCode, nil } - itemCounts, newNormalCount, newHistoryCount, err := getItemCounts(cache, db, clientID) + trx, err := sqlDB.Beginx() + if err != nil { + return nil, fmt.Errorf("error starting transaction: %w", err) + } + defer trx.Rollback() + + chainID, err := sqlDB.GetAndLockChainID(trx, &clientID) + if err != nil { + return nil, err + } + + itemCounts, err := getItemCounts(cache, dynamoDB, clientID) if err != nil { log.Error().Err(err).Msg("Get client's item count failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, fmt.Errorf("error getting client's item count: %w", err) } - currentNormalItemCount := itemCounts.ItemCount - currentHistoryItemCount := itemCounts.SumHistoryCounts() - - boostedQuotaAddition := 0 - if currentHistoryItemCount > maxClientHistoryObjectQuota { - // Sync chains with history entities stored before the history count fix - // may have history counts greater than the new history item quota. 
- // "Boost" the quota with the difference between the history quota and count, - // so users can start syncing other entities immediately, instead of waiting for the - // history TTL to get rid of the excess items. - boostedQuotaAddition = min(maxClientObjectQuota-maxClientHistoryObjectQuota, currentHistoryItemCount-maxClientHistoryObjectQuota) - } commitRsp.Entryresponse = make([]*sync_pb.CommitResponse_EntryResponse, len(commitMsg.Entries)) @@ -261,7 +241,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp := &sync_pb.CommitResponse_EntryResponse{} commitRsp.Entryresponse[i] = entryRsp - entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID) + entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID, chainID) if err != nil { // Can't unmarshal & marshal the message from PB into DB format rspType := sync_pb.CommitResponse_INVALID_MESSAGE entryRsp.ResponseType = &rspType @@ -280,10 +260,16 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c oldVersion := *entityToCommit.Version isUpdateOp := oldVersion != 0 isHistoryRelatedItem := *entityToCommit.DataType == datastore.HistoryTypeID || *entityToCommit.DataType == datastore.HistoryDeleteDirectiveTypeID + _, isStoredInSQL := allowedSQLDataTypes[*entityToCommit.DataType] *entityToCommit.Version = *entityToCommit.Mtime + if *entityToCommit.DataType == datastore.HistoryTypeID { // Check if item exists using client_unique_tag - isUpdateOp, err = db.HasItem(clientID, *entityToCommit.ClientDefinedUniqueTag) + if isStoredInSQL { + isUpdateOp, err = sqlDB.HasItem(trx, *chainID, []byte(*entityToCommit.ClientDefinedUniqueTag)) + } else { + isUpdateOp, err = dynamoDB.HasItem(clientID, *entityToCommit.ClientDefinedUniqueTag) + } if err != nil { log.Error().Err(err).Msg("Insert history sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -294,18 +280,24 @@ func 
handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } if !isUpdateOp { // Create - if currentNormalItemCount+currentHistoryItemCount+newNormalCount+newHistoryCount >= maxClientObjectQuota+boostedQuotaAddition { + totalItemCount := itemCounts.sumCounts(false) + if totalItemCount >= maxClientObjectQuota { rspType := sync_pb.CommitResponse_OVER_QUOTA entryRsp.ResponseType = &rspType - entryRsp.ErrorMessage = aws.String(fmt.Sprintf("There are already %v non-deleted objects in store", currentNormalItemCount+currentHistoryItemCount)) + entryRsp.ErrorMessage = aws.String(fmt.Sprintf("There are already %v non-deleted objects in store", totalItemCount)) continue } - if !isHistoryRelatedItem || currentHistoryItemCount+newHistoryCount < maxClientHistoryObjectQuota { + if !isHistoryRelatedItem || itemCounts.sumCounts(true) < maxClientHistoryObjectQuota { // Insert all non-history items. For history items, ignore any items above history quoto // and lie to the client about the objects being synced instead of returning OVER_QUOTA // so the client can continue to sync other entities. 
- conflict, err := db.InsertSyncEntity(entityToCommit) + var conflict bool + if isStoredInSQL { + conflict, err = sqlDB.InsertSyncEntity(trx, entityToCommit) + } else { + conflict, err = dynamoDB.InsertSyncEntity(entityToCommit) + } if err != nil { log.Error().Err(err).Msg("Insert sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -323,14 +315,15 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c idMap[*entityToCommit.OriginatorClientItemID] = entityToCommit.ID } - if isHistoryRelatedItem { - newHistoryCount, err = cache.IncrementInterimCount(context.Background(), clientID, historyCountTypeStr, false) - } else { - newNormalCount, err = cache.IncrementInterimCount(context.Background(), clientID, normalCountTypeStr, false) - } + err = itemCounts.recordChange(*entityToCommit.DataType, false) } } else { // Update - conflict, deleted, err := db.UpdateSyncEntity(entityToCommit, oldVersion) + var conflict, deleted bool + if isStoredInSQL { + conflict, deleted, err = sqlDB.UpdateSyncEntity(trx, entityToCommit, oldVersion) + } else { + conflict, deleted, err = dynamoDB.UpdateSyncEntity(entityToCommit, oldVersion) + } if err != nil { log.Error().Err(err).Msg("Update sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -344,11 +337,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c continue } if deleted { - if isHistoryRelatedItem { - newHistoryCount, err = cache.IncrementInterimCount(context.Background(), clientID, historyCountTypeStr, true) - } else { - newNormalCount, err = cache.IncrementInterimCount(context.Background(), clientID, normalCountTypeStr, true) - } + err = itemCounts.recordChange(*entityToCommit.DataType, true) } } if err != nil { @@ -366,7 +355,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.Mtime = entityToCommit.Mtime } - newNormalCount, newHistoryCount, err = getInterimItemCounts(cache, clientID, 
true) + err = itemCounts.save() if err != nil { log.Error().Err(err).Msg("Get interim item counts failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -378,24 +367,14 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c cache.SetTypeMtime(context.Background(), clientID, dataType, mtime) } - err = db.UpdateClientItemCount(itemCounts, newNormalCount, newHistoryCount) - if err != nil { - // We only impose a soft quota limit on the item count for each client, so - // we only log the error without further actions here. The reason of this - // is we do not want to pay the cost to ensure strong consistency on this - // value and we do not want to give up previous DB operations if we cannot - // update the count this time. In addition, we do not retry this operation - // either because it is acceptable to miss one time of this update and - // chances of failing to update the item count multiple times in a row for - // a single client is quite low. - log.Error().Err(err).Msg("Update client item count failed") - } + trx.Commit() + return &errCode, nil } // handleClearServerDataRequest handles clearing user data from the datastore and cache // and fills the response -func handleClearServerDataRequest(cache *cache.Cache, db datastore.Datastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleClearServerDataRequest(cache *cache.Cache, db datastore.DynamoDatastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS var err error @@ -433,7 +412,7 @@ func handleClearServerDataRequest(cache *cache.Cache, db datastore.Datastore, _ // HandleClientToServerMessage handles the protobuf ClientToServerMessage and // fills the protobuf ClientToServerResponse. 
-func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, db datastore.Datastore, clientID string) error { +func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) error { // Create ClientToServerResponse and fill general fields for both GU and // Commit. pbRsp.StoreBirthday = aws.String(storeBirthday) @@ -447,7 +426,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_GET_UPDATES { guRsp := &sync_pb.GetUpdatesResponse{} pbRsp.GetUpdates = guRsp - pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, db, clientID) + pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, dynamoDB, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) @@ -461,7 +440,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_COMMIT { commitRsp := &sync_pb.CommitResponse{} pbRsp.Commit = commitRsp - pbRsp.ErrorCode, err = handleCommitRequest(cache, pb.Commit, commitRsp, db, clientID) + pbRsp.ErrorCode, err = handleCommitRequest(cache, pb.Commit, commitRsp, dynamoDB, sqlDB, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) @@ -475,7 +454,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_CLEAR_SERVER_DATA { csdRsp := &sync_pb.ClearServerDataResponse{} pbRsp.ClearServerData = csdRsp - pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, db, pb.ClearServerData, clientID) + pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, dynamoDB, 
pb.ClearServerData, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) diff --git a/command/item_count.go b/command/item_count.go new file mode 100644 index 00000000..95a6be1f --- /dev/null +++ b/command/item_count.go @@ -0,0 +1,117 @@ +package command + +import ( + "context" + "fmt" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/rs/zerolog/log" +) + +type ItemCounts struct { + cache *cache.Cache + dynamoDB datastore.DynamoDatastore + dynamoItemCounts *datastore.DynamoItemCounts + clientID string + cacheNewNormalCount int + cacheNewHistoryCount int + sqlTxNewNormalCount int + sqlTxNewHistoryCount int +} + +func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, clientID string) (*ItemCounts, error) { + dynamoItemCounts, err := dynamoDB.GetClientItemCount(clientID) + if err != nil { + return nil, err + } + + itemCounts := ItemCounts{ + cache: cache, + dynamoDB: dynamoDB, + dynamoItemCounts: dynamoItemCounts, + clientID: clientID, + cacheNewNormalCount: 0, + cacheNewHistoryCount: 0, + sqlTxNewNormalCount: 0, + sqlTxNewHistoryCount: 0, + } + err = itemCounts.updateInterimItemCounts(false) + if err != nil { + return nil, err + } + + return &itemCounts, nil +} + +func (itemCounts *ItemCounts) updateInterimItemCounts(clear bool) error { + newNormalCount, err := itemCounts.cache.GetInterimCount(context.Background(), itemCounts.clientID, normalCountTypeStr, clear) + if err != nil { + return err + } + newHistoryCount, err := itemCounts.cache.GetInterimCount(context.Background(), itemCounts.clientID, historyCountTypeStr, clear) + if err != nil { + return err + } + itemCounts.cacheNewNormalCount = newNormalCount + itemCounts.cacheNewHistoryCount = newHistoryCount + return nil +} + +func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool) error { + _, isStoredInSQL := allowedSQLDataTypes[dataType] + isHistory := dataType == datastore.HistoryTypeID + if 
isStoredInSQL { + delta := 1 + if subtract { + delta = -1 + } + if isHistory { + itemCounts.sqlTxNewHistoryCount += delta + } else { + itemCounts.sqlTxNewNormalCount += delta + } + } else { + countType := normalCountTypeStr + if isHistory { + countType = historyCountTypeStr + } + newCount, err := itemCounts.cache.IncrementInterimCount(context.Background(), itemCounts.clientID, countType, subtract) + if err != nil { + return fmt.Errorf("failed to increment interim cache count: %w", err) + } + if isHistory { + itemCounts.cacheNewHistoryCount = newCount + } else { + itemCounts.cacheNewNormalCount = newCount + } + } + return nil +} + +func (itemCounts *ItemCounts) sumCounts(historyOnly bool) int { + sum := itemCounts.dynamoItemCounts.SumHistoryCounts() + itemCounts.sqlTxNewHistoryCount + itemCounts.cacheNewHistoryCount + if !historyOnly { + sum += itemCounts.dynamoItemCounts.ItemCount + itemCounts.sqlTxNewNormalCount + itemCounts.cacheNewNormalCount + } + return sum +} + +func (itemCounts *ItemCounts) save() error { + err := itemCounts.updateInterimItemCounts(true) + if err != nil { + return fmt.Errorf("error getting interim item count: %w", err) + } + if err = itemCounts.dynamoDB.UpdateClientItemCount(itemCounts.dynamoItemCounts, itemCounts.cacheNewNormalCount, itemCounts.cacheNewHistoryCount); err != nil { + // We only impose a soft quota limit on the item count for each client, so + // we only log the error without further actions here. The reason of this + // is we do not want to pay the cost to ensure strong consistency on this + // value and we do not want to give up previous DB operations if we cannot + // update the count this time. In addition, we do not retry this operation + // either because it is acceptable to miss one time of this update and + // chances of failing to update the item count multiple times in a row for + // a single client is quite low. 
+ log.Error().Err(err).Msg("Update client item count failed") + } + return nil +} diff --git a/command/server_defined_unique_entity.go b/command/server_defined_unique_entity.go index 394171ad..93057fdf 100644 --- a/command/server_defined_unique_entity.go +++ b/command/server_defined_unique_entity.go @@ -37,12 +37,12 @@ func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clien Version: &version, ParentIdString: &parentID, IdString: &idString, Specifics: specifics} - return datastore.CreateDBSyncEntity(pbEntity, nil, clientID) + return datastore.CreateDBSyncEntity(pbEntity, nil, clientID, nil) } // InsertServerDefinedUniqueEntities inserts the server defined unique tag // entities if it is not in the DB yet for a specific client. -func InsertServerDefinedUniqueEntities(db datastore.Datastore, clientID string) error { +func InsertServerDefinedUniqueEntities(db datastore.DynamoDatastore, clientID string) error { var entities []*datastore.SyncEntity // Check if they're existed already for this client. // If yes, just return directly. diff --git a/controller/controller.go b/controller/controller.go index 7566072c..5561558c 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -24,16 +24,16 @@ const ( ) // SyncRouter add routers for command and auth endpoint requests. -func SyncRouter(cache *cache.Cache, datastore datastore.Datastore) chi.Router { +func SyncRouter(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB) chi.Router { r := chi.NewRouter() r.Use(syncMiddleware.Auth) r.Use(syncMiddleware.DisabledChain) - r.Method("POST", "/command/", middleware.InstrumentHandler("Command", Command(cache, datastore))) + r.Method("POST", "/command/", middleware.InstrumentHandler("Command", Command(cache, dynamoDB, sqlDB))) return r } // Command handles GetUpdates and Commit requests from sync clients. 
-func Command(cache *cache.Cache, db datastore.Datastore) http.HandlerFunc { +func Command(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() clientID, ok := ctx.Value(syncContext.ContextKeyClientID).(string) @@ -72,7 +72,7 @@ func Command(cache *cache.Cache, db datastore.Datastore) http.HandlerFunc { } pbRsp := &sync_pb.ClientToServerResponse{} - err = command.HandleClientToServerMessage(cache, pb, pbRsp, db, clientID) + err = command.HandleClientToServerMessage(cache, pb, pbRsp, dynamoDB, sqlDB, clientID) if err != nil { log.Error().Err(err).Msg("Handle command message failed") http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/datastore/datastore.go b/datastore/datastore.go index 04796120..bfcc73b0 100644 --- a/datastore/datastore.go +++ b/datastore/datastore.go @@ -1,7 +1,7 @@ package datastore -// Datastore abstracts over the underlying datastore. -type Datastore interface { +// DynamoDatastore abstracts over the underlying datastore. +type DynamoDatastore interface { // Insert a new sync entity. InsertSyncEntity(entity *SyncEntity) (bool, error) // Insert a series of sync entities in a write transaction. @@ -16,9 +16,9 @@ type Datastore interface { // Check if a server-defined unique tag is in the datastore. HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) // Get the count of sync items for a client. - GetClientItemCount(clientID string) (*ClientItemCounts, error) + GetClientItemCount(clientID string) (*DynamoItemCounts, error) // Update the count of sync items for a client. 
- UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error + UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error // ClearServerData deletes all items for a given clientID ClearServerData(clientID string) ([]SyncEntity, error) // DisableSyncChain marks a chain as disabled so no further updates or commits can happen diff --git a/datastore/datastoretest/dynamo.go b/datastore/datastoretest/dynamo.go index 2c831ab0..9bc1ae34 100644 --- a/datastore/datastoretest/dynamo.go +++ b/datastore/datastoretest/dynamo.go @@ -125,7 +125,7 @@ func ScanTagItems(dynamo *datastore.Dynamo) ([]datastore.ServerClientUniqueTagIt // ScanClientItemCounts scans the dynamoDB table and returns all client item // counts. -func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.ClientItemCounts, error) { +func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.DynamoItemCounts, error) { filter := expression.AttributeExists(expression.Name("ItemCount")) expr, err := expression.NewBuilder().WithFilter(filter).Build() if err != nil { @@ -142,7 +142,7 @@ func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.ClientItemCount if err != nil { return nil, fmt.Errorf("error doing scan for item counts: %w", err) } - clientItemCounts := []datastore.ClientItemCounts{} + clientItemCounts := []datastore.DynamoItemCounts{} err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &clientItemCounts) if err != nil { return nil, fmt.Errorf("error unmarshalling item counts: %w", err) diff --git a/datastore/datastoretest/mock_datastore.go b/datastore/datastoretest/mock_datastore.go index 60e8e854..70911fd5 100644 --- a/datastore/datastoretest/mock_datastore.go +++ b/datastore/datastoretest/mock_datastore.go @@ -46,13 +46,13 @@ func (m *MockDatastore) HasItem(clientID string, ID string) (bool, error) { } // GetClientItemCount mocks calls to GetClientItemCount -func (m *MockDatastore) 
GetClientItemCount(clientID string) (*datastore.ClientItemCounts, error) { +func (m *MockDatastore) GetClientItemCount(clientID string) (*datastore.DynamoItemCounts, error) { args := m.Called(clientID) - return &datastore.ClientItemCounts{ClientID: clientID, ID: clientID}, args.Error(1) + return &datastore.DynamoItemCounts{ClientID: clientID, ID: clientID}, args.Error(1) } // UpdateClientItemCount mocks calls to UpdateClientItemCount -func (m *MockDatastore) UpdateClientItemCount(counts *datastore.ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error { +func (m *MockDatastore) UpdateClientItemCount(counts *datastore.DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error { args := m.Called(counts, newNormalItemCount, newHistoryItemCount) return args.Error(0) } diff --git a/datastore/instrumented_datastore.go b/datastore/instrumented_datastore.go index 6ce5efe7..964bb3b3 100644 --- a/datastore/instrumented_datastore.go +++ b/datastore/instrumented_datastore.go @@ -16,7 +16,7 @@ import ( // DatastoreWithPrometheus implements Datastore interface with all methods wrapped // with Prometheus metrics type DatastoreWithPrometheus struct { - base Datastore + base DynamoDatastore instanceName string } @@ -30,7 +30,7 @@ var datastoreDurationSummaryVec = promauto.NewSummaryVec( []string{"instance_name", "method", "result"}) // NewDatastoreWithPrometheus returns an instance of the Datastore decorated with prometheus summary metric -func NewDatastoreWithPrometheus(base Datastore, instanceName string) DatastoreWithPrometheus { +func NewDatastoreWithPrometheus(base DynamoDatastore, instanceName string) DatastoreWithPrometheus { return DatastoreWithPrometheus{ base: base, instanceName: instanceName, @@ -66,7 +66,7 @@ func (_d DatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) } // GetClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *ClientItemCounts, err error) 
{ +func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *DynamoItemCounts, err error) { _since := time.Now() defer func() { result := "ok" @@ -164,7 +164,7 @@ func (_d DatastoreWithPrometheus) IsSyncChainDisabled(clientID string) (b1 bool, } // UpdateClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { +func (_d DatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { _since := time.Now() defer func() { result := "ok" diff --git a/datastore/item_count.go b/datastore/item_count_dynamo.go similarity index 93% rename from datastore/item_count.go rename to datastore/item_count_dynamo.go index 1cda9903..daad3f99 100644 --- a/datastore/item_count.go +++ b/datastore/item_count_dynamo.go @@ -18,9 +18,9 @@ const ( CurrentCountVersion int = 2 ) -// ClientItemCounts is used to marshal and unmarshal ClientItemCounts items in +// DynamoItemCounts is used to marshal and unmarshal DynamoItemCounts items in // dynamoDB. -type ClientItemCounts struct { +type DynamoItemCounts struct { ClientID string ID string ItemCount int @@ -34,7 +34,7 @@ type ClientItemCounts struct { // ClientItemCountByClientID implements sort.Interface for []ClientItemCount // based on ClientID. 
-type ClientItemCountByClientID []ClientItemCounts +type ClientItemCountByClientID []DynamoItemCounts func (a ClientItemCountByClientID) Len() int { return len(a) } func (a ClientItemCountByClientID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } @@ -42,14 +42,14 @@ func (a ClientItemCountByClientID) Less(i, j int) bool { return a[i].ClientID < a[j].ClientID } -func (counts *ClientItemCounts) SumHistoryCounts() int { +func (counts *DynamoItemCounts) SumHistoryCounts() int { return counts.HistoryItemCountPeriod1 + counts.HistoryItemCountPeriod2 + counts.HistoryItemCountPeriod3 + counts.HistoryItemCountPeriod4 } -func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *ClientItemCounts) error { +func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *DynamoItemCounts) error { now := time.Now().Unix() if counts.Version < CurrentCountVersion { if counts.ItemCount > 0 { @@ -128,7 +128,7 @@ func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *ClientItemCou // GetClientItemCount returns the count of non-deleted sync items stored for // a given client. 
-func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, error) { +func (dynamo *Dynamo) GetClientItemCount(clientID string) (*DynamoItemCounts, error) { primaryKey := PrimaryKey{ClientID: clientID, ID: clientID} key, err := dynamodbattribute.MarshalMap(primaryKey) if err != nil { @@ -145,7 +145,7 @@ func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, er return nil, fmt.Errorf("error getting an item-count item: %w", err) } - clientItemCounts := &ClientItemCounts{} + clientItemCounts := &DynamoItemCounts{} err = dynamodbattribute.UnmarshalMap(out.Item, clientItemCounts) if err != nil { return nil, fmt.Errorf("error unmarshalling item-count item: %w", err) @@ -165,7 +165,7 @@ func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, er // UpdateClientItemCount updates the count of non-deleted sync items for a // given client stored in the dynamoDB. -func (dynamo *Dynamo) UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error { +func (dynamo *Dynamo) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error { counts.HistoryItemCountPeriod4 += newHistoryItemCount counts.ItemCount += newNormalItemCount diff --git a/datastore/item_count_test.go b/datastore/item_count_dynamo_test.go similarity index 93% rename from datastore/item_count_test.go rename to datastore/item_count_dynamo_test.go index ccb78ac2..513118c6 100644 --- a/datastore/item_count_test.go +++ b/datastore/item_count_dynamo_test.go @@ -33,12 +33,12 @@ func (suite *ItemCountTestSuite) TearDownTest() { func (suite *ItemCountTestSuite) TestGetClientItemCount() { // Insert two items for test. 
- items := []datastore.ClientItemCounts{ + items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 5}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } for _, item := range items { - existing := datastore.ClientItemCounts{ClientID: item.ClientID, ID: item.ID, Version: datastore.CurrentCountVersion} + existing := datastore.DynamoItemCounts{ClientID: item.ClientID, ID: item.ID, Version: datastore.CurrentCountVersion} suite.Require().NoError( suite.dynamo.UpdateClientItemCount(&existing, item.ItemCount, 0)) } @@ -56,12 +56,12 @@ func (suite *ItemCountTestSuite) TestGetClientItemCount() { } func (suite *ItemCountTestSuite) TestUpdateClientItemCount() { - items := []datastore.ClientItemCounts{ + items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 1}, {ClientID: "client1", ID: "client1", ItemCount: 5}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } - expectedItems := []datastore.ClientItemCounts{ + expectedItems := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 6}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go new file mode 100644 index 00000000..95a5e059 --- /dev/null +++ b/datastore/item_count_sql.go @@ -0,0 +1,28 @@ +package datastore + +import ( + "fmt" + "strconv" + + "github.com/jmoiron/sqlx" +) + +type SQLItemCounts struct { + NormalItemCount int `db:"normal_item_count"` + HistoryItemCount int `db:"history_item_count"` +} + +func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, error) { + counts := SQLItemCounts{} + err := tx.Get(&counts, ` + SELECT + COUNT(*) FILTER (WHERE data_type != `+strconv.Itoa(HistoryTypeID)+`) normal_item_count, + COUNT(*) FILTER (WHERE data_type = `+strconv.Itoa(HistoryTypeID)+`) history_item_count + FROM entities + WHERE chain_id = $1 + `, chainID) + if err != nil { + return nil, fmt.Errorf("failed to get item counts: %w", 
err) + } + return &counts, nil +} diff --git a/datastore/sql.go b/datastore/sql.go new file mode 100644 index 00000000..877a7adc --- /dev/null +++ b/datastore/sql.go @@ -0,0 +1,47 @@ +package datastore + +import ( + "errors" + "fmt" + "os" + + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/jmoiron/sqlx" +) + +const sqlURLEnvKey = "SQL_DATABASE_URL" + +// SQLDB is a Datastore wrapper around a SQL-based database. +type SQLDB struct { + *sqlx.DB +} + +// NewSQLDB returns a SQLDB client to be used. +func NewSQLDB() (*SQLDB, error) { + sqlURL := os.Getenv(sqlURLEnvKey) + if len(sqlURL) == 0 { + return nil, fmt.Errorf("%s must be defined", sqlURLEnvKey) + } + migration, err := migrate.New( + "file://./migrations", + sqlURL, + ) + if err != nil { + return nil, fmt.Errorf("failed to init migrations: %w", err) + } + if err = migration.Up(); err != nil { + if !errors.Is(err, migrate.ErrNoChange) { + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + } + + db, err := sqlx.Connect("postgres", sqlURL) + if err != nil { + return nil, fmt.Errorf("failed to connect to SQL DB: %w", err) + } + + wrappedDB := SQLDB{db} + return &wrappedDB, nil +} diff --git a/datastore/sync_entity.go b/datastore/sync_entity.go index 3d032619..56078e38 100644 --- a/datastore/sync_entity.go +++ b/datastore/sync_entity.go @@ -3,31 +3,18 @@ package datastore import ( "fmt" "reflect" - "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/expression" "github.com/brave/go-sync/schema/protobuf/sync_pb" - "github.com/brave/go-sync/utils" "github.com/rs/zerolog/log" uuid "github.com/satori/go.uuid" "google.golang.org/protobuf/proto" ) const ( - maxBatchGetItemSize = 
100 // Limited by AWS. - maxTransactDeleteItemSize = 10 // Limited by AWS. - clientTagItemPrefix = "Client#" - serverTagItemPrefix = "Server#" - conditionalCheckFailed = "ConditionalCheckFailed" - disabledChainID = "disabled_chain" - reasonDeleted = "deleted" HistoryTypeID int = 963985 HistoryDeleteDirectiveTypeID int = 150251 // Expiration time for history and history delete directive @@ -37,698 +24,29 @@ const ( // SyncEntity is used to marshal and unmarshal sync items in dynamoDB. type SyncEntity struct { - ClientID string + ClientID string + // ChainID is a synthetic key that is connected to the client id in the SQL db. + ChainID *int64 `dynamodbav:"-" db:"chain_id"` ID string - ParentID *string `dynamodbav:",omitempty"` + IDBytes []byte `dynamodbav:"-" db:"id"` + ParentID *string `dynamodbav:",omitempty" db:"parent_id"` Version *int64 Mtime *int64 Ctime *int64 Name *string `dynamodbav:",omitempty"` - NonUniqueName *string `dynamodbav:",omitempty"` - ServerDefinedUniqueTag *string `dynamodbav:",omitempty"` + NonUniqueName *string `dynamodbav:",omitempty" db:"non_unique_name"` + ServerDefinedUniqueTag *string `dynamodbav:",omitempty" db:"server_defined_unique_tags"` Deleted *bool - OriginatorCacheGUID *string `dynamodbav:",omitempty"` - OriginatorClientItemID *string `dynamodbav:",omitempty"` + OriginatorCacheGUID *string `dynamodbav:",omitempty" db:"originator_cache_guid"` + OriginatorClientItemID *string `dynamodbav:",omitempty" db:"originator_client_item_id"` Specifics []byte - DataType *int + DataType *int `db:"data_type"` Folder *bool - ClientDefinedUniqueTag *string `dynamodbav:",omitempty"` - UniquePosition []byte `dynamodbav:",omitempty"` + ClientDefinedUniqueTag *string `dynamodbav:",omitempty" db:"client_defined_unique_tag"` + UniquePosition []byte `dynamodbav:",omitempty" db:"unique_position"` DataTypeMtime *string ExpirationTime *int64 -} - -// SyncEntityByClientIDID implements sort.Interface for []SyncEntity based on -// the string
concatenation of ClientID and ID fields. -type SyncEntityByClientIDID []SyncEntity - -func (a SyncEntityByClientIDID) Len() int { return len(a) } -func (a SyncEntityByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a SyncEntityByClientIDID) Less(i, j int) bool { - return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID -} - -// SyncEntityByMtime implements sort.Interface for []SyncEntity based on Mtime. -type SyncEntityByMtime []SyncEntity - -func (a SyncEntityByMtime) Len() int { return len(a) } -func (a SyncEntityByMtime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a SyncEntityByMtime) Less(i, j int) bool { - return *a[i].Mtime < *a[j].Mtime -} - -// DisabledMarkerItem is used to mark sync chain as deleted in Dynamodb -type DisabledMarkerItem struct { - ClientID string - ID string - Reason string - Mtime *int64 - Ctime *int64 -} - -// DisabledMarkerItemQuery is used to query for disabled marker item in -// DynamoDB -type DisabledMarkerItemQuery struct { - ClientID string - ID string -} - -// ServerClientUniqueTagItem is used to marshal and unmarshal tag items in -// dynamoDB. -type ServerClientUniqueTagItem struct { - ClientID string // Hash key - ID string // Range key - Mtime *int64 - Ctime *int64 -} - -// ServerClientUniqueTagItemQuery is used to query for unique tag items in -// dynamoDB. -type ServerClientUniqueTagItemQuery struct { - ClientID string // Hash key - ID string // Range key -} - -// TagItemByClientIDID implements sort.Interface for []ServerClientUniqueTagItem -// based on the string concatenation of ClientID and ID fields. 
-type TagItemByClientIDID []ServerClientUniqueTagItem - -func (a TagItemByClientIDID) Len() int { return len(a) } -func (a TagItemByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a TagItemByClientIDID) Less(i, j int) bool { - return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID -} - -// getTagPrefix is a helper method to give the proper prefix for unique tag -func getTagPrefix(isServer bool) string { - if isServer { - return serverTagItemPrefix - } - return clientTagItemPrefix -} - -// NewServerClientUniqueTagItem creates a tag item which is used to ensure the -// uniqueness of server-defined or client-defined unique tags for a client. -func NewServerClientUniqueTagItem(clientID string, tag string, isServer bool) *ServerClientUniqueTagItem { - prefix := getTagPrefix(isServer) - now := aws.Int64(utils.UnixMilli(time.Now())) - - return &ServerClientUniqueTagItem{ - ClientID: clientID, - ID: prefix + tag, - Mtime: now, - Ctime: now, - } -} - -// NewServerClientUniqueTagItemQuery creates a tag item query which is used to -// determine whether a sync entity has a unique tag item or not -func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ServerClientUniqueTagItemQuery { - prefix := getTagPrefix(isServer) - - return &ServerClientUniqueTagItemQuery{ - ClientID: clientID, - ID: prefix + tag, - } -} - -// InsertSyncEntity inserts a new sync entity into dynamoDB. -// If ClientDefinedUniqueTag is not null, we will use a write transaction to -// write a sync item along with a tag item to ensure the uniqueness of the -// client tag. Otherwise, only a sync item is written into DB without using -// transactions. -func (dynamo *Dynamo) InsertSyncEntity(entity *SyncEntity) (bool, error) { - // Create a condition for inserting new items only. 
- cond := expression.AttributeNotExists(expression.Name(pk)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return false, fmt.Errorf("error building expression to insert sync entity: %w", err) - } - - // Write tag item for all data types, except for - // the history type, which does not use tag items. - if entity.ClientDefinedUniqueTag != nil && *entity.DataType != HistoryTypeID { - items := []*dynamodb.TransactWriteItem{} - // Additional item for ensuring tag's uniqueness for a specific client. - item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ClientDefinedUniqueTag, false) - av, err := dynamodbattribute.MarshalMap(*item) - if err != nil { - return false, fmt.Errorf("error marshalling unique tag item to insert sync entity: %w", err) - } - tagItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - // Normal sync item - av, err = dynamodbattribute.MarshalMap(*entity) - if err != nil { - return false, fmt.Errorf("error marshlling sync item to insert sync entity: %w", err) - } - syncItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - items = append(items, tagItem) - items = append(items, syncItem) - - _, err = dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - // Return conflict if insert condition failed. 
- if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { - for _, reason := range canceledException.CancellationReasons { - if reason.Code != nil && *reason.Code == conditionalCheckFailed { - return true, fmt.Errorf("error inserting sync item with client tag: %w", err) - } - } - } - return false, fmt.Errorf("error writing tag item and sync item in a transaction to insert sync entity: %w", err) - } - - return false, nil - } - - // Normal sync item - av, err := dynamodbattribute.MarshalMap(*entity) - if err != nil { - return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err) - } - input := &dynamodb.PutItemInput{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - } - _, err = dynamo.PutItem(input) - if err != nil { - return false, fmt.Errorf("error calling PutItem to insert sync item: %w", err) - } - return false, nil -} - -// HasServerDefinedUniqueTag check the tag item to see if there is already a -// tag item exists with the tag value for a specific client. 
-func (dynamo *Dynamo) HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) { - tagItem := NewServerClientUniqueTagItemQuery(clientID, tag, true) - key, err := dynamodbattribute.MarshalMap(tagItem) - if err != nil { - return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if server tag existed: %w", err) - } - - return out.Item != nil, nil -} - -func (dynamo *Dynamo) HasItem(clientID string, ID string) (bool, error) { - primaryKey := PrimaryKey{ClientID: clientID, ID: ID} - key, err := dynamodbattribute.MarshalMap(primaryKey) - - if err != nil { - return false, fmt.Errorf("error marshalling key to check if item existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if item existed: %w", err) - } - - return out.Item != nil, nil -} - -// InsertSyncEntitiesWithServerTags is used to insert sync entities with -// server-defined unique tags. To ensure the uniqueness, for each sync entity, -// we will write a tag item and a sync item. Items for all the entities in the -// array would be written into DB in one transaction. -func (dynamo *Dynamo) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error { - items := []*dynamodb.TransactWriteItem{} - for _, entity := range entities { - // Create a condition for inserting new items only. 
- cond := expression.AttributeNotExists(expression.Name(pk)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return fmt.Errorf("error building expression to insert sync entity with server tag: %w", err) - } - - // Additional item for ensuring tag's uniqueness for a specific client. - item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ServerDefinedUniqueTag, true) - av, err := dynamodbattribute.MarshalMap(*item) - if err != nil { - return fmt.Errorf("error marshalling tag item to insert sync entity with server tag: %w", err) - } - tagItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - // Normal sync item - av, err = dynamodbattribute.MarshalMap(*entity) - if err != nil { - return fmt.Errorf("error marshalling sync item to insert sync entity with server tag: %w", err) - } - syncItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - items = append(items, tagItem) - items = append(items, syncItem) - } - - _, err := dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - return fmt.Errorf("error writing sync entities with server tags in a transaction: %w", err) - } - return nil -} - -// DisableSyncChain marks a chain as disabled so no further updates or commits can happen -func (dynamo *Dynamo) DisableSyncChain(clientID string) error { - now := aws.Int64(utils.UnixMilli(time.Now())) - disabledMarker := DisabledMarkerItem{ - ClientID: clientID, - ID: disabledChainID, - Reason: reasonDeleted, - Mtime: now, - Ctime: now, - } - - av, err := dynamodbattribute.MarshalMap(disabledMarker) - if err != nil { - 
return fmt.Errorf("error marshalling disabled marker: %w", err) - } - - markerInput := &dynamodb.PutItemInput{ - Item: av, - TableName: aws.String(Table), - } - - _, err = dynamo.PutItem(markerInput) - if err != nil { - return fmt.Errorf("error calling PutItem to insert sync item: %w", err) - } - - return nil -} - -// ClearServerData deletes all items for a given clientID -func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) { - syncEntities := []SyncEntity{} - pkb := expression.Key(pk) - pkv := expression.Value(clientID) - keyCond := expression.KeyEqual(pkb, pkv) - exprs := expression.NewBuilder().WithKeyCondition(keyCond) - expr, err := exprs.Build() - if err != nil { - return syncEntities, fmt.Errorf("error building expression to get updates: %w", err) - } - - input := &dynamodb.QueryInput{ - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - KeyConditionExpression: expr.KeyCondition(), - FilterExpression: expr.Filter(), - TableName: aws.String(Table), - } - - out, err := dynamo.Query(input) - if err != nil { - return syncEntities, fmt.Errorf("error doing query to get updates: %w", err) - } - count := *out.Count - - err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &syncEntities) - if err != nil { - return syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) - } - - var i, j int64 - for i = 0; i < count; i += maxTransactDeleteItemSize { - j = i + maxTransactDeleteItemSize - if j > count { - j = count - } - - items := []*dynamodb.TransactWriteItem{} - for _, item := range syncEntities[i:j] { - if item.ID == disabledChainID { - continue - } - - // Fail delete if race condition detected (modified time has changed). 
- if item.Version != nil { - cond := expression.Name("Mtime").Equal(expression.Value(*item.Mtime)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) - } - - writeItem := dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - ConditionExpression: expr.Condition(), - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - TableName: aws.String(Table), - Key: map[string]*dynamodb.AttributeValue{ - pk: { - S: aws.String(item.ClientID), - }, - sk: { - S: aws.String(item.ID), - }, - }, - }, - } - - items = append(items, &writeItem) - } else { - // If row doesn't hold Mtime, delete as usual. - writeItem := dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - TableName: aws.String(Table), - Key: map[string]*dynamodb.AttributeValue{ - pk: { - S: aws.String(item.ClientID), - }, - sk: { - S: aws.String(item.ID), - }, - }, - }, - } - - items = append(items, &writeItem) - } - - } - - _, err = dynamo.TransactWriteItems(&dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) - } - } - - return syncEntities, nil -} - -// IsSyncChainDisabled checks whether a given sync chain has been deleted -func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { - key, err := dynamodbattribute.MarshalMap(DisabledMarkerItemQuery{ - ClientID: clientID, - ID: disabledChainID, - }) - if err != nil { - return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if sync chain disabled: %w", err) - } - - return len(out.Item) > 0, nil -} - -// UpdateSyncEntity updates a 
sync item in dynamoDB. -func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, bool, error) { - primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: entity.ID} - key, err := dynamodbattribute.MarshalMap(primaryKey) - if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) - } - - // condition to ensure the request is update only... - cond := expression.AttributeExists(expression.Name(pk)) - // ...and the version matches, if applicable - if *entity.DataType != HistoryTypeID { - cond = expression.And(cond, expression.Name("Version").Equal(expression.Value(oldVersion))) - } - - update := expression.Set(expression.Name("Version"), expression.Value(entity.Version)) - update = update.Set(expression.Name("Mtime"), expression.Value(entity.Mtime)) - update = update.Set(expression.Name("Specifics"), expression.Value(entity.Specifics)) - update = update.Set(expression.Name("DataTypeMtime"), expression.Value(entity.DataTypeMtime)) - - // Update optional fields only if the value is not null. 
- if entity.UniquePosition != nil { - update = update.Set(expression.Name("UniquePosition"), expression.Value(entity.UniquePosition)) - } - if entity.ParentID != nil { - update = update.Set(expression.Name("ParentID"), expression.Value(entity.ParentID)) - } - if entity.Name != nil { - update = update.Set(expression.Name("Name"), expression.Value(entity.Name)) - } - if entity.NonUniqueName != nil { - update = update.Set(expression.Name("NonUniqueName"), expression.Value(entity.NonUniqueName)) - } - if entity.Deleted != nil { - update = update.Set(expression.Name("Deleted"), expression.Value(entity.Deleted)) - } - if entity.Folder != nil { - update = update.Set(expression.Name("Folder"), expression.Value(entity.Folder)) - } - - expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() - if err != nil { - return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) - } - - // Soft-delete a sync item with a client tag, use a transaction to delete its - // tag item too. 
- if entity.Deleted != nil && entity.ClientDefinedUniqueTag != nil && *entity.Deleted && *entity.DataType != HistoryTypeID { - pk := PrimaryKey{ - ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} - tagItemKey, err := dynamodbattribute.MarshalMap(pk) - if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) - } - - items := []*dynamodb.TransactWriteItem{} - updateSyncItem := &dynamodb.TransactWriteItem{ - Update: &dynamodb.Update{ - Key: key, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - UpdateExpression: expr.Update(), - ReturnValuesOnConditionCheckFailure: aws.String(dynamodb.ReturnValueAllOld), - TableName: aws.String(Table), - }, - } - deleteTagItem := &dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - Key: tagItemKey, - TableName: aws.String(Table), - }, - } - items = append(items, updateSyncItem) - items = append(items, deleteTagItem) - - _, err = dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - // Return conflict if the update condition fails. - if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { - for _, reason := range canceledException.CancellationReasons { - if reason.Code != nil && *reason.Code == conditionalCheckFailed { - return true, false, nil - } - } - } - - return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) - } - - // Successfully soft-delete the sync item and delete the tag item. - return false, true, nil - } - - // Not deleting a sync item with a client tag, do a normal update on sync - // item. 
- input := &dynamodb.UpdateItemInput{ - Key: key, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - UpdateExpression: expr.Update(), - ReturnValues: aws.String(dynamodb.ReturnValueAllOld), - TableName: aws.String(Table), - } - - out, err := dynamo.UpdateItem(input) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - // Return conflict if the write condition fails. - if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return true, false, nil - } - } - return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) - } - - // Unmarshal out.Attributes - oldEntity := &SyncEntity{} - err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) - if err != nil { - return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) - } - var deleted bool - if entity.Deleted == nil { // No updates on Deleted this time. - deleted = false - } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. - deleted = *entity.Deleted - } else { - deleted = !*oldEntity.Deleted && *entity.Deleted - } - return false, deleted, nil -} - -// GetUpdatesForType returns sync entities of a data type where it's mtime is -// later than the client token. -// To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a -// list of (ClientID, ID) primary keys with the given condition, then read the -// actual sync item using the list of primary keys. -func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) { - syncEntities := []SyncEntity{} - - // Get (ClientID, ID) pairs which are updates after mtime for a data type, - // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. 
- dataTypeMtimeLowerBound := strconv.Itoa(dataType) + "#" + strconv.FormatInt(clientToken+1, 10) - dataTypeMtimeUpperBound := strconv.Itoa(dataType+1) + "#0" - pkCond := expression.Key(clientIDDataTypeMtimeIdxPk).Equal(expression.Value(clientID)) - skCond := expression.KeyBetween( - expression.Key(clientIDDataTypeMtimeIdxSk), - expression.Value(dataTypeMtimeLowerBound), - expression.Value(dataTypeMtimeUpperBound)) - keyCond := expression.KeyAnd(pkCond, skCond) - exprs := expression.NewBuilder().WithKeyCondition(keyCond) - - if !fetchFolders { // Filter folder entities out if fetchFolder is false. - exprs = exprs.WithFilter( - expression.Equal(expression.Name("Folder"), expression.Value(false))) - } - - expr, err := exprs.Build() - if err != nil { - return false, syncEntities, fmt.Errorf("error building expression to get updates: %w", err) - } - - input := &dynamodb.QueryInput{ - IndexName: aws.String(clientIDDataTypeMtimeIdx), - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - KeyConditionExpression: expr.KeyCondition(), - FilterExpression: expr.Filter(), - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - Limit: aws.Int64(maxSize), - } - - out, err := dynamo.Query(input) - if err != nil { - return false, syncEntities, fmt.Errorf("error doing query to get updates: %w", err) - } - - hasChangesRemaining := false - if out.LastEvaluatedKey != nil && len(out.LastEvaluatedKey) > 0 { - hasChangesRemaining = true - } - - count := *(out.Count) - if count == 0 { // No updates - return hasChangesRemaining, syncEntities, nil - } - - // Use return (ClientID, ID) primary keys to get the actual items. 
- var outAv []map[string]*dynamodb.AttributeValue - var i, j int64 - for i = 0; i < count; i += maxBatchGetItemSize { - j = i + maxBatchGetItemSize - if j > count { - j = count - } - - batchInput := &dynamodb.BatchGetItemInput{ - RequestItems: map[string]*dynamodb.KeysAndAttributes{ - Table: { - Keys: out.Items[i:j], - }, - }, - } - - err := dynamo.BatchGetItemPages(batchInput, - func(batchOut *dynamodb.BatchGetItemOutput, last bool) bool { - outAv = append(outAv, batchOut.Responses[Table]...) - return last - }) - if err != nil { - return false, syncEntities, fmt.Errorf("error getting update items in a batch: %w", err) - } - } - - err = dynamodbattribute.UnmarshalListOfMaps(outAv, &syncEntities) - if err != nil { - return false, syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) - } - - // filter out any expired items, i.e. history sync entities over 90 days old - nowUnix := time.Now().Unix() - var filteredSyncEntities []SyncEntity - for _, syncEntity := range syncEntities { - if syncEntity.ExpirationTime != nil && *syncEntity.ExpirationTime > 0 { - if *syncEntity.ExpirationTime < nowUnix { - continue - } - } - filteredSyncEntities = append(filteredSyncEntities, syncEntity) - } - - sort.Sort(SyncEntityByMtime(filteredSyncEntities)) - return hasChangesRemaining, filteredSyncEntities, nil + OldVersion *int64 `dynamodbav:"-" db:"old_version"` } func validatePBEntity(entity *sync_pb.SyncEntity) error { @@ -752,7 +70,7 @@ func validatePBEntity(entity *sync_pb.SyncEntity) error { } // CreateDBSyncEntity converts a protobuf sync entity into a DB sync item. 
-func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string) (*SyncEntity, error) { +func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string, chainID *int64) (*SyncEntity, error) { err := validatePBEntity(entity) if err != nil { log.Error().Err(err).Msg("Invalid sync_pb.SyncEntity received") @@ -829,9 +147,19 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID } } + var idBytes []byte + idUUID, err := uuid.FromString(id) + if err != nil { + idBytes = []byte(id) + } else { + idBytes = idUUID.Bytes() + } + return &SyncEntity{ ClientID: clientID, + ChainID: chainID, ID: id, + IDBytes: idBytes, ParentID: entity.ParentIdString, Version: entity.Version, Ctime: cTime, diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go new file mode 100644 index 00000000..3dcdce70 --- /dev/null +++ b/datastore/sync_entity_dynamo.go @@ -0,0 +1,698 @@ +package datastore + +import ( + "fmt" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/aws/aws-sdk-go/service/dynamodb/expression" + "github.com/brave/go-sync/utils" +) + +const ( + maxBatchGetItemSize = 100 // Limited by AWS. + maxTransactDeleteItemSize = 10 // Limited by AWS. + clientTagItemPrefix = "Client#" + serverTagItemPrefix = "Server#" + conditionalCheckFailed = "ConditionalCheckFailed" + disabledChainID = "disabled_chain" + reasonDeleted = "deleted" +) + +// SyncEntityByClientIDID implements sort.Interface for []SyncEntity based on +// the string concatenation of ClientID and ID fields. 
+type SyncEntityByClientIDID []SyncEntity + +func (a SyncEntityByClientIDID) Len() int { return len(a) } +func (a SyncEntityByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SyncEntityByClientIDID) Less(i, j int) bool { + return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID +} + +// SyncEntityByMtime implements sort.Interface for []SyncEntity based on Mtime. +type SyncEntityByMtime []SyncEntity + +func (a SyncEntityByMtime) Len() int { return len(a) } +func (a SyncEntityByMtime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SyncEntityByMtime) Less(i, j int) bool { + return *a[i].Mtime < *a[j].Mtime +} + +// DisabledMarkerItem is used to mark sync chain as deleted in Dynamodb +type DisabledMarkerItem struct { + ClientID string + ID string + Reason string + Mtime *int64 + Ctime *int64 +} + +// DisabledMarkerItemQuery is used to query for disabled marker item in +// DynamoDB +type DisabledMarkerItemQuery struct { + ClientID string + ID string +} + +// ServerClientUniqueTagItem is used to marshal and unmarshal tag items in +// dynamoDB. +type ServerClientUniqueTagItem struct { + ClientID string // Hash key + ID string // Range key + Mtime *int64 + Ctime *int64 +} + +// ServerClientUniqueTagItemQuery is used to query for unique tag items in +// dynamoDB. +type ServerClientUniqueTagItemQuery struct { + ClientID string // Hash key + ID string // Range key +} + +// TagItemByClientIDID implements sort.Interface for []ServerClientUniqueTagItem +// based on the string concatenation of ClientID and ID fields. 
+type TagItemByClientIDID []ServerClientUniqueTagItem + +func (a TagItemByClientIDID) Len() int { return len(a) } +func (a TagItemByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a TagItemByClientIDID) Less(i, j int) bool { + return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID +} + +// getTagPrefix is a helper method to give the proper prefix for unique tag +func getTagPrefix(isServer bool) string { + if isServer { + return serverTagItemPrefix + } + return clientTagItemPrefix +} + +// NewServerClientUniqueTagItem creates a tag item which is used to ensure the +// uniqueness of server-defined or client-defined unique tags for a client. +func NewServerClientUniqueTagItem(clientID string, tag string, isServer bool) *ServerClientUniqueTagItem { + prefix := getTagPrefix(isServer) + now := aws.Int64(utils.UnixMilli(time.Now())) + + return &ServerClientUniqueTagItem{ + ClientID: clientID, + ID: prefix + tag, + Mtime: now, + Ctime: now, + } +} + +// NewServerClientUniqueTagItemQuery creates a tag item query which is used to +// determine whether a sync entity has a unique tag item or not +func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ServerClientUniqueTagItemQuery { + prefix := getTagPrefix(isServer) + + return &ServerClientUniqueTagItemQuery{ + ClientID: clientID, + ID: prefix + tag, + } +} + +// InsertSyncEntity inserts a new sync entity into dynamoDB. +// If ClientDefinedUniqueTag is not null, we will use a write transaction to +// write a sync item along with a tag item to ensure the uniqueness of the +// client tag. Otherwise, only a sync item is written into DB without using +// transactions. +func (dynamo *Dynamo) InsertSyncEntity(entity *SyncEntity) (bool, error) { + // Create a condition for inserting new items only. 
+ cond := expression.AttributeNotExists(expression.Name(pk)) + expr, err := expression.NewBuilder().WithCondition(cond).Build() + if err != nil { + return false, fmt.Errorf("error building expression to insert sync entity: %w", err) + } + + // Write tag item for all data types, except for + // the history type, which does not use tag items. + if entity.ClientDefinedUniqueTag != nil && *entity.DataType != HistoryTypeID { + items := []*dynamodb.TransactWriteItem{} + // Additional item for ensuring tag's uniqueness for a specific client. + item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ClientDefinedUniqueTag, false) + av, err := dynamodbattribute.MarshalMap(*item) + if err != nil { + return false, fmt.Errorf("error marshalling unique tag item to insert sync entity: %w", err) + } + tagItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + + // Normal sync item + av, err = dynamodbattribute.MarshalMap(*entity) + if err != nil { + return false, fmt.Errorf("error marshlling sync item to insert sync entity: %w", err) + } + syncItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + items = append(items, tagItem) + items = append(items, syncItem) + + _, err = dynamo.TransactWriteItems( + &dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + // Return conflict if insert condition failed. 
+ if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { + for _, reason := range canceledException.CancellationReasons { + if reason.Code != nil && *reason.Code == conditionalCheckFailed { + return true, fmt.Errorf("error inserting sync item with client tag: %w", err) + } + } + } + return false, fmt.Errorf("error writing tag item and sync item in a transaction to insert sync entity: %w", err) + } + + return false, nil + } + + // Normal sync item + av, err := dynamodbattribute.MarshalMap(*entity) + if err != nil { + return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err) + } + input := &dynamodb.PutItemInput{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + } + _, err = dynamo.PutItem(input) + if err != nil { + return false, fmt.Errorf("error calling PutItem to insert sync item: %w", err) + } + return false, nil +} + +// HasServerDefinedUniqueTag check the tag item to see if there is already a +// tag item exists with the tag value for a specific client. 
+func (dynamo *Dynamo) HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) { + tagItem := NewServerClientUniqueTagItemQuery(clientID, tag, true) + key, err := dynamodbattribute.MarshalMap(tagItem) + if err != nil { + return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) + } + + input := &dynamodb.GetItemInput{ + Key: key, + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + } + + out, err := dynamo.GetItem(input) + if err != nil { + return false, fmt.Errorf("error calling GetItem to check if server tag existed: %w", err) + } + + return out.Item != nil, nil +} + +func (dynamo *Dynamo) HasItem(clientID string, ID string) (bool, error) { + primaryKey := PrimaryKey{ClientID: clientID, ID: ID} + key, err := dynamodbattribute.MarshalMap(primaryKey) + + if err != nil { + return false, fmt.Errorf("error marshalling key to check if item existed: %w", err) + } + + input := &dynamodb.GetItemInput{ + Key: key, + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + } + + out, err := dynamo.GetItem(input) + if err != nil { + return false, fmt.Errorf("error calling GetItem to check if item existed: %w", err) + } + + return out.Item != nil, nil +} + +// InsertSyncEntitiesWithServerTags is used to insert sync entities with +// server-defined unique tags. To ensure the uniqueness, for each sync entity, +// we will write a tag item and a sync item. Items for all the entities in the +// array would be written into DB in one transaction. +func (dynamo *Dynamo) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error { + items := []*dynamodb.TransactWriteItem{} + for _, entity := range entities { + // Create a condition for inserting new items only. 
+ cond := expression.AttributeNotExists(expression.Name(pk)) + expr, err := expression.NewBuilder().WithCondition(cond).Build() + if err != nil { + return fmt.Errorf("error building expression to insert sync entity with server tag: %w", err) + } + + // Additional item for ensuring tag's uniqueness for a specific client. + item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ServerDefinedUniqueTag, true) + av, err := dynamodbattribute.MarshalMap(*item) + if err != nil { + return fmt.Errorf("error marshalling tag item to insert sync entity with server tag: %w", err) + } + tagItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + + // Normal sync item + av, err = dynamodbattribute.MarshalMap(*entity) + if err != nil { + return fmt.Errorf("error marshalling sync item to insert sync entity with server tag: %w", err) + } + syncItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + + items = append(items, tagItem) + items = append(items, syncItem) + } + + _, err := dynamo.TransactWriteItems( + &dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + return fmt.Errorf("error writing sync entities with server tags in a transaction: %w", err) + } + return nil +} + +// DisableSyncChain marks a chain as disabled so no further updates or commits can happen +func (dynamo *Dynamo) DisableSyncChain(clientID string) error { + now := aws.Int64(utils.UnixMilli(time.Now())) + disabledMarker := DisabledMarkerItem{ + ClientID: clientID, + ID: disabledChainID, + Reason: reasonDeleted, + Mtime: now, + Ctime: now, + } + + av, err := dynamodbattribute.MarshalMap(disabledMarker) + if err != nil { + 
return fmt.Errorf("error marshalling disabled marker: %w", err) + } + + markerInput := &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String(Table), + } + + _, err = dynamo.PutItem(markerInput) + if err != nil { + return fmt.Errorf("error calling PutItem to insert sync item: %w", err) + } + + return nil +} + +// ClearServerData deletes all items for a given clientID +func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) { + syncEntities := []SyncEntity{} + pkb := expression.Key(pk) + pkv := expression.Value(clientID) + keyCond := expression.KeyEqual(pkb, pkv) + exprs := expression.NewBuilder().WithKeyCondition(keyCond) + expr, err := exprs.Build() + if err != nil { + return syncEntities, fmt.Errorf("error building expression to get updates: %w", err) + } + + input := &dynamodb.QueryInput{ + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + KeyConditionExpression: expr.KeyCondition(), + FilterExpression: expr.Filter(), + TableName: aws.String(Table), + } + + out, err := dynamo.Query(input) + if err != nil { + return syncEntities, fmt.Errorf("error doing query to get updates: %w", err) + } + count := *out.Count + + err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &syncEntities) + if err != nil { + return syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) + } + + var i, j int64 + for i = 0; i < count; i += maxTransactDeleteItemSize { + j = i + maxTransactDeleteItemSize + if j > count { + j = count + } + + items := []*dynamodb.TransactWriteItem{} + for _, item := range syncEntities[i:j] { + if item.ID == disabledChainID { + continue + } + + // Fail delete if race condition detected (modified time has changed). 
+ if item.Version != nil { + cond := expression.Name("Mtime").Equal(expression.Value(*item.Mtime)) + expr, err := expression.NewBuilder().WithCondition(cond).Build() + if err != nil { + return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) + } + + writeItem := dynamodb.TransactWriteItem{ + Delete: &dynamodb.Delete{ + ConditionExpression: expr.Condition(), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + TableName: aws.String(Table), + Key: map[string]*dynamodb.AttributeValue{ + pk: { + S: aws.String(item.ClientID), + }, + sk: { + S: aws.String(item.ID), + }, + }, + }, + } + + items = append(items, &writeItem) + } else { + // If row doesn't hold Mtime, delete as usual. + writeItem := dynamodb.TransactWriteItem{ + Delete: &dynamodb.Delete{ + TableName: aws.String(Table), + Key: map[string]*dynamodb.AttributeValue{ + pk: { + S: aws.String(item.ClientID), + }, + sk: { + S: aws.String(item.ID), + }, + }, + }, + } + + items = append(items, &writeItem) + } + + } + + _, err = dynamo.TransactWriteItems(&dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) + } + } + + return syncEntities, nil +} + +// IsSyncChainDisabled checks whether a given sync chain has been deleted +func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { + key, err := dynamodbattribute.MarshalMap(DisabledMarkerItemQuery{ + ClientID: clientID, + ID: disabledChainID, + }) + if err != nil { + return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) + } + + input := &dynamodb.GetItemInput{ + Key: key, + TableName: aws.String(Table), + } + + out, err := dynamo.GetItem(input) + if err != nil { + return false, fmt.Errorf("error calling GetItem to check if sync chain disabled: %w", err) + } + + return len(out.Item) > 0, nil +} + +// UpdateSyncEntity updates a 
sync item in dynamoDB. +func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, bool, error) { + primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: entity.ID} + key, err := dynamodbattribute.MarshalMap(primaryKey) + if err != nil { + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + } + + // condition to ensure the request is update only... + cond := expression.AttributeExists(expression.Name(pk)) + // ...and the version matches, if applicable + if *entity.DataType != HistoryTypeID { + cond = expression.And(cond, expression.Name("Version").Equal(expression.Value(oldVersion))) + } + + update := expression.Set(expression.Name("Version"), expression.Value(entity.Version)) + update = update.Set(expression.Name("Mtime"), expression.Value(entity.Mtime)) + update = update.Set(expression.Name("Specifics"), expression.Value(entity.Specifics)) + update = update.Set(expression.Name("DataTypeMtime"), expression.Value(entity.DataTypeMtime)) + + // Update optional fields only if the value is not null. 
+ if entity.UniquePosition != nil { + update = update.Set(expression.Name("UniquePosition"), expression.Value(entity.UniquePosition)) + } + if entity.ParentID != nil { + update = update.Set(expression.Name("ParentID"), expression.Value(entity.ParentID)) + } + if entity.Name != nil { + update = update.Set(expression.Name("Name"), expression.Value(entity.Name)) + } + if entity.NonUniqueName != nil { + update = update.Set(expression.Name("NonUniqueName"), expression.Value(entity.NonUniqueName)) + } + if entity.Deleted != nil { + update = update.Set(expression.Name("Deleted"), expression.Value(entity.Deleted)) + } + if entity.Folder != nil { + update = update.Set(expression.Name("Folder"), expression.Value(entity.Folder)) + } + + expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() + if err != nil { + return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) + } + + // Soft-delete a sync item with a client tag, use a transaction to delete its + // tag item too. 
+ if entity.Deleted != nil && entity.ClientDefinedUniqueTag != nil && *entity.Deleted && *entity.DataType != HistoryTypeID { + pk := PrimaryKey{ + ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} + tagItemKey, err := dynamodbattribute.MarshalMap(pk) + if err != nil { + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + } + + items := []*dynamodb.TransactWriteItem{} + updateSyncItem := &dynamodb.TransactWriteItem{ + Update: &dynamodb.Update{ + Key: key, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + UpdateExpression: expr.Update(), + ReturnValuesOnConditionCheckFailure: aws.String(dynamodb.ReturnValueAllOld), + TableName: aws.String(Table), + }, + } + deleteTagItem := &dynamodb.TransactWriteItem{ + Delete: &dynamodb.Delete{ + Key: tagItemKey, + TableName: aws.String(Table), + }, + } + items = append(items, updateSyncItem) + items = append(items, deleteTagItem) + + _, err = dynamo.TransactWriteItems( + &dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + // Return conflict if the update condition fails. + if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { + for _, reason := range canceledException.CancellationReasons { + if reason.Code != nil && *reason.Code == conditionalCheckFailed { + return true, false, nil + } + } + } + + return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) + } + + // Successfully soft-delete the sync item and delete the tag item. + return false, true, nil + } + + // Not deleting a sync item with a client tag, do a normal update on sync + // item. 
+ input := &dynamodb.UpdateItemInput{ + Key: key, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + UpdateExpression: expr.Update(), + ReturnValues: aws.String(dynamodb.ReturnValueAllOld), + TableName: aws.String(Table), + } + + out, err := dynamo.UpdateItem(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + // Return conflict if the write condition fails. + if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { + return true, false, nil + } + } + return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) + } + + // Unmarshal out.Attributes + oldEntity := &SyncEntity{} + err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) + if err != nil { + return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) + } + var deleted bool + if entity.Deleted == nil { // No updates on Deleted this time. + deleted = false + } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. + deleted = *entity.Deleted + } else { + deleted = !*oldEntity.Deleted && *entity.Deleted + } + return false, deleted, nil +} + +// GetUpdatesForType returns sync entities of a data type where it's mtime is +// later than the client token. +// To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a +// list of (ClientID, ID) primary keys with the given condition, then read the +// actual sync item using the list of primary keys. +func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) { + syncEntities := []SyncEntity{} + + // Get (ClientID, ID) pairs which are updates after mtime for a data type, + // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. 
+ dataTypeMtimeLowerBound := strconv.Itoa(dataType) + "#" + strconv.FormatInt(clientToken+1, 10) + dataTypeMtimeUpperBound := strconv.Itoa(dataType+1) + "#0" + pkCond := expression.Key(clientIDDataTypeMtimeIdxPk).Equal(expression.Value(clientID)) + skCond := expression.KeyBetween( + expression.Key(clientIDDataTypeMtimeIdxSk), + expression.Value(dataTypeMtimeLowerBound), + expression.Value(dataTypeMtimeUpperBound)) + keyCond := expression.KeyAnd(pkCond, skCond) + exprs := expression.NewBuilder().WithKeyCondition(keyCond) + + if !fetchFolders { // Filter folder entities out if fetchFolder is false. + exprs = exprs.WithFilter( + expression.Equal(expression.Name("Folder"), expression.Value(false))) + } + + expr, err := exprs.Build() + if err != nil { + return false, syncEntities, fmt.Errorf("error building expression to get updates: %w", err) + } + + input := &dynamodb.QueryInput{ + IndexName: aws.String(clientIDDataTypeMtimeIdx), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + KeyConditionExpression: expr.KeyCondition(), + FilterExpression: expr.Filter(), + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + Limit: aws.Int64(maxSize), + } + + out, err := dynamo.Query(input) + if err != nil { + return false, syncEntities, fmt.Errorf("error doing query to get updates: %w", err) + } + + hasChangesRemaining := false + if out.LastEvaluatedKey != nil && len(out.LastEvaluatedKey) > 0 { + hasChangesRemaining = true + } + + count := *(out.Count) + if count == 0 { // No updates + return hasChangesRemaining, syncEntities, nil + } + + // Use return (ClientID, ID) primary keys to get the actual items. 
+ var outAv []map[string]*dynamodb.AttributeValue + var i, j int64 + for i = 0; i < count; i += maxBatchGetItemSize { + j = i + maxBatchGetItemSize + if j > count { + j = count + } + + batchInput := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ + Table: { + Keys: out.Items[i:j], + }, + }, + } + + err := dynamo.BatchGetItemPages(batchInput, + func(batchOut *dynamodb.BatchGetItemOutput, last bool) bool { + outAv = append(outAv, batchOut.Responses[Table]...) + return last + }) + if err != nil { + return false, syncEntities, fmt.Errorf("error getting update items in a batch: %w", err) + } + } + + err = dynamodbattribute.UnmarshalListOfMaps(outAv, &syncEntities) + if err != nil { + return false, syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) + } + + // filter out any expired items, i.e. history sync entities over 90 days old + nowUnix := time.Now().Unix() + var filteredSyncEntities []SyncEntity + for _, syncEntity := range syncEntities { + if syncEntity.ExpirationTime != nil && *syncEntity.ExpirationTime > 0 { + if *syncEntity.ExpirationTime < nowUnix { + continue + } + } + filteredSyncEntities = append(filteredSyncEntities, syncEntity) + } + + sort.Sort(SyncEntityByMtime(filteredSyncEntities)) + return hasChangesRemaining, filteredSyncEntities, nil +} diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go new file mode 100644 index 00000000..6904265a --- /dev/null +++ b/datastore/sync_entity_sql.go @@ -0,0 +1,158 @@ +package datastore + +import ( + "database/sql" + "encoding/hex" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" +) + +const chainIDSelectQuery = "SELECT id FROM chains WHERE client_id = $1 FOR UPDATE" + +type ChainRow struct { + ID *int64 +} + +type MigrationStatus struct { + ChainID int64 `db:"chain_id"` + DataType int `db:"data_type"` + EarliestMtime int64 `db:"earliest_mtime"` +} + +func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (bool, 
error) { + res, err := tx.NamedExec(` + INSERT INTO entities ( + id, chain_id, data_type, ctime, mtime, specifics, client_defined_unique_tag, + server_defined_unique_tag, deleted, folder, version, name, originator_cache_guid, + originator_client_item_id, parent_id, non_unique_name, unique_position + ) VALUES ( + :id, :chain_id, :data_type, :ctime, :mtime, :specifics, :client_defined_unique_tag, + :server_defined_unique_tag, :deleted, :folder, :version, :name, :originator_cache_guid, + :originator_client_item_id, :parent_id, :non_unique_name, :unique_position + ) ON CONFLICT DO NOTHING + `, entity) + if err != nil { + return false, fmt.Errorf("failed to insert entity: %w", err) + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return false, fmt.Errorf("failed to get rows affected after insert: %w", err) + } + + // if rows affected is 0, then there must be a conflict. return true to indicate this condition. + return rowsAffected == 0, nil +} + +func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, idBytes []byte) (bool, error) { + var exists bool + err := tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND id = $2)", chainId, idBytes).Scan(&exists) + if err != nil { + return false, fmt.Errorf("failed to check existence of item: %w", err) + } + return exists, nil +} + +func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, data_type_earliest_mtime_map map[int]int64) error { + var statuses []MigrationStatus + for dataType, earliestMtime := range data_type_earliest_mtime_map { + statuses = append(statuses, MigrationStatus{ + ChainID: chainID, + DataType: dataType, + EarliestMtime: earliestMtime, + }) + } + + _, err := tx.NamedExec(` + INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) + VALUES (:chain_id, :data_type, :earliest_mtime) + ON CONFLICT (chain_id, data_type) DO UPDATE + SET earliest_mtime = $3 + WHERE earliest_mtime IS NOT NULL AND earliest_mtime > :earliest_mtime + `, 
statuses) + if err != nil { + return fmt.Errorf("failed to update dynamo migration statuses: %w", err) + } + + return nil +} + +func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, bool, error) { + whereClause := "WHERE id = :id AND chain_id = :chain_id" + if *entity.DataType != HistoryTypeID { + entity.OldVersion = &oldVersion + whereClause += " AND version = :old_version" + } + + var query string + + deleted := entity.Deleted != nil && *entity.Deleted + if deleted { + query = `DELETE FROM entities ` + whereClause + } else { + var updateFields []string + if entity.UniquePosition != nil { + updateFields = append(updateFields, "unique_position = :unique_position") + } + if entity.ParentID != nil { + updateFields = append(updateFields, "parent_id = :parent_id") + } + if entity.Name != nil { + updateFields = append(updateFields, "name = :name") + } + if entity.NonUniqueName != nil { + updateFields = append(updateFields, "non_unique_name = :non_unique_name") + } + if entity.Folder != nil { + updateFields = append(updateFields, "folder = :folder") + } + + var joinedUpdateFields string + if len(updateFields) > 0 { + joinedUpdateFields = ", " + strings.Join(updateFields, ", ") + } + query = ` + UPDATE entities + SET version = :version, + mtime = :mtime, + specifics = :specifics + ` + joinedUpdateFields + whereClause + } + + result, err := tx.NamedExec(query, entity) + if err != nil { + return false, false, fmt.Errorf("error updating entity: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return false, false, fmt.Errorf("error getting rows affected after update: %w", err) + } + + return rowsAffected == 0, deleted, nil +} + +func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID *string) (*int64, error) { + // Get chain ID and lock for updates + clientIDBytes, err := hex.DecodeString(*clientID) + if err != nil { + return nil, fmt.Errorf("failed to decode clientID: %w", err) + } + row := 
ChainRow{} + if err := tx.Get(&row, chainIDSelectQuery, clientIDBytes); err != nil { + if err != sql.ErrNoRows { + return nil, fmt.Errorf("failed to get chain id: %w", err) + } + _, err := tx.Exec("INSERT INTO chains (client_id) VALUES ($1)", clientIDBytes) + if err != nil { + return nil, fmt.Errorf("failed to insert chain: %w", err) + } + + if err = tx.Get(&row, chainIDSelectQuery, clientIDBytes); err != nil { + return nil, fmt.Errorf("failed to get chain id: %w", err) + } + } + + return row.ID, nil +} diff --git a/docker-compose.yml b/docker-compose.yml index 5a7b68d3..0b1090db 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -28,6 +28,7 @@ services: - AWS_REGION=us-west-2 - AWS_ENDPOINT=http://dynamo-local:8000 - REDIS_URL=redis:6379 + - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable web: build: context: . @@ -54,6 +55,7 @@ services: - AWS_ENDPOINT=http://dynamo-local:8000 - TABLE_NAME=client-entity-dev - REDIS_URL=redis:6379 + - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable dynamo-local: build: context: . 
@@ -70,3 +72,12 @@ services: - ALLOW_EMPTY_PASSWORD=yes networks: - sync + postgres: + image: public.ecr.aws/docker/library/postgres:16 + ports: + - "5434:5432" + environment: + - POSTGRES_USER=sync + - POSTGRES_PASSWORD=password + networks: + - sync diff --git a/go.mod b/go.mod index 7a991df2..67ec98eb 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,13 @@ require ( google.golang.org/protobuf v1.34.1 ) +require ( + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/lib/pq v1.10.9 // indirect + go.uber.org/atomic v1.10.0 // indirect +) + require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -22,9 +29,11 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect + github.com/golang-migrate/migrate/v4 v4.17.1 github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.4.0 github.com/kr/text v0.2.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -38,7 +47,7 @@ require ( github.com/shengdoushi/base58 v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/throttled/throttled v2.2.5+incompatible // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.20.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index f63ee4ed..f3f8ecdc 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,10 @@ +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw= @@ -32,15 +39,23 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod 
h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/getsentry/sentry-go v0.28.0 h1:7Rqx9M3ythTKy2J6uZLHmc8Sz9OGgIlseuO1iBX/s0M= @@ -55,33 +70,83 @@ github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocql/gocql 
v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= +github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod 
h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mutecomm/go-sqlcipher/v4 
v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -100,6 +165,7 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= @@ -119,12 
+185,22 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/throttled/throttled v2.2.5+incompatible h1:65UB52X0qNTYiT0Sohp8qLYVFwZQPDw85uSa65OljjQ= github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= 
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -138,6 +214,7 @@ golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= @@ -147,8 +224,21 @@ google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= 
+modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= diff --git a/middleware/disabled_chain.go b/middleware/disabled_chain.go index 282e5382..4a52fa3b 100644 --- a/middleware/disabled_chain.go +++ b/middleware/disabled_chain.go @@ -21,7 +21,7 @@ func DisabledChain(next http.Handler) http.Handler { return } - db, ok := ctx.Value(syncContext.ContextKeyDatastore).(datastore.Datastore) + db, ok := ctx.Value(syncContext.ContextKeyDatastore).(datastore.DynamoDatastore) if !ok { http.Error(w, "unable to complete request", http.StatusInternalServerError) return diff --git a/migrations/20240904202925_init.down.postgres b/migrations/20240904202925_init.down.postgres new file mode 100644 index 00000000..9eb17df2 --- /dev/null +++ b/migrations/20240904202925_init.down.postgres @@ -0,0 +1,3 @@ +DROP TABLE entities; +DROP TABLE dynamo_migration_statuses; +DROP TABLE chains; diff --git a/migrations/20240904202925_init.up.postgres b/migrations/20240904202925_init.up.postgres new file mode 100644 index 00000000..316f5e5d --- /dev/null +++ b/migrations/20240904202925_init.up.postgres @@ -0,0 +1,38 @@ +CREATE TABLE chains ( + id BIGSERIAL PRIMARY KEY, + client_id BYTEA NOT NULL, + UNIQUE (client_id) +); + +CREATE TABLE dynamo_migration_statuses ( + chain_id BIGINT REFERENCES chains(id), + data_type INTEGER, + -- null 
earliest_mtime value indicates that all entities have been migrated + earliest_mtime BIGINT, + PRIMARY KEY (chain_id, data_type) +); + +CREATE TABLE entities ( + id BYTEA STORAGE PLAIN, + chain_id BIGINT NOT NULL REFERENCES chains(id), + data_type INTEGER NOT NULL, + ctime TIMESTAMP NOT NULL, + mtime TIMESTAMP NOT NULL, + specifics BYTEA STORAGE EXTERNAL NOT NULL , + client_defined_unique_tag TEXT STORAGE PLAIN, + server_defined_unique_tag TEXT STORAGE PLAIN, + folder BOOLEAN, + version BIGINT NOT NULL, + name TEXT STORAGE PLAIN, + originator_cache_guid TEXT STORAGE PLAIN, + originator_client_item_id TEXT STORAGE PLAIN, + parent_id TEXT STORAGE PLAIN, + non_unique_name TEXT STORAGE PLAIN, + unique_position BYTEA STORAGE PLAIN, + PRIMARY KEY (id, chain_id), + UNIQUE (chain_id, client_defined_unique_tag) +); +CREATE INDEX entities_chain_id_idx ON entities (chain_id); +CREATE INDEX entities_data_type_mtime_idx ON entities (data_type, mtime); +-- or maybe make a partial index for history entities and mtime, while keeping the chainid datattype and mtime index +-- CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); diff --git a/server/server.go b/server/server.go index 67d854e0..90150c75 100644 --- a/server/server.go +++ b/server/server.go @@ -63,22 +63,28 @@ func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, r.Use(batware.BearerToken) r.Use(middleware.CommonResponseHeaders) - db, err := datastore.NewDynamo() + dynamoDB, err := datastore.NewDynamo() if err != nil { sentry.CaptureException(err) - log.Panic().Err(err).Msg("Must be able to init datastore to start") + log.Panic().Err(err).Msg("Must be able to init Dynamo datastore to start") + } + + sqlDB, err := datastore.NewSQLDB() + if err != nil { + sentry.CaptureException(err) + log.Panic().Err(err).Msg("Must be able to init SQL datastore to start") } redis := cache.NewRedisClient() cache := cache.NewCache(cache.NewRedisClientWithPrometheus(redis, 
"redis")) // Provide datastore & cache via context - ctx = context.WithValue(ctx, syncContext.ContextKeyDatastore, db) + ctx = context.WithValue(ctx, syncContext.ContextKeyDatastore, dynamoDB) ctx = context.WithValue(ctx, syncContext.ContextKeyCache, &cache) r.Mount("/v2", controller.SyncRouter( cache, - datastore.NewDatastoreWithPrometheus(db, "dynamo"))) + datastore.NewDatastoreWithPrometheus(dynamoDB, "dynamo"), *sqlDB)) r.Get("/metrics", batware.Metrics()) log.Info(). From 5db03395392f4c5af34131d345b1e3ec46ca8e95 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Fri, 6 Sep 2024 18:31:26 -0700 Subject: [PATCH 02/19] Various changes to SQL POC --- command/command.go | 39 ++++-- datastore/datastore.go | 2 +- datastore/instrumented_datastore.go | 2 +- datastore/sql.go | 3 +- datastore/sync_entity.go | 27 +++- datastore/sync_entity_dynamo.go | 4 +- datastore/sync_entity_sql.go | 150 ++++++++++++++------- migrations/20240904202925_init.up.postgres | 8 +- 8 files changed, 158 insertions(+), 77 deletions(-) diff --git a/command/command.go b/command/command.go index 159c3cd8..d745df48 100644 --- a/command/command.go +++ b/command/command.go @@ -38,15 +38,20 @@ const ( // handleGetUpdatesRequest handles GetUpdatesMessage and fills // GetUpdatesResponse. Target sync entities in the database will be updated or // deleted based on the client's requests. 
-func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, db datastore.DynamoDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS // default value, might be changed later isNewClient := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_NEW_CLIENT isPoll := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_PERIODIC + + var hasChangesRemaining bool + var syncEntities []datastore.SyncEntity + var err error + if isNewClient { // Reject the request if client has >= 50 devices in the chain. activeDevices := 0 for { - hasChangesRemaining, syncEntities, err := db.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, int64(maxGUBatchSize)) + hasChangesRemaining, syncEntities, err = dynamoDB.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, maxGUBatchSize) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", deviceInfoTypeID) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -73,7 +78,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } // Insert initial records if needed. 
- err := InsertServerDefinedUniqueEntities(db, clientID) + err := InsertServerDefinedUniqueEntities(dynamoDB, clientID) if err != nil { log.Error().Err(err).Msg("Create server defined unique entities failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -95,6 +100,11 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag maxSize := maxGUBatchSize + chainID, err := sqlDB.GetChainID(nil, clientID, false) + if err != nil { + return nil, err + } + // Process from_progress_marker guRsp.NewProgressMarker = make([]*sync_pb.DataTypeProgressMarker, len(guMsg.FromProgressMarker)) guRsp.Entries = make([]*sync_pb.SyncEntity, 0, maxSize) @@ -139,8 +149,13 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag continue } - curMaxSize := int64(maxSize) - int64(len(guRsp.Entries)) - hasChangesRemaining, entities, err := db.GetUpdatesForType(int(*fromProgressMarker.DataTypeId), token, fetchFolders, clientID, curMaxSize) + curMaxSize := maxSize - len(guRsp.Entries) + dataType := int(*fromProgressMarker.DataTypeId) + if _, isStoredInSQL := allowedSQLDataTypes[dataType]; isStoredInSQL { + hasChangesRemaining, syncEntities, err = sqlDB.GetUpdatesForType(dataType, token, fetchFolders, *chainID, curMaxSize) + } else { + hasChangesRemaining, syncEntities, err = dynamoDB.GetUpdatesForType(int(*fromProgressMarker.DataTypeId), token, fetchFolders, clientID, curMaxSize) + } if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", *fromProgressMarker.DataTypeId) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -153,7 +168,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // which is essential for clients when initializing sync engine with nigori // type. Return a transient error for clients to re-request in this case. 
if isNewClient && *fromProgressMarker.DataTypeId == nigoriTypeID && - token == 0 && len(entities) == 0 { + token == 0 && len(syncEntities) == 0 { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, fmt.Errorf("nigori root folder entity is not ready yet") } @@ -164,8 +179,8 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // Fill the PB entry from above DB entries until maxSize is reached. j := 0 - for ; j < len(entities) && len(guRsp.Entries) < cap(guRsp.Entries); j++ { - entity, err := datastore.CreatePBSyncEntity(&entities[j]) + for ; j < len(syncEntities) && len(guRsp.Entries) < cap(guRsp.Entries); j++ { + entity, err := datastore.CreatePBSyncEntity(&syncEntities[j]) if err != nil { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, fmt.Errorf("error creating protobuf sync entity from DB entity: %w", err) @@ -175,7 +190,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // If entities are appended, use the lastest mtime as returned token. if j != 0 { guRsp.NewProgressMarker[i].Token = make([]byte, binary.MaxVarintLen64) - binary.PutVarint(guRsp.NewProgressMarker[i].Token, *entities[j-1].Mtime) + binary.PutVarint(guRsp.NewProgressMarker[i].Token, *syncEntities[j-1].Mtime) } // Save (clientID#dataType, mtime) into cache after querying from DB. 
@@ -190,7 +205,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag if j == 0 { mtime = token } else { - mtime = *entities[j-1].Mtime + mtime = *syncEntities[j-1].Mtime } cache.SetTypeMtime(context.Background(), clientID, int(*fromProgressMarker.DataTypeId), mtime) } @@ -219,7 +234,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } defer trx.Rollback() - chainID, err := sqlDB.GetAndLockChainID(trx, &clientID) + chainID, err := sqlDB.GetChainID(trx, clientID, true) if err != nil { return nil, err } @@ -426,7 +441,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_GET_UPDATES { guRsp := &sync_pb.GetUpdatesResponse{} pbRsp.GetUpdates = guRsp - pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, dynamoDB, clientID) + pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, dynamoDB, sqlDB, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) diff --git a/datastore/datastore.go b/datastore/datastore.go index bfcc73b0..4d069192 100644 --- a/datastore/datastore.go +++ b/datastore/datastore.go @@ -12,7 +12,7 @@ type DynamoDatastore interface { // client token for a given client. Besides the array of sync entities, a // boolean value indicating whether there are more updates to query in the // next batch is returned. - GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) + GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (bool, []SyncEntity, error) // Check if a server-defined unique tag is in the datastore. HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) // Get the count of sync items for a client. 
diff --git a/datastore/instrumented_datastore.go b/datastore/instrumented_datastore.go index 964bb3b3..51dc4043 100644 --- a/datastore/instrumented_datastore.go +++ b/datastore/instrumented_datastore.go @@ -80,7 +80,7 @@ func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *D } // GetUpdatesForType implements Datastore -func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (b1 bool, sa1 []SyncEntity, err error) { +func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" diff --git a/datastore/sql.go b/datastore/sql.go index 877a7adc..6c2b724a 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -16,6 +16,7 @@ const sqlURLEnvKey = "SQL_DATABASE_URL" // SQLDB is a Datastore wrapper around a SQL-based database. type SQLDB struct { *sqlx.DB + insertQuery string } // NewSQLDB returns a SQLDB client to be used. @@ -42,6 +43,6 @@ func NewSQLDB() (*SQLDB, error) { return nil, fmt.Errorf("Failed to connect to SQL DB: %v", err) } - wrappedDB := SQLDB{db} + wrappedDB := SQLDB{db, buildInsertQuery()} return &wrappedDB, nil } diff --git a/datastore/sync_entity.go b/datastore/sync_entity.go index 56078e38..a5ccf70f 100644 --- a/datastore/sync_entity.go +++ b/datastore/sync_entity.go @@ -26,16 +26,17 @@ const ( type SyncEntity struct { ClientID string // ChainID is a synthetic key that is connected to the client id in the SQL db. 
- ChainID *int64 `dynamodbav:"-" db:"chain_id"` - ID string + ChainID *int64 `dynamodbav:"-" db:"chain_id"` + ID string `db:"-"` IDBytes []byte `dynamodbav:"-" db:"id"` + IDIsUUID bool `dynamodbav:"-" db:"id_is_uuid"` ParentID *string `dynamodbav:",omitempty" db:"parent_id"` Version *int64 Mtime *int64 Ctime *int64 Name *string `dynamodbav:",omitempty"` NonUniqueName *string `dynamodbav:",omitempty" db:"non_unique_name"` - ServerDefinedUniqueTag *string `dynamodbav:",omitempty" db:"server_defined_unique_tags"` + ServerDefinedUniqueTag *string `dynamodbav:",omitempty" db:"server_defined_unique_tag"` Deleted *bool OriginatorCacheGUID *string `dynamodbav:",omitempty" db:"originator_cache_guid"` OriginatorClientItemID *string `dynamodbav:",omitempty" db:"originator_client_item_id"` @@ -43,7 +44,7 @@ type SyncEntity struct { DataType *int `db:"data_type"` Folder *bool ClientDefinedUniqueTag *string `dynamodbav:",omitempty" db:"client_defined_unique_tag"` - UniquePosition []byte `dynamodbav:",omitempty" db:"server_defined_unique_tag"` + UniquePosition []byte `dynamodbav:",omitempty" db:"unique_position"` DataTypeMtime *string ExpirationTime *int64 OldVersion *int64 `dynamodbav:"-" db:"old_version"` @@ -148,11 +149,13 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID } var idBytes []byte + var idIsUUID bool idUUID, err := uuid.FromString(id) if err != nil { idBytes = []byte(id) } else { idBytes = idUUID.Bytes() + idIsUUID = true } return &SyncEntity{ @@ -160,6 +163,7 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID ChainID: chainID, ID: id, IDBytes: idBytes, + IDIsUUID: idIsUUID, ParentID: entity.ParentIdString, Version: entity.Version, Ctime: cTime, @@ -182,8 +186,21 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID // CreatePBSyncEntity converts a DB sync item to a protobuf sync entity. 
func CreatePBSyncEntity(entity *SyncEntity) (*sync_pb.SyncEntity, error) { + id := entity.ID + if len(id) == 0 { + if entity.IDIsUUID { + idUUID, err := uuid.FromBytes(entity.IDBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse uuid from bytes: %w", err) + } + id = idUUID.String() + } else { + id = string(entity.IDBytes) + } + } + pbEntity := &sync_pb.SyncEntity{ - IdString: &entity.ID, + IdString: &id, ParentIdString: entity.ParentID, Version: entity.Version, Mtime: entity.Mtime, diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index 3dcdce70..a220ea73 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -598,7 +598,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo // To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a // list of (ClientID, ID) primary keys with the given condition, then read the // actual sync item using the list of primary keys. -func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) { +func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (bool, []SyncEntity, error) { syncEntities := []SyncEntity{} // Get (ClientID, ID) pairs which are updates after mtime for a data type, @@ -631,7 +631,7 @@ func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFo FilterExpression: expr.Filter(), ProjectionExpression: aws.String(projPk), TableName: aws.String(Table), - Limit: aws.Int64(maxSize), + Limit: aws.Int64(int64(maxSize)), } out, err := dynamo.Query(input) diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 6904265a..41729dcc 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -9,7 +9,14 @@ import ( "github.com/jmoiron/sqlx" ) -const chainIDSelectQuery = "SELECT id FROM chains 
WHERE client_id = $1 FOR UPDATE" +const chainIDSelectQuery = "SELECT id FROM chains WHERE client_id = $1" + +var fieldsToInsert = []string{ + "id", "chain_id", "data_type", "ctime", "mtime", "id_is_uuid", "specifics", + "deleted", "client_defined_unique_tag", "server_defined_unique_tag", "folder", "version", + "name", "originator_cache_guid", "originator_client_item_id", "parent_id", "non_unique_name", + "unique_position", +} type ChainRow struct { ID *int64 @@ -21,18 +28,33 @@ type MigrationStatus struct { EarliestMtime int64 `db:"earliest_mtime"` } +type CommonSQLX interface { + Get(dest interface{}, query string, args ...interface{}) error + Exec(query string, args ...any) (sql.Result, error) +} + +func buildInsertQuery() string { + var insertValues []string + var setValues []string + for _, field := range fieldsToInsert { + insertValues = append(insertValues, ":"+field) + setValues = append(setValues, field+" = EXCLUDED."+field) + } + joinedFields := strings.Join(fieldsToInsert, ", ") + joinedInsertValues := strings.Join(insertValues, ", ") + joinedSetValues := strings.Join(setValues, ", ") + // We only want to update an existing row if it was previously deleted. 
+ // If it was not deleted, then it should be considered a conflict + return `INSERT INTO entities (` + joinedFields + `) VALUES (` + joinedInsertValues + + `) ON CONFLICT (chain_id, client_defined_unique_tag) DO UPDATE SET ` + + joinedSetValues + ` WHERE entities.deleted = true` +} + func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (bool, error) { - res, err := tx.NamedExec(` - INSERT INTO entities ( - id, chain_id, data_type, ctime, mtime, specifics, client_defined_unique_tag, - server_defined_unique_tag, deleted, folder, version, name, originator_cache_guid, - originator_client_item_id, parent_id, non_unique_name, unique_position - ) VALUES ( - :id, :chain_id, :data_type, :ctime, :mtime, :specifics, :client_defined_unique_tag, - :server_defined_unique_tag, :deleted, :folder, :version, :name, :originator_cache_guid, - :originator_client_item_id, :parent_id, :non_unique_name, :unique_position - ) ON CONFLICT DO NOTHING - `, entity) + if entity.Deleted != nil && *entity.Deleted { + return true, nil + } + res, err := tx.NamedExec(sqlDB.insertQuery, entity) if err != nil { return false, fmt.Errorf("failed to insert entity: %w", err) } @@ -67,7 +89,7 @@ func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, da _, err := tx.NamedExec(` INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) VALUES (:chain_id, :data_type, :earliest_mtime) - ON CONFLICT (chain_id, data_type) DO UPDATE + ON CONFLICT DO UPDATE SET earliest_mtime = $3 WHERE earliest_mtime IS NOT NULL AND earliest_mtime > :earliest_mtime `, statuses) @@ -79,46 +101,42 @@ func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, da } func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, bool, error) { - whereClause := "WHERE id = :id AND chain_id = :chain_id" + whereClause := " WHERE id = :id AND chain_id = :chain_id AND deleted = false" if *entity.DataType != HistoryTypeID { 
entity.OldVersion = &oldVersion whereClause += " AND version = :old_version" } - var query string - - deleted := entity.Deleted != nil && *entity.Deleted - if deleted { - query = `DELETE FROM entities ` + whereClause - } else { - var updateFields []string - if entity.UniquePosition != nil { - updateFields = append(updateFields, "unique_position = :unique_position") - } - if entity.ParentID != nil { - updateFields = append(updateFields, "parent_id = :parent_id") - } - if entity.Name != nil { - updateFields = append(updateFields, "name = :name") - } - if entity.NonUniqueName != nil { - updateFields = append(updateFields, "non_unique_name = :non_unique_name") - } - if entity.Folder != nil { - updateFields = append(updateFields, "folder = :folder") - } + var updateFields []string + if entity.UniquePosition != nil { + updateFields = append(updateFields, "unique_position = :unique_position") + } + if entity.ParentID != nil { + updateFields = append(updateFields, "parent_id = :parent_id") + } + if entity.Name != nil { + updateFields = append(updateFields, "name = :name") + } + if entity.NonUniqueName != nil { + updateFields = append(updateFields, "non_unique_name = :non_unique_name") + } + if entity.Folder != nil { + updateFields = append(updateFields, "folder = :folder") + } + if entity.Deleted != nil { + updateFields = append(updateFields, "deleted = :deleted") + } - var joinedUpdateFields string - if len(updateFields) > 0 { - joinedUpdateFields = ", " + strings.Join(updateFields, ", ") - } - query = ` - UPDATE entities - SET version = :version, - mtime = :mtime, - specifics = :specifics - ` + joinedUpdateFields + whereClause + var joinedUpdateFields string + if len(updateFields) > 0 { + joinedUpdateFields = ", " + strings.Join(updateFields, ", ") } + query := ` + UPDATE entities + SET version = :version, + mtime = :mtime, + specifics = :specifics + ` + joinedUpdateFields + whereClause result, err := tx.NamedExec(query, entity) if err != nil { @@ -130,29 +148,57 @@ func 
(sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion return false, false, fmt.Errorf("error getting rows affected after update: %w", err) } - return rowsAffected == 0, deleted, nil + return rowsAffected == 0, entity.Deleted != nil && *entity.Deleted, nil } -func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID *string) (*int64, error) { +func (sqlDB *SQLDB) GetChainID(tx *sqlx.Tx, clientID string, acquireUpdateLock bool) (*int64, error) { // Get chain ID and lock for updates - clientIDBytes, err := hex.DecodeString(*clientID) + clientIDBytes, err := hex.DecodeString(clientID) if err != nil { return nil, fmt.Errorf("failed to decode clientID: %w", err) } row := ChainRow{} - if err := tx.Get(&row, chainIDSelectQuery, clientIDBytes); err != nil { + + var lockClause string + if acquireUpdateLock { + lockClause = " FOR UPDATE" + } + + var commonSQLX CommonSQLX + if tx != nil { + commonSQLX = tx + } else { + commonSQLX = sqlDB + } + + if err := commonSQLX.Get(&row, chainIDSelectQuery+lockClause, clientIDBytes); err != nil { if err != sql.ErrNoRows { return nil, fmt.Errorf("failed to get chain id: %w", err) } - _, err := tx.Exec("INSERT INTO chains (client_id) VALUES ($1)", clientIDBytes) + _, err := commonSQLX.Exec("INSERT INTO chains (client_id) VALUES ($1)", clientIDBytes) if err != nil { return nil, fmt.Errorf("failed to insert chain: %w", err) } - if err = tx.Get(&row, chainIDSelectQuery, clientIDBytes); err != nil { + if err = commonSQLX.Get(&row, chainIDSelectQuery+lockClause, clientIDBytes); err != nil { return nil, fmt.Errorf("failed to get chain id: %w", err) } } return row.ID, nil } + +func (sqlDB *SQLDB) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) { + var additionalCondition string + if !fetchFolders { + additionalCondition = "AND folder = false " + } + query := `SELECT * FROM entities + WHERE chain_id = $1 AND data_type = $2 AND mtime > $3 ` + 
additionalCondition + `ORDER BY mtime LIMIT $4` + + entities := []SyncEntity{} + if err := sqlDB.Select(&entities, query, chainID, dataType, clientToken, maxSize); err != nil { + return false, nil, fmt.Errorf("failed to get entity updates: %w", err) + } + return len(entities) == maxSize, entities, nil +} diff --git a/migrations/20240904202925_init.up.postgres b/migrations/20240904202925_init.up.postgres index 316f5e5d..0fbee675 100644 --- a/migrations/20240904202925_init.up.postgres +++ b/migrations/20240904202925_init.up.postgres @@ -16,9 +16,11 @@ CREATE TABLE entities ( id BYTEA STORAGE PLAIN, chain_id BIGINT NOT NULL REFERENCES chains(id), data_type INTEGER NOT NULL, - ctime TIMESTAMP NOT NULL, - mtime TIMESTAMP NOT NULL, - specifics BYTEA STORAGE EXTERNAL NOT NULL , + ctime BIGINT NOT NULL, + mtime BIGINT NOT NULL, + id_is_uuid BOOLEAN NOT NULL, + specifics BYTEA STORAGE EXTERNAL NOT NULL, + deleted BOOL NOT NULL, client_defined_unique_tag TEXT STORAGE PLAIN, server_defined_unique_tag TEXT STORAGE PLAIN, folder BOOLEAN, From 88ddc923e472b67c02365b53664eefe43d4a4230 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Tue, 10 Sep 2024 21:03:56 -0700 Subject: [PATCH 03/19] Add variations for rolling out SQL to percentage of users, migration of items upon update, create DB helpers, add godotenv --- .gitignore | 2 + command/command.go | 91 +++------- command/helpers.go | 164 ++++++++++++++++++ command/item_count.go | 10 +- command/server_defined_unique_entity.go | 10 +- datastore/datastore.go | 4 +- datastore/instrumented_datastore.go | 18 +- datastore/sql.go | 11 +- datastore/sql_variations.go | 83 +++++++++ datastore/sync_entity.go | 54 ++---- datastore/sync_entity_dynamo.go | 74 ++++++-- datastore/sync_entity_sql.go | 111 ++++++------ go.mod | 28 +-- go.sum | 23 +++ main.go | 1 + ....postgres => 20240904202925_init.down.sql} | 0 ...up.postgres => 20240904202925_init.up.sql} | 7 +- 17 files changed, 489 insertions(+), 202 deletions(-) create mode 100644 
command/helpers.go create mode 100644 datastore/sql_variations.go rename migrations/{20240904202925_init.down.postgres => 20240904202925_init.down.sql} (100%) rename migrations/{20240904202925_init.up.postgres => 20240904202925_init.up.sql} (88%) diff --git a/.gitignore b/.gitignore index 4304ae0a..5ce67900 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ # Temp files /tmp/ + +.env diff --git a/command/command.go b/command/command.go index d745df48..5e52acea 100644 --- a/command/command.go +++ b/command/command.go @@ -19,11 +19,6 @@ var ( maxClientHistoryObjectQuota = 30000 ) -var allowedSQLDataTypes = map[int]struct{}{ - // Sessions - 50119: {}, -} - const ( storeBirthday string = "1" maxCommitBatchSize int32 = 90 @@ -43,15 +38,18 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag isNewClient := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_NEW_CLIENT isPoll := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_PERIODIC - var hasChangesRemaining bool - var syncEntities []datastore.SyncEntity - var err error + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, nil, false) + if err != nil { + return nil, err + } + defer dbHelpers.Trx.Rollback() if isNewClient { // Reject the request if client has >= 50 devices in the chain. 
activeDevices := 0 for { - hasChangesRemaining, syncEntities, err = dynamoDB.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, maxGUBatchSize) + // TODO(djandries): Call the dbHelpers variant instead + hasChangesRemaining, syncEntities, err := dynamoDB.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, maxGUBatchSize, nil) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", deviceInfoTypeID) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -100,11 +98,6 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag maxSize := maxGUBatchSize - chainID, err := sqlDB.GetChainID(nil, clientID, false) - if err != nil { - return nil, err - } - // Process from_progress_marker guRsp.NewProgressMarker = make([]*sync_pb.DataTypeProgressMarker, len(guMsg.FromProgressMarker)) guRsp.Entries = make([]*sync_pb.SyncEntity, 0, maxSize) @@ -150,12 +143,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } curMaxSize := maxSize - len(guRsp.Entries) - dataType := int(*fromProgressMarker.DataTypeId) - if _, isStoredInSQL := allowedSQLDataTypes[dataType]; isStoredInSQL { - hasChangesRemaining, syncEntities, err = sqlDB.GetUpdatesForType(dataType, token, fetchFolders, *chainID, curMaxSize) - } else { - hasChangesRemaining, syncEntities, err = dynamoDB.GetUpdatesForType(int(*fromProgressMarker.DataTypeId), token, fetchFolders, clientID, curMaxSize) - } + hasChangesRemaining, syncEntities, err := dbHelpers.getUpdatesFromDBs(int(*fromProgressMarker.DataTypeId), token, fetchFolders, curMaxSize) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", *fromProgressMarker.DataTypeId) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -211,6 +199,10 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } } + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err + } + return &errCode, nil } @@ -228,23 +220,11 @@ func 
handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c return &errCode, nil } - trx, err := sqlDB.Beginx() - if err != nil { - return nil, fmt.Errorf("error starting transaction: %w", err) - } - defer trx.Rollback() - - chainID, err := sqlDB.GetChainID(trx, clientID, true) + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, cache, true) if err != nil { return nil, err } - - itemCounts, err := getItemCounts(cache, dynamoDB, clientID) - if err != nil { - log.Error().Err(err).Msg("Get client's item count failed") - errCode = sync_pb.SyncEnums_TRANSIENT_ERROR - return &errCode, fmt.Errorf("error getting client's item count: %w", err) - } + defer dbHelpers.Trx.Rollback() commitRsp.Entryresponse = make([]*sync_pb.CommitResponse_EntryResponse, len(commitMsg.Entries)) @@ -256,7 +236,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp := &sync_pb.CommitResponse_EntryResponse{} commitRsp.Entryresponse[i] = entryRsp - entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID, chainID) + entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID, dbHelpers.ChainID) if err != nil { // Can't unmarshal & marshal the message from PB into DB format rspType := sync_pb.CommitResponse_INVALID_MESSAGE entryRsp.ResponseType = &rspType @@ -274,17 +254,12 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c oldVersion := *entityToCommit.Version isUpdateOp := oldVersion != 0 - isHistoryRelatedItem := *entityToCommit.DataType == datastore.HistoryTypeID || *entityToCommit.DataType == datastore.HistoryDeleteDirectiveTypeID - _, isStoredInSQL := allowedSQLDataTypes[*entityToCommit.DataType] + isHistoryItem := *entityToCommit.DataType == datastore.HistoryTypeID + isHistoryRelatedItem := isHistoryItem || *entityToCommit.DataType == datastore.HistoryDeleteDirectiveTypeID *entityToCommit.Version = *entityToCommit.Mtime - if 
*entityToCommit.DataType == datastore.HistoryTypeID { - // Check if item exists using client_unique_tag - if isStoredInSQL { - isUpdateOp, err = sqlDB.HasItem(trx, *chainID, []byte(*entityToCommit.ClientDefinedUniqueTag)) - } else { - isUpdateOp, err = dynamoDB.HasItem(clientID, *entityToCommit.ClientDefinedUniqueTag) - } + if isHistoryItem { + isUpdateOp, err = dbHelpers.hasItemInEitherDB(entityToCommit) if err != nil { log.Error().Err(err).Msg("Insert history sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -295,7 +270,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } if !isUpdateOp { // Create - totalItemCount := itemCounts.sumCounts(false) + totalItemCount := dbHelpers.ItemCounts.sumCounts(false) if totalItemCount >= maxClientObjectQuota { rspType := sync_pb.CommitResponse_OVER_QUOTA entryRsp.ResponseType = &rspType @@ -303,16 +278,12 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c continue } - if !isHistoryRelatedItem || itemCounts.sumCounts(true) < maxClientHistoryObjectQuota { + if !isHistoryRelatedItem || dbHelpers.ItemCounts.sumCounts(true) < maxClientHistoryObjectQuota { // Insert all non-history items. For history items, ignore any items above history quoto // and lie to the client about the objects being synced instead of returning OVER_QUOTA // so the client can continue to sync other entities. 
var conflict bool - if isStoredInSQL { - conflict, err = sqlDB.InsertSyncEntity(trx, entityToCommit) - } else { - conflict, err = dynamoDB.InsertSyncEntity(entityToCommit) - } + conflict, err = dbHelpers.insertSyncEntity(entityToCommit) if err != nil { log.Error().Err(err).Msg("Insert sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -329,16 +300,9 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c if entityToCommit.OriginatorClientItemID != nil { idMap[*entityToCommit.OriginatorClientItemID] = entityToCommit.ID } - - err = itemCounts.recordChange(*entityToCommit.DataType, false) } } else { // Update - var conflict, deleted bool - if isStoredInSQL { - conflict, deleted, err = sqlDB.UpdateSyncEntity(trx, entityToCommit, oldVersion) - } else { - conflict, deleted, err = dynamoDB.UpdateSyncEntity(entityToCommit, oldVersion) - } + conflict, err := dbHelpers.updateSyncEntity(entityToCommit, oldVersion) if err != nil { log.Error().Err(err).Msg("Update sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -351,9 +315,6 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.ResponseType = &rspType continue } - if deleted { - err = itemCounts.recordChange(*entityToCommit.DataType, true) - } } if err != nil { log.Error().Err(err).Msg("Interim count update failed") @@ -370,7 +331,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.Mtime = entityToCommit.Mtime } - err = itemCounts.save() + err = dbHelpers.ItemCounts.save() if err != nil { log.Error().Err(err).Msg("Get interim item counts failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -382,7 +343,9 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c cache.SetTypeMtime(context.Background(), clientID, dataType, mtime) } - trx.Commit() + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err + } return &errCode, nil } diff 
--git a/command/helpers.go b/command/helpers.go new file mode 100644 index 00000000..603c6b8b --- /dev/null +++ b/command/helpers.go @@ -0,0 +1,164 @@ +package command + +import ( + "fmt" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" +) + +type DBHelpers struct { + dynamoDB datastore.DynamoDatastore + sqlDB datastore.SQLDB + Trx *sqlx.Tx + clientID string + ChainID int64 + variationHashDecimal float32 + ItemCounts *ItemCounts +} + +func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string, cache *cache.Cache, initItemCounts bool) (*DBHelpers, error) { + trx, err := sqlDB.Beginx() + if err != nil { + return nil, fmt.Errorf("error starting transaction: %w", err) + } + + chainID, err := sqlDB.GetAndLockChainID(trx, clientID) + if err != nil { + trx.Rollback() + return nil, err + } + variationHashDecimal := datastore.VariationHashDecimal(clientID) + + var itemCounts *ItemCounts + if initItemCounts { + itemCounts, err = getItemCounts(cache, dynamoDB, sqlDB, trx, clientID, *chainID) + if err != nil { + trx.Rollback() + return nil, err + } + } + + return &DBHelpers{ + dynamoDB: dynamoDB, + sqlDB: sqlDB, + Trx: trx, + clientID: clientID, + ChainID: *chainID, + variationHashDecimal: variationHashDecimal, + ItemCounts: itemCounts, + }, nil +} + +func (h *DBHelpers) hasItemInEitherDB(entity *datastore.SyncEntity) (exists bool, err error) { + // Check if item exists using client_unique_tag + if h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + exists, err := h.sqlDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) + if err != nil { + return false, err + } + if !exists { + return h.dynamoDB.HasItem(h.clientID, *entity.ClientDefinedUniqueTag) + } + return exists, err + } + return h.dynamoDB.HasItem(h.clientID, *entity.ClientDefinedUniqueTag) +} + +func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, 
fetchFolders bool, curMaxSize int) (hasChangesRemaining bool, syncEntities []datastore.SyncEntity, err error) { + if curMaxSize == 0 { + return false, nil, nil + } + if h.sqlDB.Variations.ShouldSaveToSQL(dataType, h.variationHashDecimal) { + dynamoMigrationStatus, err := h.sqlDB.GetDynamoMigrationStatus(h.ChainID, dataType) + if err != nil { + return false, nil, err + } + + if dynamoMigrationStatus == nil || dynamoMigrationStatus.EarliestMtime > token { + var earliestMtime *int64 + if dynamoMigrationStatus != nil { + earliestMtime = &dynamoMigrationStatus.EarliestMtime + } + hasChangesRemaining, syncEntities, err = h.dynamoDB.GetUpdatesForType(dataType, token, fetchFolders, h.clientID, curMaxSize, earliestMtime) + if err != nil { + return false, nil, err + } + curMaxSize -= len(syncEntities) + } + + if curMaxSize > 0 { + sqlHasChangesRemaining, sqlSyncEntities, err := h.sqlDB.GetUpdatesForType(dataType, token, fetchFolders, h.ChainID, curMaxSize) + if err != nil { + return false, nil, err + } + if sqlHasChangesRemaining { + hasChangesRemaining = true + } + syncEntities = append(syncEntities, sqlSyncEntities...) 
+ } + + return hasChangesRemaining, syncEntities, nil + } + return h.dynamoDB.GetUpdatesForType(dataType, token, fetchFolders, h.clientID, curMaxSize, nil) +} + +func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict bool, err error) { + savedInSQL := h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + if savedInSQL { + conflict, err = h.sqlDB.InsertSyncEntity(h.Trx, entity) + } else { + conflict, err = h.dynamoDB.InsertSyncEntity(entity) + } + if err == nil && !conflict && (entity.Deleted == nil || !*entity.Deleted) { + if err = h.ItemCounts.recordChange(*entity.DataType, false, savedInSQL); err != nil { + return false, err + } + } + return conflict, err +} + +func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, err error) { + if h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + conflict, err := h.sqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion) + if err != nil { + return false, err + } + if conflict { + oldEntity, err := h.dynamoDB.DeleteEntity(entity) + if err != nil { + return false, err + } + if oldEntity == nil { + return true, nil + } + if oldEntity.Deleted == nil || !*oldEntity.Deleted { + if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { + return false, err + } + } + if *entity.DataType == datastore.HistoryTypeID { + newID, err := uuid.NewV7() + if err != nil { + return false, err + } + entity.ID = newID.String() + } + conflict, err = h.sqlDB.InsertSyncEntity(h.Trx, entity) + if err != nil { + return false, err + } + if !conflict && (entity.Deleted == nil || !*entity.Deleted) { + if err = h.ItemCounts.recordChange(*entity.DataType, false, true); err != nil { + return false, err + } + } + return conflict, err + } + return conflict, err + } + conflict, _, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion) + return conflict, err +} diff --git a/command/item_count.go index 
95a6be1f..601cada3 100644 --- a/command/item_count.go +++ b/command/item_count.go @@ -6,6 +6,7 @@ import ( "github.com/brave/go-sync/cache" "github.com/brave/go-sync/datastore" + "github.com/jmoiron/sqlx" "github.com/rs/zerolog/log" ) @@ -13,6 +14,7 @@ type ItemCounts struct { cache *cache.Cache dynamoDB datastore.DynamoDatastore dynamoItemCounts *datastore.DynamoItemCounts + sqlItemCounts *datastore.SQLItemCounts clientID string cacheNewNormalCount int cacheNewHistoryCount int @@ -20,16 +22,22 @@ sqlTxNewHistoryCount int } -func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, clientID string) (*ItemCounts, error) { +func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { dynamoItemCounts, err := dynamoDB.GetClientItemCount(clientID) if err != nil { return nil, err } + sqlItemCounts, err := sqlDB.GetItemCounts(tx, chainID) + if err != nil { + return nil, err + } + itemCounts := ItemCounts{ cache: cache, dynamoDB: dynamoDB, dynamoItemCounts: dynamoItemCounts, + sqlItemCounts: sqlItemCounts, clientID: clientID, cacheNewNormalCount: 0, cacheNewHistoryCount: 0, @@ -58,8 +63,7 @@ func (itemCounts *ItemCounts) updateInterimItemCounts(clear bool) error { return nil } -func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool) error { - _, isStoredInSQL := allowedSQLDataTypes[dataType] +func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool, isStoredInSQL bool) error { isHistory := dataType == datastore.HistoryTypeID if isStoredInSQL { delta := 1 diff --git a/command/server_defined_unique_entity.go index 93057fdf..81bd7337 100644 --- a/command/server_defined_unique_entity.go +++ b/command/server_defined_unique_entity.go @@ -8,7 +8,7 @@ import ( "github.com/brave/go-sync/datastore" "github.com/brave/go-sync/schema/protobuf/sync_pb" "github.com/brave/go-sync/utils" - 
"github.com/satori/go.uuid" + "github.com/google/uuid" ) const ( @@ -29,7 +29,11 @@ func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clien deleted := false folder := true version := int64(1) - idString := uuid.NewV4().String() + idUUID, err := uuid.NewV7() + if err != nil { + return nil, err + } + idString := idUUID.String() pbEntity := &sync_pb.SyncEntity{ Ctime: &now, Mtime: &now, Deleted: &deleted, Folder: &folder, @@ -37,7 +41,7 @@ func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clien Version: &version, ParentIdString: &parentID, IdString: &idString, Specifics: specifics} - return datastore.CreateDBSyncEntity(pbEntity, nil, clientID, nil) + return datastore.CreateDBSyncEntity(pbEntity, nil, clientID, 0) } // InsertServerDefinedUniqueEntities inserts the server defined unique tag diff --git a/datastore/datastore.go b/datastore/datastore.go index 4d069192..8ac713ce 100644 --- a/datastore/datastore.go +++ b/datastore/datastore.go @@ -12,7 +12,7 @@ type DynamoDatastore interface { // client token for a given client. Besides the array of sync entities, a // boolean value indicating whether there are more updates to query in the // next batch is returned. - GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (bool, []SyncEntity, error) + GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (bool, []SyncEntity, error) // Check if a server-defined unique tag is in the datastore. HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) // Get the count of sync items for a client. 
@@ -27,4 +27,6 @@ type DynamoDatastore interface { IsSyncChainDisabled(clientID string) (bool, error) // Checks if sync item exists for a client HasItem(clientID string, ID string) (bool, error) + // Deletes an existing item + DeleteEntity(entity *SyncEntity) (*SyncEntity, error) } diff --git a/datastore/instrumented_datastore.go b/datastore/instrumented_datastore.go index 51dc4043..9fc4b55c 100644 --- a/datastore/instrumented_datastore.go +++ b/datastore/instrumented_datastore.go @@ -80,7 +80,7 @@ func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *D } // GetUpdatesForType implements Datastore -func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { +func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (b1 bool, sa1 []SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" @@ -90,7 +90,7 @@ func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken in datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, clientID, maxSize) + return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, clientID, maxSize, maxMtime) } // HasItem implements Datastore @@ -190,3 +190,17 @@ func (_d DatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersio }() return _d.base.UpdateSyncEntity(entity, oldVersion) } + +// DeleteEntity implements Datastore +func (_d DatastoreWithPrometheus) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntity", 
result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteEntity(entity) +} diff --git a/datastore/sql.go b/datastore/sql.go index 6c2b724a..cee008ef 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -8,6 +8,7 @@ import ( "github.com/golang-migrate/migrate/v4" _ "github.com/golang-migrate/migrate/v4/database/postgres" _ "github.com/golang-migrate/migrate/v4/source/file" + _ "github.com/jackc/pgx/stdlib" "github.com/jmoiron/sqlx" ) @@ -17,10 +18,16 @@ const sqlURLEnvKey = "SQL_DATABASE_URL" type SQLDB struct { *sqlx.DB insertQuery string + Variations *SQLVariations } // NewSQLDB returns a SQLDB client to be used. func NewSQLDB() (*SQLDB, error) { + variations, err := LoadSQLVariations() + if err != nil { + return nil, err + } + sqlURL := os.Getenv(sqlURLEnvKey) if len(sqlURL) == 0 { return nil, fmt.Errorf("%s must be defined", sqlURLEnvKey) @@ -38,11 +45,11 @@ func NewSQLDB() (*SQLDB, error) { } } - db, err := sqlx.Connect("postgres", sqlURL) + db, err := sqlx.Connect("pgx", sqlURL) if err != nil { return nil, fmt.Errorf("Failed to connect to SQL DB: %v", err) } - wrappedDB := SQLDB{db, buildInsertQuery()} + wrappedDB := SQLDB{db, buildInsertQuery(), variations} return &wrappedDB, nil } diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go new file mode 100644 index 00000000..5ab0120b --- /dev/null +++ b/datastore/sql_variations.go @@ -0,0 +1,83 @@ +package datastore + +import ( + "fmt" + "hash/fnv" + "math" + "os" + "strconv" + "strings" +) + +const sqlSaveRolloutsEnvKey = "SQL_SAVE_ROLLOUTS" +const sqlMigrateRolloutsEnvKey = "SQL_MIGRATE_ROLLOUTS" + +func VariationHashDecimal(input string) float32 { + h := fnv.New32a() + h.Write([]byte(input)) + hashValue := h.Sum32() + + // Convert hash to a decimal between 0 and 1 + return float32(hashValue) / math.MaxUint32 +} + +type SQLVariations struct { + sqlSaveRollouts map[int]float32 + sqlMigrateRollouts map[int]float32 +} + +func parseRollouts(envKey string) (map[int]float32, 
error) { + rollouts := make(map[int]float32) + envVal := os.Getenv(envKey) + + if len(envVal) > 0 { + pairs := strings.Split(envVal, ",") + + for _, pair := range pairs { + parts := strings.Split(strings.TrimSpace(pair), "=") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid format in %s: %s", envKey, pair) + } + + key, err := strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return nil, fmt.Errorf("Invalid integer in %s: %s", envKey, parts[0]) + } + + value, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 32) + if err != nil { + return nil, fmt.Errorf("Invalid float in %s: %s", envKey, parts[1]) + } + + rollouts[key] = float32(value) + } + } + + return rollouts, nil +} + +func LoadSQLVariations() (*SQLVariations, error) { + sqlSaveRollouts, err := parseRollouts(sqlSaveRolloutsEnvKey) + if err != nil { + return nil, err + } + sqlMigrateRollouts, err := parseRollouts(sqlMigrateRolloutsEnvKey) + if err != nil { + return nil, err + } + + return &SQLVariations{ + sqlSaveRollouts, + sqlMigrateRollouts, + }, nil +} + +func (sqlVariations *SQLVariations) ShouldSaveToSQL(dataType int, variationHashDecimal float32) bool { + rolloutPercent, exists := sqlVariations.sqlSaveRollouts[dataType] + return exists && variationHashDecimal <= rolloutPercent +} + +func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHashDecimal float32) bool { + rolloutPercent, exists := sqlVariations.sqlMigrateRollouts[dataType] + return exists && variationHashDecimal <= rolloutPercent +} diff --git a/datastore/sync_entity.go index a5ccf70f..9c78e368 100644 --- a/datastore/sync_entity.go +++ b/datastore/sync_entity.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/brave/go-sync/schema/protobuf/sync_pb" + "github.com/google/uuid" "github.com/rs/zerolog/log" - uuid "github.com/satori/go.uuid" "google.golang.org/protobuf/proto" ) @@ -26,10 +26,8 @@ const ( type SyncEntity 
struct { ClientID string // ChainID is a synthetic key that is connected to the client id in the SQL db. - ChainID *int64 `dynamodbav:"-" db:"chain_id"` - ID string `db:"-"` - IDBytes []byte `dynamodbav:"-" db:"id"` - IDIsUUID bool `dynamodbav:"-" db:"id_is_uuid"` + ChainID *int64 `dynamodbav:"-" db:"chain_id"` + ID string ParentID *string `dynamodbav:",omitempty" db:"parent_id"` Version *int64 Mtime *int64 @@ -71,7 +69,7 @@ func validatePBEntity(entity *sync_pb.SyncEntity) error { } // CreateDBSyncEntity converts a protobuf sync entity into a DB sync item. -func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string, chainID *int64) (*SyncEntity, error) { +func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string, chainID int64) (*SyncEntity, error) { err := validatePBEntity(entity) if err != nil { log.Error().Err(err).Msg("Invalid sync_pb.SyncEntity received") @@ -105,18 +103,16 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID var originatorCacheGUID, originatorClientItemID *string if cacheGUID != nil { if *entity.Version == 0 { - id = uuid.NewV4().String() + idUUID, err := uuid.NewV7() + if err != nil { + return nil, err + } + id = idUUID.String() } originatorCacheGUID = cacheGUID originatorClientItemID = entity.IdString } - // The client tag hash must be used as the primary key - // for the history type. 
- if dataType == HistoryTypeID { - id = *entity.ClientTagHash - } - now := time.Now() var expirationTime *int64 @@ -148,22 +144,10 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID } } - var idBytes []byte - var idIsUUID bool - idUUID, err := uuid.FromString(id) - if err != nil { - idBytes = []byte(id) - } else { - idBytes = idUUID.Bytes() - idIsUUID = true - } - return &SyncEntity{ ClientID: clientID, - ChainID: chainID, + ChainID: &chainID, ID: id, - IDBytes: idBytes, - IDIsUUID: idIsUUID, ParentID: entity.ParentIdString, Version: entity.Version, Ctime: cTime, @@ -186,21 +170,15 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID // CreatePBSyncEntity converts a DB sync item to a protobuf sync entity. func CreatePBSyncEntity(entity *SyncEntity) (*sync_pb.SyncEntity, error) { - id := entity.ID - if len(id) == 0 { - if entity.IDIsUUID { - idUUID, err := uuid.FromBytes(entity.IDBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse uuid from bytes: %w", err) - } - id = idUUID.String() - } else { - id = string(entity.IDBytes) - } + id := &entity.ID + // The client tag hash must be used as the primary key + // for the history type. + if *entity.DataType == HistoryTypeID { + id = entity.ClientDefinedUniqueTag } pbEntity := &sync_pb.SyncEntity{ - IdString: &id, + IdString: id, ParentIdString: entity.ParentID, Version: entity.Version, Mtime: entity.Mtime, diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index a220ea73..414da6ce 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -52,13 +52,6 @@ type DisabledMarkerItem struct { Ctime *int64 } -// DisabledMarkerItemQuery is used to query for disabled marker item in -// DynamoDB -type DisabledMarkerItemQuery struct { - ClientID string - ID string -} - // ServerClientUniqueTagItem is used to marshal and unmarshal tag items in // dynamoDB. 
type ServerClientUniqueTagItem struct { @@ -68,9 +61,8 @@ type ServerClientUniqueTagItem struct { Ctime *int64 } -// ServerClientUniqueTagItemQuery is used to query for unique tag items in -// dynamoDB. -type ServerClientUniqueTagItemQuery struct { +// ItemQuery is used to query for items in dynamoDB. +type ItemQuery struct { ClientID string // Hash key ID string // Range key } @@ -109,10 +101,10 @@ func NewServerClientUniqueTagItem(clientID string, tag string, isServer bool) *S // NewServerClientUniqueTagItemQuery creates a tag item query which is used to // determine whether a sync entity has a unique tag item or not -func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ServerClientUniqueTagItemQuery { +func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ItemQuery { prefix := getTagPrefix(isServer) - return &ServerClientUniqueTagItemQuery{ + return &ItemQuery{ ClientID: clientID, ID: prefix + tag, } @@ -185,8 +177,17 @@ func (dynamo *Dynamo) InsertSyncEntity(entity *SyncEntity) (bool, error) { return false, nil } + actualID := entity.ID + if *entity.DataType == HistoryTypeID { + entity.ID = *entity.ClientDefinedUniqueTag + } + // Normal sync item av, err := dynamodbattribute.MarshalMap(*entity) + if *entity.DataType == HistoryTypeID { + entity.ID = actualID + } + if err != nil { return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err) } @@ -437,7 +438,7 @@ func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) { // IsSyncChainDisabled checks whether a given sync chain has been deleted func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { - key, err := dynamodbattribute.MarshalMap(DisabledMarkerItemQuery{ + key, err := dynamodbattribute.MarshalMap(ItemQuery{ ClientID: clientID, ID: disabledChainID, }) @@ -460,7 +461,11 @@ func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { // UpdateSyncEntity updates a sync 
item in dynamoDB. func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, bool, error) { - primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: entity.ID} + id := entity.ID + if *entity.DataType == HistoryTypeID { + id = *entity.ClientDefinedUniqueTag + } + primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: id} key, err := dynamodbattribute.MarshalMap(primaryKey) if err != nil { return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) @@ -598,13 +603,18 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo // To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a // list of (ClientID, ID) primary keys with the given condition, then read the // actual sync item using the list of primary keys. -func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int) (bool, []SyncEntity, error) { +func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (bool, []SyncEntity, error) { syncEntities := []SyncEntity{} // Get (ClientID, ID) pairs which are updates after mtime for a data type, // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. 
dataTypeMtimeLowerBound := strconv.Itoa(dataType) + "#" + strconv.FormatInt(clientToken+1, 10) - dataTypeMtimeUpperBound := strconv.Itoa(dataType+1) + "#0" + var dataTypeMtimeUpperBound string + if maxMtime != nil { + dataTypeMtimeUpperBound = strconv.Itoa(dataType) + "#" + strconv.FormatInt(*maxMtime-1, 10) + } else { + dataTypeMtimeUpperBound = strconv.Itoa(dataType+1) + "#0" + } pkCond := expression.Key(clientIDDataTypeMtimeIdxPk).Equal(expression.Value(clientID)) skCond := expression.KeyBetween( expression.Key(clientIDDataTypeMtimeIdxSk), @@ -696,3 +706,35 @@ func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFo sort.Sort(SyncEntityByMtime(filteredSyncEntities)) return hasChangesRemaining, filteredSyncEntities, nil } + +func (dynamo *Dynamo) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, err error) { + key, err := dynamodbattribute.MarshalMap(ItemQuery{ + ClientID: entity.ClientID, + ID: entity.ID, + }) + if err != nil { + return nil, fmt.Errorf("error marshalling key to get item for deletion: %w", err) + } + + returnValues := dynamodb.ReturnValueAllOld + input := &dynamodb.DeleteItemInput{ + TableName: aws.String(Table), + Key: key, + ReturnValues: &returnValues, + } + + result, err := dynamo.DeleteItem(input) + if err != nil { + return nil, fmt.Errorf("failed to delete item: %w", err) + } + + if result.Attributes == nil { + return nil, nil + } + + if err = dynamodbattribute.UnmarshalMap(result.Attributes, &oldEntity); err != nil { + return nil, fmt.Errorf("failed to get old entity after deleting: %w", err) + } + + return oldEntity, nil +} diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 41729dcc..030dbf90 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -5,34 +5,24 @@ import ( "encoding/hex" "fmt" "strings" + "time" "github.com/jmoiron/sqlx" ) -const chainIDSelectQuery = "SELECT id FROM chains WHERE client_id = $1" - var fieldsToInsert = []string{ - "id", 
"chain_id", "data_type", "ctime", "mtime", "id_is_uuid", "specifics", + "id", "chain_id", "data_type", "ctime", "mtime", "specifics", "deleted", "client_defined_unique_tag", "server_defined_unique_tag", "folder", "version", "name", "originator_cache_guid", "originator_client_item_id", "parent_id", "non_unique_name", "unique_position", } -type ChainRow struct { - ID *int64 -} - type MigrationStatus struct { ChainID int64 `db:"chain_id"` DataType int `db:"data_type"` EarliestMtime int64 `db:"earliest_mtime"` } -type CommonSQLX interface { - Get(dest interface{}, query string, args ...interface{}) error - Exec(query string, args ...any) (sql.Result, error) -} - func buildInsertQuery() string { var insertValues []string var setValues []string @@ -50,10 +40,7 @@ func buildInsertQuery() string { joinedSetValues + ` WHERE entities.deleted = true` } -func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (bool, error) { - if entity.Deleted != nil && *entity.Deleted { - return true, nil - } +func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (conflict bool, err error) { res, err := tx.NamedExec(sqlDB.insertQuery, entity) if err != nil { return false, fmt.Errorf("failed to insert entity: %w", err) @@ -67,25 +54,33 @@ func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (bool, err return rowsAffected == 0, nil } -func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, idBytes []byte) (bool, error) { - var exists bool - err := tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND id = $2)", chainId, idBytes).Scan(&exists) +func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (exists bool, err error) { + err = tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND client_defined_unique_tag = $2)", chainId, clientTag).Scan(&exists) if err != nil { return false, fmt.Errorf("failed to check existence of item: %w", err) } return exists, nil } -func (sqlDB *SQLDB) 
UpdateDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, data_type_earliest_mtime_map map[int]int64) error { - var statuses []MigrationStatus - for dataType, earliestMtime := range data_type_earliest_mtime_map { - statuses = append(statuses, MigrationStatus{ - ChainID: chainID, - DataType: dataType, - EarliestMtime: earliestMtime, - }) +func (sqlDB *SQLDB) GetDynamoMigrationStatus(chainID int64, dataType int) (*MigrationStatus, error) { + var status MigrationStatus + err := sqlDB.Get(&status, ` + SELECT chain_id, data_type, earliest_mtime + FROM dynamo_migration_statuses + WHERE chain_id = $1 AND data_type = $2 + `, chainID, dataType) + + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, fmt.Errorf("failed to get dynamo migration status: %w", err) } + return &status, nil +} + +func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []MigrationStatus) error { _, err := tx.NamedExec(` INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) VALUES (:chain_id, :data_type, :earliest_mtime) @@ -100,8 +95,14 @@ func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, da return nil } -func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, bool, error) { - whereClause := " WHERE id = :id AND chain_id = :chain_id AND deleted = false" +func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, err error) { + var idColumn string + if *entity.DataType == HistoryTypeID { + idColumn = "client_defined_unique_tag" + } else { + idColumn = "id" + } + whereClause := " WHERE " + idColumn + " = :id AND chain_id = :chain_id AND deleted = false" if *entity.DataType != HistoryTypeID { entity.OldVersion = &oldVersion whereClause += " AND version = :old_version" @@ -140,55 +141,50 @@ func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion result, err := tx.NamedExec(query, entity) if err != nil { 
- return false, false, fmt.Errorf("error updating entity: %w", err) + return false, fmt.Errorf("error updating entity: %w", err) } rowsAffected, err := result.RowsAffected() if err != nil { - return false, false, fmt.Errorf("error getting rows affected after update: %w", err) + return false, fmt.Errorf("error getting rows affected after update: %w", err) } - return rowsAffected == 0, entity.Deleted != nil && *entity.Deleted, nil + return rowsAffected == 0, nil } -func (sqlDB *SQLDB) GetChainID(tx *sqlx.Tx, clientID string, acquireUpdateLock bool) (*int64, error) { +func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *int64, err error) { // Get chain ID and lock for updates clientIDBytes, err := hex.DecodeString(clientID) if err != nil { return nil, fmt.Errorf("failed to decode clientID: %w", err) } - row := ChainRow{} - var lockClause string - if acquireUpdateLock { - lockClause = " FOR UPDATE" - } + var id int64 - var commonSQLX CommonSQLX - if tx != nil { - commonSQLX = tx - } else { - commonSQLX = sqlDB + err = tx.QueryRowx(` + INSERT INTO chains (client_id, last_usage_time) VALUES ($1, $2) + ON CONFLICT (client_id) + DO UPDATE SET last_usage_time = EXCLUDED.last_usage_time + RETURNING id + `, clientIDBytes, time.Now()).Scan(&id) + if err != nil { + return nil, fmt.Errorf("failed to upsert chain: %w", err) } - if err := commonSQLX.Get(&row, chainIDSelectQuery+lockClause, clientIDBytes); err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("failed to get chain id: %w", err) - } - _, err := commonSQLX.Exec("INSERT INTO chains (client_id) VALUES ($1)", clientIDBytes) - if err != nil { - return nil, fmt.Errorf("failed to insert chain: %w", err) - } - - if err = commonSQLX.Get(&row, chainIDSelectQuery+lockClause, clientIDBytes); err != nil { - return nil, fmt.Errorf("failed to get chain id: %w", err) - } + // Once we have completely migrated over to SQL, we can change this to + // `FOR UPDATE`, and only lock upon commits. 
We need to lock for updates + // as we will be deleting older Dynamo items during update requests, and migrating + // them over to SQL. If another client in the chain updates during this process, + // the client may not receive some older items. + _, err = tx.Exec(`SELECT id FROM chains WHERE id = $1 FOR SHARE`, id) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock on chain: %w", err) } - return row.ID, nil + return &id, nil } -func (sqlDB *SQLDB) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) { +func (sqlDB *SQLDB) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { var additionalCondition string if !fetchFolders { additionalCondition = "AND folder = false " @@ -196,7 +192,6 @@ func (sqlDB *SQLDB) GetUpdatesForType(dataType int, clientToken int64, fetchFold query := `SELECT * FROM entities WHERE chain_id = $1 AND data_type = $2 AND mtime > $3 ` + additionalCondition + `ORDER BY mtime LIMIT $4` - entities := []SyncEntity{} if err := sqlDB.Select(&entities, query, chainID, dataType, clientToken, maxSize); err != nil { return false, nil, fmt.Errorf("failed to get entity updates: %w", err) } diff --git a/go.mod b/go.mod index 67ec98eb..07d23ce6 100644 --- a/go.mod +++ b/go.mod @@ -1,54 +1,60 @@ module github.com/brave/go-sync -go 1.22 +go 1.22.0 + +toolchain go1.23.0 require ( github.com/aws/aws-sdk-go v1.55.5 - github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae github.com/getsentry/sentry-go v0.28.1 github.com/go-chi/chi/v5 v5.0.12 github.com/prometheus/client_golang v1.19.0 github.com/redis/go-redis/v9 v9.5.1 github.com/rs/zerolog v1.32.0 - github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.9.0 - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 ) require ( + github.com/google/uuid v1.6.0 // indirect 
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/lib/pq v1.10.9 // indirect + github.com/satori/go.uuid v1.2.0 // indirect go.uber.org/atomic v1.10.0 // indirect ) require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect - github.com/golang-migrate/migrate/v4 v4.17.1 + github.com/golang-migrate/migrate/v4 v4.18.1 // indirect github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/jackc/pgx v3.6.2+incompatible // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jmoiron/sqlx v1.4.0 + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/shengdoushi/base58 v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/throttled/throttled v2.2.5+incompatible // indirect - golang.org/x/crypto v0.20.0 // indirect - golang.org/x/sys v0.18.0 // indirect - 
golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f3f8ecdc..1ab98501 100644 --- a/go.sum +++ b/go.sum @@ -35,6 +35,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae h1:CGUFAtMXAsGajLeobq6ep+5wREYS+lepZSdPckY+Ba0= github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae/go.mod h1:sUyKgpr9uxg0SARewNEkNMStvBjOeWuWoLchHgyONGA= +github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e h1:wWsx4axnKnJ2i6HM4m+1etOu+fz68VDjm1dMgnD3+b0= +github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e/go.mod h1:8QVK0ZrPIiemLAHAvgGYY+Xf3QXYclAHfnuiHHFxlK8= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -79,6 +81,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= +github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= +github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -90,6 +94,8 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -103,12 +109,16 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= 
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -168,14 +178,18 @@ github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shengdoushi/base58 v1.0.0 h1:tGe4o6TmdXFJWoI31VoSWvuaKxf0Px3gqa3sUWhAxBs= github.com/shengdoushi/base58 v1.0.0/go.mod h1:m5uIILfzcKMw6238iWAhP4l3s5+uXyF3+bJKUNhAL9I= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.3.1 
h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -201,6 +215,8 @@ golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -212,8 +228,13 @@ golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= @@ -221,6 +242,8 @@ google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/main.go b/main.go index dc289123..3a353527 100644 --- a/main.go +++ b/main.go @@ -3,6 +3,7 @@ package main import ( "github.com/brave/go-sync/server" + _ "github.com/joho/godotenv/autoload" ) func main() { diff --git a/migrations/20240904202925_init.down.postgres b/migrations/20240904202925_init.down.sql similarity index 100% rename from migrations/20240904202925_init.down.postgres rename to migrations/20240904202925_init.down.sql diff --git a/migrations/20240904202925_init.up.postgres b/migrations/20240904202925_init.up.sql similarity index 88% rename from migrations/20240904202925_init.up.postgres rename to migrations/20240904202925_init.up.sql index 0fbee675..56c4db45 100644 --- a/migrations/20240904202925_init.up.postgres +++ b/migrations/20240904202925_init.up.sql @@ -1,24 +1,23 @@ CREATE TABLE chains ( id BIGSERIAL PRIMARY KEY, client_id BYTEA NOT NULL, + last_usage_time TIMESTAMP NOT NULL, UNIQUE (client_id) ); CREATE TABLE 
dynamo_migration_statuses ( chain_id BIGINT REFERENCES chains(id), data_type INTEGER, - -- null earliest_mtime value indicates that all entities have been migrated - earliest_mtime BIGINT, + earliest_mtime BIGINT NOT NULL, PRIMARY KEY (chain_id, data_type) ); CREATE TABLE entities ( - id BYTEA STORAGE PLAIN, + id UUID, chain_id BIGINT NOT NULL REFERENCES chains(id), data_type INTEGER NOT NULL, ctime BIGINT NOT NULL, mtime BIGINT NOT NULL, - id_is_uuid BOOLEAN NOT NULL, specifics BYTEA STORAGE EXTERNAL NOT NULL, deleted BOOL NOT NULL, client_defined_unique_tag TEXT STORAGE PLAIN, From 94f28ddbc4fdd6a47ae012de3bfb7afb469b766c Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 11 Sep 2024 19:25:29 -0700 Subject: [PATCH 04/19] Add chunked migration from dynamo to SQL, add Prometheus metrics for SQL --- Makefile | 3 +- cache/instrumented_redis.go | 26 +-- command/command.go | 22 ++- command/helpers.go | 94 +++++++-- command/item_count.go | 2 +- controller/controller.go | 4 +- datastore/datastore.go | 32 --- datastore/dynamo_migration_status.go | 49 +++++ datastore/instrumented_datastore.go | 206 ------------------- datastore/instrumented_dynamo_datastore.go | 220 +++++++++++++++++++++ datastore/instrumented_sql_datastore.go | 195 ++++++++++++++++++ datastore/interfaces.go | 64 ++++++ datastore/sql.go | 33 +++- datastore/sql_variations.go | 4 + datastore/sync_entity_dynamo.go | 82 +++++++- datastore/sync_entity_sql.go | 50 +---- migrations/20240904202925_init.up.sql | 3 +- server/server.go | 2 +- 18 files changed, 764 insertions(+), 327 deletions(-) delete mode 100644 datastore/datastore.go create mode 100644 datastore/dynamo_migration_status.go delete mode 100644 datastore/instrumented_datastore.go create mode 100644 datastore/instrumented_dynamo_datastore.go create mode 100644 datastore/instrumented_sql_datastore.go create mode 100644 datastore/interfaces.go diff --git a/Makefile b/Makefile index 3f40a34c..dbf56aa8 100644 --- a/Makefile +++ b/Makefile @@ -38,5 
+38,6 @@ docker-test: COMMIT=$(GIT_COMMIT) VERSION=$(GIT_VERSION) BUILD_TIME=$(BUILD_TIME) docker compose -f docker-compose.yml run --rm dev make test instrumented: - gowrap gen -p github.com/brave/go-sync/datastore -i Datastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_datastore.go + gowrap gen -p github.com/brave/go-sync/datastore -i DynamoDatastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_dynamo_datastore.go + gowrap gen -p github.com/brave/go-sync/datastore -i SQLDatastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_sql_datastore.go gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ./.prom-gowrap.tmpl -o ./cache/instrumented_redis.go diff --git a/cache/instrumented_redis.go b/cache/instrumented_redis.go index 9c7b83e9..17418f82 100755 --- a/cache/instrumented_redis.go +++ b/cache/instrumented_redis.go @@ -1,10 +1,10 @@ -package cache +// Code generated by gowrap. DO NOT EDIT. +// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap -// DO NOT EDIT! 
-// This code is generated with http://github.com/hexdigest/gowrap tool -// using ../.prom-gowrap.tmpl template +package cache -//go:generate gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ../.prom-gowrap.tmpl -o instrumented_redis.go +//go:generate gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ../.prom-gowrap.tmpl -o instrumented_redis.go -l "" import ( "context" @@ -80,8 +80,8 @@ func (_d RedisClientWithPrometheus) Get(ctx context.Context, key string, delete return _d.base.Get(ctx, key, delete) } -// Set implements RedisClient -func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val string, ttl time.Duration) (err error) { +// Incr implements RedisClient +func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtract bool) (i1 int, err error) { _since := time.Now() defer func() { result := "ok" @@ -89,13 +89,13 @@ func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val str result = "error" } - redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Set", result).Observe(time.Since(_since).Seconds()) + redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Incr", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.Set(ctx, key, val, ttl) + return _d.base.Incr(ctx, key, subtract) } -// Incr implements RedisClient -func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtract bool) (val int, err error) { +// Set implements RedisClient +func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val string, ttl time.Duration) (err error) { _since := time.Now() defer func() { result := "ok" @@ -103,7 +103,7 @@ func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtra result = "error" } - redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Incr", result).Observe(time.Since(_since).Seconds()) + redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Set", 
result).Observe(time.Since(_since).Seconds()) }() - return _d.base.Incr(ctx, key, subtract) + return _d.base.Set(ctx, key, val, ttl) } diff --git a/command/command.go b/command/command.go index 5e52acea..84d85a81 100644 --- a/command/command.go +++ b/command/command.go @@ -33,7 +33,7 @@ const ( // handleGetUpdatesRequest handles GetUpdatesMessage and fills // GetUpdatesResponse. Target sync entities in the database will be updated or // deleted based on the client's requests. -func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS // default value, might be changed later isNewClient := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_NEW_CLIENT isPoll := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_PERIODIC @@ -49,7 +49,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag activeDevices := 0 for { // TODO(djandries): Call the dbHelpers variant instead - hasChangesRemaining, syncEntities, err := dynamoDB.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, maxGUBatchSize, nil) + hasChangesRemaining, syncEntities, err := dynamoDB.GetUpdatesForType(deviceInfoTypeID, nil, nil, false, clientID, maxGUBatchSize, true) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", deviceInfoTypeID) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -101,10 +101,15 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // Process from_progress_marker guRsp.NewProgressMarker = 
make([]*sync_pb.DataTypeProgressMarker, len(guMsg.FromProgressMarker))
 	guRsp.Entries = make([]*sync_pb.SyncEntity, 0, maxSize)
+
+	var dataTypes []int
+
 	for i, fromProgressMarker := range guMsg.FromProgressMarker {
 		guRsp.NewProgressMarker[i] = &sync_pb.DataTypeProgressMarker{}
 		guRsp.NewProgressMarker[i].DataTypeId = fromProgressMarker.DataTypeId
+		dataTypes = append(dataTypes, int(*fromProgressMarker.DataTypeId))
+
 		// Default token value is client's token, otherwise 0.
 		// This token will be updated when we return the updated entities.
 		if len(fromProgressMarker.Token) > 0 {
@@ -199,10 +204,19 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag
 		}
 	}
 
+	migratedEntities, err := dbHelpers.maybeMigrateToSQL(dataTypes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to perform migration: %w", err)
+	}
+
 	if err = dbHelpers.Trx.Commit(); err != nil {
 		return nil, err
 	}
 
+	if err = dynamoDB.DeleteEntities(migratedEntities); err != nil {
+		log.Error().Err(err).Msgf("Failed to delete migrated items")
+	}
+
 	return &errCode, nil
 }
 
@@ -210,7 +224,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag
 // For each commit entry:
 // - new sync entity is created and inserted into the database if version is 0.
 // - existed sync entity will be updated if version is greater than 0.
-func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { if commitMsg == nil { return nil, fmt.Errorf("nil commitMsg is received") } @@ -390,7 +404,7 @@ func handleClearServerDataRequest(cache *cache.Cache, db datastore.DynamoDatasto // HandleClientToServerMessage handles the protobuf ClientToServerMessage and // fills the protobuf ClientToServerResponse. -func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string) error { +func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) error { // Create ClientToServerResponse and fill general fields for both GU and // Commit. 
pbRsp.StoreBirthday = aws.String(storeBirthday) diff --git a/command/helpers.go b/command/helpers.go index 603c6b8b..3f72563e 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "math/rand/v2" "github.com/brave/go-sync/cache" "github.com/brave/go-sync/datastore" @@ -11,7 +12,7 @@ import ( type DBHelpers struct { dynamoDB datastore.DynamoDatastore - sqlDB datastore.SQLDB + sqlDB datastore.SQLDatastore Trx *sqlx.Tx clientID string ChainID int64 @@ -19,7 +20,7 @@ type DBHelpers struct { ItemCounts *ItemCounts } -func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, clientID string, cache *cache.Cache, initItemCounts bool) (*DBHelpers, error) { +func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string, cache *cache.Cache, initItemCounts bool) (*DBHelpers, error) { trx, err := sqlDB.Beginx() if err != nil { return nil, fmt.Errorf("error starting transaction: %w", err) @@ -54,7 +55,7 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, cli func (h *DBHelpers) hasItemInEitherDB(entity *datastore.SyncEntity) (exists bool, err error) { // Check if item exists using client_unique_tag - if h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { exists, err := h.sqlDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) if err != nil { return false, err @@ -71,18 +72,18 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo if curMaxSize == 0 { return false, nil, nil } - if h.sqlDB.Variations.ShouldSaveToSQL(dataType, h.variationHashDecimal) { - dynamoMigrationStatus, err := h.sqlDB.GetDynamoMigrationStatus(h.ChainID, dataType) + if h.sqlDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { + dynamoMigrationStatuses, err := h.sqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, 
[]int{dataType}) if err != nil { return false, nil, err } - if dynamoMigrationStatus == nil || dynamoMigrationStatus.EarliestMtime > token { + if migrationStatus := dynamoMigrationStatuses[dataType]; migrationStatus == nil || (migrationStatus.EarliestMtime != nil && *migrationStatus.EarliestMtime > token) { var earliestMtime *int64 - if dynamoMigrationStatus != nil { - earliestMtime = &dynamoMigrationStatus.EarliestMtime + if migrationStatus != nil { + earliestMtime = migrationStatus.EarliestMtime } - hasChangesRemaining, syncEntities, err = h.dynamoDB.GetUpdatesForType(dataType, token, fetchFolders, h.clientID, curMaxSize, earliestMtime) + hasChangesRemaining, syncEntities, err = h.dynamoDB.GetUpdatesForType(dataType, &token, earliestMtime, fetchFolders, h.clientID, curMaxSize, true) if err != nil { return false, nil, err } @@ -102,13 +103,13 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo return hasChangesRemaining, syncEntities, nil } - return h.dynamoDB.GetUpdatesForType(dataType, token, fetchFolders, h.clientID, curMaxSize, nil) + return h.dynamoDB.GetUpdatesForType(dataType, &token, nil, fetchFolders, h.clientID, curMaxSize, true) } func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict bool, err error) { - savedInSQL := h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + savedInSQL := h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) if savedInSQL { - conflict, err = h.sqlDB.InsertSyncEntity(h.Trx, entity) + conflict, err = h.sqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) } else { conflict, err = h.dynamoDB.InsertSyncEntity(entity) } @@ -121,7 +122,7 @@ func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict boo } func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, err error) { - if h.sqlDB.Variations.ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { 
+	if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) {
 		conflict, err := h.sqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion)
 		if err != nil {
 			return false, err
@@ -146,7 +147,7 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in
 			}
 			entity.ID = newID.String()
 		}
-		conflict, err = h.sqlDB.InsertSyncEntity(h.Trx, entity)
+		conflict, err = h.sqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity})
 		if err != nil {
 			return false, err
 		}
@@ -162,3 +163,70 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in
 	conflict, _, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion)
 	return conflict, err
 }
+
+func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) {
+	if rand.Float32() > h.sqlDB.MigrateIntervalPercent() {
+		return nil, nil
+	}
+	var applicableDataTypes []int
+	for _, dataType := range dataTypes {
+		if !h.sqlDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) {
+			continue
+		}
+		applicableDataTypes = append(applicableDataTypes, dataType)
+	}
+	migrationStatuses, err := h.sqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes)
+	if err != nil {
+		return nil, err
+	}
+
+	currLimit := h.sqlDB.MigrateChunkSize()
+	var updatedMigrationStatuses []*datastore.MigrationStatus
+
+	// Only migrate data types selected by the variations config; statuses
+	// above were fetched for applicableDataTypes only.
+	for _, dataType := range applicableDataTypes {
+		if currLimit <= 0 {
+			break
+		}
+		migrationStatus := migrationStatuses[dataType]
+		if migrationStatus != nil && migrationStatus.EarliestMtime == nil {
+			continue
+		}
+
+		// No status row yet: start tracking migration for this data type.
+		if migrationStatus == nil {
+			migrationStatus = &datastore.MigrationStatus{ChainID: h.ChainID, DataType: dataType}
+		}
+		earliestMtime := migrationStatus.EarliestMtime
+
+		hasChangesRemaining, syncEntities, err := h.dynamoDB.GetUpdatesForType(dataType, nil, earliestMtime, true, h.clientID, currLimit, false)
+		if err != nil {
+			return nil, err
+		}
+
+		currLimit -= len(syncEntities)
+
+		var lastItem *datastore.SyncEntity
+		if len(syncEntities) > 0 {
+			lastItem = &syncEntities[len(syncEntities)-1]
+		}
+		if !hasChangesRemaining || lastItem == nil {
+			
migrationStatus.EarliestMtime = nil + } else if lastItem.Mtime != nil { + migrationStatus.EarliestMtime = lastItem.Mtime + } + updatedMigrationStatuses = append(updatedMigrationStatuses, migrationStatus) + + var syncEntitiesPtr []*datastore.SyncEntity + for _, syncEntity := range syncEntities { + syncEntitiesPtr = append(syncEntitiesPtr, &syncEntity) + migratedEntities = append(migratedEntities, &syncEntity) + } + + if _, err = h.sqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { + return nil, err + } + } + if err = h.sqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { + return nil, err + } + return migratedEntities, nil +} diff --git a/command/item_count.go b/command/item_count.go index 601cada3..672b1bef 100644 --- a/command/item_count.go +++ b/command/item_count.go @@ -22,7 +22,7 @@ type ItemCounts struct { sqlTxNewHistoryCount int } -func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { +func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { dynamoItemCounts, err := dynamoDB.GetClientItemCount(clientID) if err != nil { return nil, err diff --git a/controller/controller.go b/controller/controller.go index 5561558c..6b4fefa1 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -24,7 +24,7 @@ const ( ) // SyncRouter add routers for command and auth endpoint requests. 
-func SyncRouter(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB) chi.Router { +func SyncRouter(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore) chi.Router { r := chi.NewRouter() r.Use(syncMiddleware.Auth) r.Use(syncMiddleware.DisabledChain) @@ -33,7 +33,7 @@ func SyncRouter(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB da } // Command handles GetUpdates and Commit requests from sync clients. -func Command(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDB) http.HandlerFunc { +func Command(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() clientID, ok := ctx.Value(syncContext.ContextKeyClientID).(string) diff --git a/datastore/datastore.go b/datastore/datastore.go deleted file mode 100644 index 8ac713ce..00000000 --- a/datastore/datastore.go +++ /dev/null @@ -1,32 +0,0 @@ -package datastore - -// DynamoDatastore abstracts over the underlying datastore. -type DynamoDatastore interface { - // Insert a new sync entity. - InsertSyncEntity(entity *SyncEntity) (bool, error) - // Insert a series of sync entities in a write transaction. - InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error - // Update an existing sync entity. - UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) - // Get updates for a specific type which are modified after the time of - // client token for a given client. Besides the array of sync entities, a - // boolean value indicating whether there are more updates to query in the - // next batch is returned. - GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (bool, []SyncEntity, error) - // Check if a server-defined unique tag is in the datastore. 
-	HasServerDefinedUniqueTag(clientID string, tag string) (bool, error)
-	// Get the count of sync items for a client.
-	GetClientItemCount(clientID string) (*DynamoItemCounts, error)
-	// Update the count of sync items for a client.
-	UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error
-	// ClearServerData deletes all items for a given clientID
-	ClearServerData(clientID string) ([]SyncEntity, error)
-	// DisableSyncChain marks a chain as disabled so no further updates or commits can happen
-	DisableSyncChain(clientID string) error
-	// IsSyncChainDisabled checks whether a given sync chain is deleted
-	IsSyncChainDisabled(clientID string) (bool, error)
-	// Checks if sync item exists for a client
-	HasItem(clientID string, ID string) (bool, error)
-	// Deletes an existing item
-	DeleteEntity(entity *SyncEntity) (*SyncEntity, error)
-}
diff --git a/datastore/dynamo_migration_status.go b/datastore/dynamo_migration_status.go
new file mode 100644
index 00000000..93a8b9e4
--- /dev/null
+++ b/datastore/dynamo_migration_status.go
@@ -0,0 +1,58 @@
+package datastore
+
+import (
+	"fmt"
+
+	"github.com/jmoiron/sqlx"
+)
+
+type MigrationStatus struct {
+	ChainID       int64  `db:"chain_id"`
+	DataType      int    `db:"data_type"`
+	EarliestMtime *int64 `db:"earliest_mtime"`
+}
+
+func (sqlDB *SQLDB) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (dataTypeToStatusMap map[int]*MigrationStatus, err error) {
+	dataTypeToStatusMap = make(map[int]*MigrationStatus)
+
+	// An empty IN () list is invalid SQL; there is nothing to look up anyway.
+	if len(dataTypes) == 0 {
+		return dataTypeToStatusMap, nil
+	}
+
+	var statuses []MigrationStatus
+	// sqlx.In expands the slice into individual placeholders; Rebind converts
+	// the ?-style placeholders into the driver's positional form.
+	query, args, err := sqlx.In(`
+		SELECT chain_id, data_type, earliest_mtime
+		FROM dynamo_migration_statuses
+		WHERE chain_id = ? AND data_type IN (?)`, chainID, dataTypes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build migration status query: %w", err)
+	}
+	err = tx.Select(&statuses, tx.Rebind(query), args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get dynamo migration status: %w", err)
+	}
+
+	for i := range statuses {
+		dataTypeToStatusMap[statuses[i].DataType] = &statuses[i]
+	}
+
+	return dataTypeToStatusMap, nil
+}
+
+func (sqlDB *SQLDB) 
UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) error {
+	// Upsert keyed on (chain_id, data_type); only move earliest_mtime
+	// backwards (or to NULL = migration complete), never forwards.
+	_, err := tx.NamedExec(`
+		INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime)
+		VALUES (:chain_id, :data_type, :earliest_mtime)
+		ON CONFLICT (chain_id, data_type) DO UPDATE
+		SET earliest_mtime = excluded.earliest_mtime
+		WHERE dynamo_migration_statuses.earliest_mtime IS NOT NULL AND (excluded.earliest_mtime IS NULL OR dynamo_migration_statuses.earliest_mtime > excluded.earliest_mtime)
+	`, statuses)
+	if err != nil {
+		return fmt.Errorf("failed to update dynamo migration statuses: %w", err)
+	}
+
+	return nil
+}
diff --git a/datastore/instrumented_datastore.go b/datastore/instrumented_datastore.go
deleted file mode 100644
index 9fc4b55c..00000000
--- a/datastore/instrumented_datastore.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package datastore
-
-// DO NOT EDIT!
-// This code is generated with http://github.com/hexdigest/gowrap tool
-// using ../.prom-gowrap.tmpl template
-
-//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i Datastore -t ../.prom-gowrap.tmpl -o instrumented_datastore.go
-
-import (
-	"time"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-// DatastoreWithPrometheus implements Datastore interface with all methods wrapped
-// with Prometheus metrics
-type DatastoreWithPrometheus struct {
-	base         DynamoDatastore
-	instanceName string
-}
-
-var datastoreDurationSummaryVec = promauto.NewSummaryVec(
-	prometheus.SummaryOpts{
-		Name:       "datastore_duration_seconds",
-		Help:       "datastore runtime duration and result",
-		MaxAge:     time.Minute,
-		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-	},
-	[]string{"instance_name", "method", "result"})
-
-// NewDatastoreWithPrometheus returns an instance of the Datastore decorated with prometheus summary metric
-func NewDatastoreWithPrometheus(base DynamoDatastore, instanceName string) DatastoreWithPrometheus {
-	return DatastoreWithPrometheus{
-		base:         base,
-		instanceName: instanceName,
-	}
-}
-
-// ClearServerData implements Datastore
-func (_d 
DatastoreWithPrometheus) ClearServerData(clientID string) (sa1 []SyncEntity, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "ClearServerData", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.ClearServerData(clientID) -} - -// DisableSyncChain implements Datastore -func (_d DatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.DisableSyncChain(clientID) -} - -// GetClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *DynamoItemCounts, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.GetClientItemCount(clientID) -} - -// GetUpdatesForType implements Datastore -func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (b1 bool, sa1 []SyncEntity, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, clientID, maxSize, maxMtime) -} - -// HasItem implements Datastore -func (_d DatastoreWithPrometheus) HasItem(clientID string, ID string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != 
nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.HasItem(clientID, ID) -} - -// HasServerDefinedUniqueTag implements Datastore -func (_d DatastoreWithPrometheus) HasServerDefinedUniqueTag(clientID string, tag string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasServerDefinedUniqueTag", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.HasServerDefinedUniqueTag(clientID, tag) -} - -// InsertSyncEntitiesWithServerTags implements Datastore -func (_d DatastoreWithPrometheus) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntitiesWithServerTags", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.InsertSyncEntitiesWithServerTags(entities) -} - -// InsertSyncEntity implements Datastore -func (_d DatastoreWithPrometheus) InsertSyncEntity(entity *SyncEntity) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntity", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.InsertSyncEntity(entity) -} - -// IsSyncChainDisabled implements Datastore -func (_d DatastoreWithPrometheus) IsSyncChainDisabled(clientID string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "IsSyncChainDisabled", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.IsSyncChainDisabled(clientID) -} 
- -// UpdateClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateClientItemCount", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.UpdateClientItemCount(counts, newNormalItemCount, newHistoryItemCount) -} - -// UpdateSyncEntity implements Datastore -func (_d DatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.UpdateSyncEntity(entity, oldVersion) -} - -// DeleteEntity implements Datastore -func (_d DatastoreWithPrometheus) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntity", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.DeleteEntity(entity) -} diff --git a/datastore/instrumented_dynamo_datastore.go b/datastore/instrumented_dynamo_datastore.go new file mode 100644 index 00000000..fb66485b --- /dev/null +++ b/datastore/instrumented_dynamo_datastore.go @@ -0,0 +1,220 @@ +// Code generated by gowrap. DO NOT EDIT. 
+// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap + +package datastore + +//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i DynamoDatastore -t ../.prom-gowrap.tmpl -o instrumented_dynamo_datastore.go -l "" + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// DynamoDatastoreWithPrometheus implements DynamoDatastore interface with all methods wrapped +// with Prometheus metrics +type DynamoDatastoreWithPrometheus struct { + base DynamoDatastore + instanceName string +} + +var dynamodatastoreDurationSummaryVec = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "dynamodatastore_duration_seconds", + Help: "dynamodatastore runtime duration and result", + MaxAge: time.Minute, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"instance_name", "method", "result"}) + +// NewDynamoDatastoreWithPrometheus returns an instance of the DynamoDatastore decorated with prometheus summary metric +func NewDynamoDatastoreWithPrometheus(base DynamoDatastore, instanceName string) DynamoDatastoreWithPrometheus { + return DynamoDatastoreWithPrometheus{ + base: base, + instanceName: instanceName, + } +} + +// ClearServerData implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) ClearServerData(clientID string) (sa1 []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "ClearServerData", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.ClearServerData(clientID) +} + +// DeleteEntities implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DeleteEntities(entities []*SyncEntity) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + 
dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntities", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteEntities(entities) +} + +// DeleteEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DeleteEntity(entity *SyncEntity) (sp1 *SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteEntity(entity) +} + +// DisableSyncChain implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DisableSyncChain(clientID) +} + +// GetClientItemCount implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetClientItemCount(clientID string) (dp1 *DynamoItemCounts, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetClientItemCount(clientID) +} + +// GetUpdatesForType implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (b1 bool, sa1 []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) + 
}() + return _d.base.GetUpdatesForType(dataType, minMtime, maxMtime, fetchFolders, clientID, maxSize, ascOrder) +} + +// HasItem implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) HasItem(clientID string, ID string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasItem(clientID, ID) +} + +// HasServerDefinedUniqueTag implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) HasServerDefinedUniqueTag(clientID string, tag string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasServerDefinedUniqueTag", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasServerDefinedUniqueTag(clientID, tag) +} + +// InsertSyncEntitiesWithServerTags implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntitiesWithServerTags", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntitiesWithServerTags(entities) +} + +// InsertSyncEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) InsertSyncEntity(entity *SyncEntity) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntity(entity) +} + +// IsSyncChainDisabled implements 
DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) IsSyncChainDisabled(clientID string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "IsSyncChainDisabled", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.IsSyncChainDisabled(clientID) +} + +// UpdateClientItemCount implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateClientItemCount", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateClientItemCount(counts, newNormalItemCount, newHistoryItemCount) +} + +// UpdateSyncEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateSyncEntity(entity, oldVersion) +} diff --git a/datastore/instrumented_sql_datastore.go b/datastore/instrumented_sql_datastore.go new file mode 100644 index 00000000..b9650792 --- /dev/null +++ b/datastore/instrumented_sql_datastore.go @@ -0,0 +1,195 @@ +// Code generated by gowrap. DO NOT EDIT. 
+// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap + +package datastore + +//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i SQLDatastore -t ../.prom-gowrap.tmpl -o instrumented_sql_datastore.go -l "" + +import ( + "time" + + "github.com/jmoiron/sqlx" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// SQLDatastoreWithPrometheus implements SQLDatastore interface with all methods wrapped +// with Prometheus metrics +type SQLDatastoreWithPrometheus struct { + base SQLDatastore + instanceName string +} + +var sqldatastoreDurationSummaryVec = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "sqldatastore_duration_seconds", + Help: "sqldatastore runtime duration and result", + MaxAge: time.Minute, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"instance_name", "method", "result"}) + +// NewSQLDatastoreWithPrometheus returns an instance of the SQLDatastore decorated with prometheus summary metric +func NewSQLDatastoreWithPrometheus(base SQLDatastore, instanceName string) SQLDatastoreWithPrometheus { + return SQLDatastoreWithPrometheus{ + base: base, + instanceName: instanceName, + } +} + +// Beginx implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) Beginx() (tp1 *sqlx.Tx, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "Beginx", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.Beginx() +} + +// GetAndLockChainID implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetAndLockChainID(tx *sqlx.Tx, clientID string) (ip1 *int64, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetAndLockChainID", 
result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetAndLockChainID(tx, clientID) +} + +// GetDynamoMigrationStatuses implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (m1 map[int]*MigrationStatus, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetDynamoMigrationStatuses", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetDynamoMigrationStatuses(tx, chainID, dataTypes) +} + +// GetItemCounts implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetItemCounts(tx *sqlx.Tx, chainID int64) (sp1 *SQLItemCounts, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetItemCounts", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetItemCounts(tx, chainID) +} + +// GetUpdatesForType implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, chainID, maxSize) +} + +// HasItem implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", 
result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasItem(tx, chainId, clientTag) +} + +// InsertSyncEntities implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntities", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntities(tx, entities) +} + +// MigrateChunkSize implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) MigrateChunkSize() (i1 int) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "MigrateChunkSize", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.MigrateChunkSize() +} + +// MigrateIntervalPercent implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) MigrateIntervalPercent() (f1 float32) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "MigrateIntervalPercent", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.MigrateIntervalPercent() +} + +// UpdateDynamoMigrationStatuses implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateDynamoMigrationStatuses", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateDynamoMigrationStatuses(tx, statuses) +} + +// UpdateSyncEntity implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (b1 bool, err error) { + _since := time.Now() + defer 
func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateSyncEntity(tx, entity, oldVersion) +} + +// Variations implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) Variations() (sp1 *SQLVariations) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "Variations", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.Variations() +} diff --git a/datastore/interfaces.go b/datastore/interfaces.go new file mode 100644 index 00000000..a3f2fe21 --- /dev/null +++ b/datastore/interfaces.go @@ -0,0 +1,64 @@ +package datastore + +import "github.com/jmoiron/sqlx" + +// DynamoDatastore abstracts over the underlying datastore. +type DynamoDatastore interface { + // Insert a new sync entity. + InsertSyncEntity(entity *SyncEntity) (bool, error) + // Insert a series of sync entities in a write transaction. + InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error + // Update an existing sync entity. + UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) + // Get updates for a specific type which are modified after the time of + // client token for a given client. Besides the array of sync entities, a + // boolean value indicating whether there are more updates to query in the + // next batch is returned. + GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []SyncEntity, error) + // Check if a server-defined unique tag is in the datastore. + HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) + // Get the count of sync items for a client. + GetClientItemCount(clientID string) (*DynamoItemCounts, error) + // Update the count of sync items for a client. 
+ UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error + // ClearServerData deletes all items for a given clientID + ClearServerData(clientID string) ([]SyncEntity, error) + // DisableSyncChain marks a chain as disabled so no further updates or commits can happen + DisableSyncChain(clientID string) error + // IsSyncChainDisabled checks whether a given sync chain is deleted + IsSyncChainDisabled(clientID string) (bool, error) + // Checks if sync item exists for a client + HasItem(clientID string, ID string) (bool, error) + // Deletes an existing item + DeleteEntity(entity *SyncEntity) (*SyncEntity, error) + // Deletes multiple existing items + DeleteEntities(entities []*SyncEntity) error +} + +// SQLDatastore abstracts over the underlying datastore. +type SQLDatastore interface { + // InsertSyncEntities inserts multiple sync entities into the database + InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (bool, error) + // HasItem checks if an item exists in the database + HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (bool, error) + // UpdateSyncEntity updates a sync entity in the database + UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, error) + // GetAndLockChainID retrieves and locks a chain ID for a given client ID + GetAndLockChainID(tx *sqlx.Tx, clientID string) (*int64, error) + // GetUpdatesForType retrieves updates for a specific data type + GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) + // GetDynamoMigrationStatuses retrieves migration statuses for specified data types + GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (map[int]*MigrationStatus, error) + // UpdateDynamoMigrationStatuses updates migration statuses in the database + UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) error + // GetItemCounts provides the counts of items associated 
with a chain + GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, error) + // Beginx initializes a database transaction + Beginx() (*sqlx.Tx, error) + // Variations returns the SQLVariations utility + Variations() *SQLVariations + // MigrateIntervalPercent returns migration update interval percentage + MigrateIntervalPercent() float32 + // MigrateChunkSize returns the max entity count for each migration chunk + MigrateChunkSize() int +} diff --git a/datastore/sql.go b/datastore/sql.go index cee008ef..40ff423c 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "strconv" "github.com/golang-migrate/migrate/v4" _ "github.com/golang-migrate/migrate/v4/database/postgres" @@ -13,12 +14,19 @@ import ( ) const sqlURLEnvKey = "SQL_DATABASE_URL" +const sqlMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" +const sqlMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" + +const defaultMigrateUpdateInterval = 4 +const defaultMigrateChunkSize = 100 // SQLDB is a Datastore wrapper around a SQL-based database. type SQLDB struct { *sqlx.DB - insertQuery string - Variations *SQLVariations + insertQuery string + variations *SQLVariations + migrateIntervalPercent float32 + migrateChunkSize int } // NewSQLDB returns a SQLDB client to be used. 
@@ -50,6 +58,25 @@ func NewSQLDB() (*SQLDB, error) { return nil, fmt.Errorf("Failed to connect to SQL DB: %v", err) } - wrappedDB := SQLDB{db, buildInsertQuery(), variations} + migrateInterval, _ := strconv.Atoi(os.Getenv(sqlMigrateUpdateIntervalEnvKey)) + migrateChunkSize, _ := strconv.Atoi(os.Getenv(sqlMigrateChunkSizeEnvKey)) + + if migrateInterval <= 0 { + migrateInterval = defaultMigrateUpdateInterval + } + migrateIntervalPercent := 1 / float32(migrateInterval) + if migrateChunkSize <= 0 { + migrateChunkSize = defaultMigrateChunkSize + } + + wrappedDB := SQLDB{db, buildInsertQuery(), variations, migrateIntervalPercent, migrateChunkSize} return &wrappedDB, nil } + +func (db *SQLDB) MigrateIntervalPercent() float32 { + return db.migrateIntervalPercent +} + +func (db *SQLDB) MigrateChunkSize() int { + return db.migrateChunkSize +} diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index 5ab0120b..72102588 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -81,3 +81,7 @@ func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHa rolloutPercent, exists := sqlVariations.sqlMigrateRollouts[dataType] return exists && variationHashDecimal <= rolloutPercent } + +func (sqlDB *SQLDB) Variations() *SQLVariations { + return sqlDB.variations +} diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index 414da6ce..f2e4b986 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -603,13 +603,17 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo // To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a // list of (ClientID, ID) primary keys with the given condition, then read the // actual sync item using the list of primary keys. 
-func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int, maxMtime *int64) (bool, []SyncEntity, error) { +func (dynamo *Dynamo) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []SyncEntity, error) { syncEntities := []SyncEntity{} // Get (ClientID, ID) pairs which are updates after mtime for a data type, // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. - dataTypeMtimeLowerBound := strconv.Itoa(dataType) + "#" + strconv.FormatInt(clientToken+1, 10) - var dataTypeMtimeUpperBound string + var dataTypeMtimeUpperBound, dataTypeMtimeLowerBound string + if minMtime != nil { + dataTypeMtimeLowerBound = strconv.Itoa(dataType) + "#" + strconv.FormatInt(*minMtime+1, 10) + } else { + dataTypeMtimeLowerBound = strconv.Itoa(dataType) + "#0" + } if maxMtime != nil { dataTypeMtimeUpperBound = strconv.Itoa(dataType) + "#" + strconv.FormatInt(*maxMtime-1, 10) } else { @@ -642,6 +646,7 @@ func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFo ProjectionExpression: aws.String(projPk), TableName: aws.String(Table), Limit: aws.Int64(int64(maxSize)), + ScanIndexForward: &ascOrder, } out, err := dynamo.Query(input) @@ -703,7 +708,11 @@ func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFo filteredSyncEntities = append(filteredSyncEntities, syncEntity) } - sort.Sort(SyncEntityByMtime(filteredSyncEntities)) + var sortInterface sort.Interface = SyncEntityByMtime(filteredSyncEntities) + if !ascOrder { + sortInterface = sort.Reverse(sortInterface) + } + sort.Sort(sortInterface) return hasChangesRemaining, filteredSyncEntities, nil } @@ -728,6 +737,21 @@ func (dynamo *Dynamo) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, e return nil, fmt.Errorf("failed to delete item: %w", err) } + if entity.ClientDefinedUniqueTag != nil && 
len(*entity.ClientDefinedUniqueTag) > 0 { + key, err = dynamodbattribute.MarshalMap(NewServerClientUniqueTagItemQuery(entity.ClientID, *entity.ClientDefinedUniqueTag, false)) + if err != nil { + return nil, fmt.Errorf("error marshalling client tag key for deletion: %w", err) + } + input = &dynamodb.DeleteItemInput{ + TableName: aws.String(Table), + Key: key, + } + _, err := dynamo.DeleteItem(input) + if err != nil { + return nil, fmt.Errorf("failed to delete client tag: %w", err) + } + } + if result.Attributes == nil { return nil, nil } @@ -738,3 +762,53 @@ func (dynamo *Dynamo) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, e return oldEntity, nil } + +func (dynamo *Dynamo) DeleteEntities(entities []*SyncEntity) error { + var writeRequests []*dynamodb.WriteRequest + + for _, entity := range entities { + key, err := dynamodbattribute.MarshalMap(ItemQuery{ + ClientID: entity.ClientID, + ID: entity.ID, + }) + if err != nil { + return fmt.Errorf("error marshalling key for deletion: %w", err) + } + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: key, + }, + }) + if entity.ClientDefinedUniqueTag != nil && len(*entity.ClientDefinedUniqueTag) > 0 { + key, err := dynamodbattribute.MarshalMap(NewServerClientUniqueTagItemQuery(entity.ClientID, *entity.ClientDefinedUniqueTag, false)) + if err != nil { + return fmt.Errorf("error marshalling client tag key for deletion: %w", err) + } + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: key, + }, + }) + } + } + + const batchSize = 25 + for i := 0; i < len(writeRequests); i += batchSize { + end := i + batchSize + if end > len(writeRequests) { + end = len(writeRequests) + } + input := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ + Table: writeRequests[i:end], + }, + } + + _, err := dynamo.BatchWriteItem(input) + if err != nil { + return fmt.Errorf("failed to 
delete entities: %w", err) + } + } + + return nil +} diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 030dbf90..daf90871 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -1,7 +1,6 @@ package datastore import ( - "database/sql" "encoding/hex" "fmt" "strings" @@ -17,12 +16,6 @@ var fieldsToInsert = []string{ "unique_position", } -type MigrationStatus struct { - ChainID int64 `db:"chain_id"` - DataType int `db:"data_type"` - EarliestMtime int64 `db:"earliest_mtime"` -} - func buildInsertQuery() string { var insertValues []string var setValues []string @@ -40,18 +33,18 @@ func buildInsertQuery() string { joinedSetValues + ` WHERE entities.deleted = true` } -func (sqlDB *SQLDB) InsertSyncEntity(tx *sqlx.Tx, entity *SyncEntity) (conflict bool, err error) { - res, err := tx.NamedExec(sqlDB.insertQuery, entity) +func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (conflict bool, err error) { + res, err := tx.NamedExec(sqlDB.insertQuery, entities) if err != nil { - return false, fmt.Errorf("failed to insert entity: %w", err) + return false, fmt.Errorf("failed to insert entities: %w", err) } rowsAffected, err := res.RowsAffected() if err != nil { return false, fmt.Errorf("failed to get rows affected after insert: %w", err) } - // if rows affected is 0, then there must be a conflict. return true to indicate this condition. - return rowsAffected == 0, nil + // if rows affected is not len(entities), then there must be a conflict. return true to indicate this condition. 
+	return int(rowsAffected) != len(entities), nil
 }
 
 func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (exists bool, err error) {
@@ -62,39 +55,6 @@ func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (exist
 	return exists, nil
 }
 
-func (sqlDB *SQLDB) GetDynamoMigrationStatus(chainID int64, dataType int) (*MigrationStatus, error) {
-	var status MigrationStatus
-	err := sqlDB.Get(&status, `
-		SELECT chain_id, data_type, earliest_mtime
-		FROM dynamo_migration_statuses
-		WHERE chain_id = $1 AND data_type = $2
-	`, chainID, dataType)
-
-	if err != nil {
-		if err == sql.ErrNoRows {
-			return nil, nil
-		}
-		return nil, fmt.Errorf("failed to get dynamo migration status: %w", err)
-	}
-
-	return &status, nil
-}
-
-func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []MigrationStatus) error {
-	_, err := tx.NamedExec(`
-		INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime)
-		VALUES (:chain_id, :data_type, :earliest_mtime)
-		ON CONFLICT DO UPDATE
-		SET earliest_mtime = $3
-		WHERE earliest_mtime IS NOT NULL AND earliest_mtime > :earliest_mtime
-	`, statuses)
-	if err != nil {
-		return fmt.Errorf("failed to update dynamo migration statuses: %w", err)
-	}
-
-	return nil
-}
-
 func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, err error) {
 	var idColumn string
 	if *entity.DataType == HistoryTypeID {
diff --git a/migrations/20240904202925_init.up.sql b/migrations/20240904202925_init.up.sql
index 56c4db45..7cd09795 100644
--- a/migrations/20240904202925_init.up.sql
+++ b/migrations/20240904202925_init.up.sql
@@ -8,7 +8,8 @@ CREATE TABLE chains (
 CREATE TABLE dynamo_migration_statuses (
 	chain_id BIGINT REFERENCES chains(id),
 	data_type INTEGER,
-	earliest_mtime BIGINT NOT NULL,
+	-- null earliest_mtime indicates that all entities have been migrated
+	earliest_mtime BIGINT,
 	PRIMARY KEY (chain_id, data_type)
 );
 
diff --git a/server/server.go 
b/server/server.go index 90150c75..f0b61693 100644 --- a/server/server.go +++ b/server/server.go @@ -84,7 +84,7 @@ func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, r.Mount("/v2", controller.SyncRouter( cache, - datastore.NewDatastoreWithPrometheus(dynamoDB, "dynamo"), *sqlDB)) + datastore.NewDynamoDatastoreWithPrometheus(dynamoDB, "dynamo"), datastore.NewSQLDatastoreWithPrometheus(sqlDB, "sql"))) r.Get("/metrics", batware.Metrics()) log.Info(). From 92f40002917e4572c6b8404dde4880db80eeef6f Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Thu, 12 Sep 2024 17:59:55 -0700 Subject: [PATCH 05/19] Misc SQL fixes, insert server defined entities in SQL if applicable --- command/command.go | 30 +++-- command/helpers.go | 141 ++++++++++++++++----- command/server_defined_unique_entity.go | 39 ++---- datastore/dynamo_migration_status.go | 9 +- datastore/instrumented_dynamo_datastore.go | 26 ++-- datastore/instrumented_sql_datastore.go | 4 +- datastore/interfaces.go | 12 +- datastore/sql_variations.go | 4 +- datastore/sync_entity_dynamo.go | 76 ++++------- datastore/sync_entity_sql.go | 4 +- 10 files changed, 198 insertions(+), 147 deletions(-) diff --git a/command/command.go b/command/command.go index 84d85a81..e0dc2292 100644 --- a/command/command.go +++ b/command/command.go @@ -48,8 +48,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // Reject the request if client has >= 50 devices in the chain. 
activeDevices := 0 for { - // TODO(djandries): Call the dbHelpers variant instead - hasChangesRemaining, syncEntities, err := dynamoDB.GetUpdatesForType(deviceInfoTypeID, nil, nil, false, clientID, maxGUBatchSize, true) + hasChangesRemaining, syncEntities, err := dbHelpers.getUpdatesFromDBs(deviceInfoTypeID, 0, false, maxGUBatchSize) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", deviceInfoTypeID) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -76,7 +75,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } // Insert initial records if needed. - err := InsertServerDefinedUniqueEntities(dynamoDB, clientID) + err := dbHelpers.InsertServerDefinedUniqueEntities() if err != nil { log.Error().Err(err).Msg("Create server defined unique entities failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -206,15 +205,17 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag migratedEntities, err := dbHelpers.maybeMigrateToSQL(dataTypes) if err != nil { - return nil, fmt.Errorf("failed to perform migration: %w") + return nil, fmt.Errorf("failed to perform migration: %w", err) } - if err = dbHelpers.Trx.Commit(); err != nil { - return nil, err + if len(migratedEntities) > 0 { + if err = dynamoDB.DeleteEntities(migratedEntities); err != nil { + log.Error().Err(err).Msgf("Failed to delete migrated items") + } } - if err = dynamoDB.DeleteEntities(migratedEntities); err != nil { - log.Error().Err(err).Msgf("Failed to delete migrated items") + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err } return &errCode, nil @@ -246,6 +247,8 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c idMap := make(map[string]string) // Map to save commit data type ID & mtime typeMtimeMap := make(map[int]int64) + + var migratedEntities []*datastore.SyncEntity for i, v := range commitMsg.Entries { entryRsp := &sync_pb.CommitResponse_EntryResponse{} 
commitRsp.Entryresponse[i] = entryRsp @@ -316,7 +319,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } } } else { // Update - conflict, err := dbHelpers.updateSyncEntity(entityToCommit, oldVersion) + conflict, migratedEntity, err := dbHelpers.updateSyncEntity(entityToCommit, oldVersion) if err != nil { log.Error().Err(err).Msg("Update sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -329,6 +332,9 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.ResponseType = &rspType continue } + if migratedEntity != nil { + migratedEntities = append(migratedEntities, migratedEntity) + } } if err != nil { log.Error().Err(err).Msg("Interim count update failed") @@ -357,6 +363,12 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c cache.SetTypeMtime(context.Background(), clientID, dataType, mtime) } + if len(migratedEntities) > 0 { + if err = dynamoDB.DeleteEntities(migratedEntities); err != nil { + log.Error().Err(err).Msgf("Failed to delete migrated items") + } + } + if err = dbHelpers.Trx.Commit(); err != nil { return nil, err } diff --git a/command/helpers.go b/command/helpers.go index 3f72563e..b58b37ed 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -91,7 +91,7 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo } if curMaxSize > 0 { - sqlHasChangesRemaining, sqlSyncEntities, err := h.sqlDB.GetUpdatesForType(dataType, token, fetchFolders, h.ChainID, curMaxSize) + sqlHasChangesRemaining, sqlSyncEntities, err := h.sqlDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) if err != nil { return false, nil, err } @@ -121,47 +121,60 @@ func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict boo return conflict, nil } -func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, err error) { +func 
getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { + id := entity.ID + if *entity.DataType == datastore.HistoryTypeID { + newID, err := uuid.NewV7() + if err != nil { + return "", err + } + id = newID.String() + } + return id, nil +} + +func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, migratedEntity *datastore.SyncEntity, err error) { if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { conflict, err := h.sqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion) if err != nil { - return false, err + return false, nil, err } if conflict { - oldEntity, err := h.dynamoDB.DeleteEntity(entity) + oldEntity, err := h.dynamoDB.GetEntity(datastore.ItemQuery{ + ID: entity.ID, + ClientID: entity.ClientID, + }) if err != nil { - return false, err + return false, nil, err } if oldEntity == nil { - return true, nil + return true, nil, nil } if oldEntity.Deleted == nil || !*oldEntity.Deleted { if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { - return false, err + return false, nil, err } } - if *entity.DataType == datastore.HistoryTypeID { - newID, err := uuid.NewV7() - if err != nil { - return false, err - } - entity.ID = newID.String() + migratedEntityId, err := getMigratedEntityID(entity) + if err != nil { + return false, nil, err } + entity.ID = migratedEntityId conflict, err = h.sqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) if err != nil { - return false, err + return false, nil, err } if !conflict && (entity.Deleted == nil || !*entity.Deleted) { if err = h.ItemCounts.recordChange(*entity.DataType, false, true); err != nil { - return false, err + return false, nil, err } } - return conflict, err + return conflict, oldEntity, err } - return conflict, err + return conflict, nil, err } - conflict, _, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion) - return conflict, err + conflict, err = h.dynamoDB.UpdateSyncEntity(entity, 
oldVersion) + return conflict, nil, err } func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) { @@ -175,6 +188,10 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data } applicableDataTypes = append(applicableDataTypes, dataType) } + if len(applicableDataTypes) == 0 { + return nil, nil + } + migrationStatuses, err := h.sqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) if err != nil { return nil, err @@ -183,7 +200,7 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data currLimit := h.sqlDB.MigrateChunkSize() var updatedMigrationStatuses []*datastore.MigrationStatus - for _, dataType := range dataTypes { + for _, dataType := range applicableDataTypes { if currLimit <= 0 { break } @@ -195,6 +212,12 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data var earliestMtime *int64 if migrationStatus != nil { earliestMtime = migrationStatus.EarliestMtime + } else { + migrationStatus = &datastore.MigrationStatus{ + ChainID: h.ChainID, + DataType: dataType, + EarliestMtime: nil, + } } hasChangesRemaining, syncEntities, err := h.dynamoDB.GetUpdatesForType(dataType, nil, earliestMtime, true, h.clientID, currLimit, false) @@ -204,27 +227,87 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data currLimit -= len(syncEntities) - lastItem := &syncEntities[len(syncEntities)-1] - if !hasChangesRemaining { migrationStatus.EarliestMtime = nil - } else if lastItem.Mtime != nil { - migrationStatus.EarliestMtime = lastItem.Mtime + } else if len(syncEntities) > 0 { + if lastItem := &syncEntities[len(syncEntities)-1]; lastItem.Mtime != nil { + migrationStatus.EarliestMtime = lastItem.Mtime + } } updatedMigrationStatuses = append(updatedMigrationStatuses, migrationStatus) var syncEntitiesPtr []*datastore.SyncEntity for _, syncEntity := range syncEntities { - syncEntitiesPtr = 
append(syncEntitiesPtr, &syncEntity) + syncEntity.ChainID = &h.ChainID + newEntity := &syncEntity + migratedEntityID, err := getMigratedEntityID(&syncEntity) + if err != nil { + return nil, err + } + if migratedEntityID != syncEntity.ID { + entityClone := syncEntity + entityClone.ID = migratedEntityID + newEntity = &entityClone + } + syncEntitiesPtr = append(syncEntitiesPtr, newEntity) migratedEntities = append(migratedEntities, &syncEntity) } - if _, err = h.sqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { - return nil, err + if len(syncEntitiesPtr) > 0 { + if _, err = h.sqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { + return nil, err + } } } - if err = h.sqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { - return nil, err + if len(updatedMigrationStatuses) > 0 { + if err = h.sqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { + return nil, err + } } return migratedEntities, nil } + +// InsertServerDefinedUniqueEntities inserts the server defined unique tag +// entities if it is not in the DB yet for a specific client. +func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { + // Check if they're existed already for this client. + // If yes, just return directly. 
+ ready, err := h.dynamoDB.HasServerDefinedUniqueTag(h.clientID, nigoriTag) + if err != nil { + return fmt.Errorf("error checking if entity with a server tag existed: %w", err) + } + if ready { + return nil + } + + entities, err := CreateServerDefinedUniqueEntities(h.clientID, h.ChainID) + if err != nil { + return err + } + + var dynamoEntities []*datastore.SyncEntity + var sqlEntities []*datastore.SyncEntity + for _, entity := range entities { + if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + sqlEntities = append(sqlEntities, entity) + } else { + dynamoEntities = append(dynamoEntities, entity) + } + } + + if len(dynamoEntities) > 0 { + err = h.dynamoDB.InsertSyncEntitiesWithServerTags(dynamoEntities) + if err != nil { + return fmt.Errorf("error inserting entities with server tags to DynamoDB: %w", err) + } + } + + if len(sqlEntities) > 0 { + _, err = h.sqlDB.InsertSyncEntities(h.Trx, sqlEntities) + if err != nil { + return fmt.Errorf("error inserting entities with server tags to SQL: %w", err) + } + } + + return nil +} diff --git a/command/server_defined_unique_entity.go b/command/server_defined_unique_entity.go index 81bd7337..e91c98b3 100644 --- a/command/server_defined_unique_entity.go +++ b/command/server_defined_unique_entity.go @@ -24,7 +24,7 @@ const ( bookmarkBarTag string = "bookmark_bar" ) -func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clientID string, parentID string, specifics *sync_pb.EntitySpecifics) (*datastore.SyncEntity, error) { +func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clientID string, chainID int64, parentID string, specifics *sync_pb.EntitySpecifics) (*datastore.SyncEntity, error) { now := utils.UnixMilli(time.Now()) deleted := false folder := true @@ -41,30 +41,17 @@ func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clien Version: &version, ParentIdString: &parentID, IdString: &idString, Specifics: specifics} - return 
datastore.CreateDBSyncEntity(pbEntity, nil, clientID, 0) + return datastore.CreateDBSyncEntity(pbEntity, nil, clientID, chainID) } -// InsertServerDefinedUniqueEntities inserts the server defined unique tag -// entities if it is not in the DB yet for a specific client. -func InsertServerDefinedUniqueEntities(db datastore.DynamoDatastore, clientID string) error { - var entities []*datastore.SyncEntity - // Check if they're existed already for this client. - // If yes, just return directly. - ready, err := db.HasServerDefinedUniqueTag(clientID, nigoriTag) - if err != nil { - return fmt.Errorf("error checking if entity with a server tag existed: %w", err) - } - if ready { - return nil - } - +func CreateServerDefinedUniqueEntities(clientID string, chainID int64) (entities []*datastore.SyncEntity, err error) { // Create nigori top-level folder nigoriSpecific := &sync_pb.NigoriSpecifics{} nigoriEntitySpecific := &sync_pb.EntitySpecifics_Nigori{Nigori: nigoriSpecific} specifics := &sync_pb.EntitySpecifics{SpecificsVariant: nigoriEntitySpecific} - entity, err := createServerDefinedUniqueEntity(nigoriName, nigoriTag, clientID, "0", specifics) + entity, err := createServerDefinedUniqueEntity(nigoriName, nigoriTag, clientID, chainID, "0", specifics) if err != nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) @@ -72,9 +59,9 @@ func InsertServerDefinedUniqueEntities(db datastore.DynamoDatastore, clientID st bookmarkSpecific := &sync_pb.BookmarkSpecifics{} bookmarkEntitySpecific := &sync_pb.EntitySpecifics_Bookmark{Bookmark: bookmarkSpecific} specifics = &sync_pb.EntitySpecifics{SpecificsVariant: bookmarkEntitySpecific} - entity, err = createServerDefinedUniqueEntity(bookmarksName, bookmarksTag, clientID, "0", specifics) + entity, err = createServerDefinedUniqueEntity(bookmarksName, bookmarksTag, clientID, chainID, "0", specifics) if err 
!= nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) @@ -86,17 +73,11 @@ func InsertServerDefinedUniqueEntities(db datastore.DynamoDatastore, clientID st bookmarkBarName: bookmarkBarTag} for name, tag := range bookmarkSecondLevelFolders { entity, err := createServerDefinedUniqueEntity( - name, tag, clientID, bookmarkRootID, specifics) + name, tag, clientID, chainID, bookmarkRootID, specifics) if err != nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) } - - // Start a transaction to insert all server defined unique entities - err = db.InsertSyncEntitiesWithServerTags(entities) - if err != nil { - return fmt.Errorf("error inserting entities with server tags: %w", err) - } - return nil + return entities, nil } diff --git a/datastore/dynamo_migration_status.go b/datastore/dynamo_migration_status.go index 93a8b9e4..aa09e26c 100644 --- a/datastore/dynamo_migration_status.go +++ b/datastore/dynamo_migration_status.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jmoiron/sqlx" + "github.com/lib/pq" ) type MigrationStatus struct { @@ -19,8 +20,8 @@ func (sqlDB *SQLDB) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataT err = tx.Select(&statuses, ` SELECT chain_id, data_type, earliest_mtime FROM dynamo_migration_statuses - WHERE chain_id = $1 AND data_type IN $2 - `, chainID, dataTypes) + WHERE chain_id = $1 AND data_type = ANY($2) + `, chainID, pq.Array(dataTypes)) if err != nil { return nil, fmt.Errorf("failed to get dynamo migration status: %w", err) @@ -37,9 +38,9 @@ func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*Migra _, err := tx.NamedExec(` INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) VALUES (:chain_id, :data_type, 
:earliest_mtime) - ON CONFLICT DO UPDATE + ON CONFLICT (chain_id, data_type) DO UPDATE SET earliest_mtime = $3 - WHERE earliest_mtime IS NOT NULL AND earliest_mtime > :earliest_mtime + WHERE dynamo_migration_statuses.earliest_mtime IS NOT NULL AND (dynamo_migration_statuses.earliest_mtime > EXCLUDED.earliest_mtime OR EXCLUDED.earliest_mtime IS NULL) `, statuses) if err != nil { return fmt.Errorf("failed to update dynamo migration statuses: %w", err) diff --git a/datastore/instrumented_dynamo_datastore.go b/datastore/instrumented_dynamo_datastore.go index fb66485b..7fd352e4 100644 --- a/datastore/instrumented_dynamo_datastore.go +++ b/datastore/instrumented_dynamo_datastore.go @@ -65,8 +65,8 @@ func (_d DynamoDatastoreWithPrometheus) DeleteEntities(entities []*SyncEntity) ( return _d.base.DeleteEntities(entities) } -// DeleteEntity implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) DeleteEntity(entity *SyncEntity) (sp1 *SyncEntity, err error) { +// DisableSyncChain implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { _since := time.Now() defer func() { result := "ok" @@ -74,13 +74,13 @@ func (_d DynamoDatastoreWithPrometheus) DeleteEntity(entity *SyncEntity) (sp1 *S result = "error" } - dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntity", result).Observe(time.Since(_since).Seconds()) + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.DeleteEntity(entity) + return _d.base.DisableSyncChain(clientID) } -// DisableSyncChain implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { +// GetClientItemCount implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetClientItemCount(clientID string) (dp1 *DynamoItemCounts, err error) { _since := time.Now() defer func() { result := "ok" @@ -88,13 
+88,13 @@ func (_d DynamoDatastoreWithPrometheus) DisableSyncChain(clientID string) (err e result = "error" } - dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.DisableSyncChain(clientID) + return _d.base.GetClientItemCount(clientID) } -// GetClientItemCount implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) GetClientItemCount(clientID string) (dp1 *DynamoItemCounts, err error) { +// GetEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetEntity(query ItemQuery) (sp1 *SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" @@ -102,9 +102,9 @@ func (_d DynamoDatastoreWithPrometheus) GetClientItemCount(clientID string) (dp1 result = "error" } - dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetEntity", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.GetClientItemCount(clientID) + return _d.base.GetEntity(query) } // GetUpdatesForType implements DynamoDatastore @@ -206,7 +206,7 @@ func (_d DynamoDatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItem } // UpdateSyncEntity implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { +func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, err error) { _since := time.Now() defer func() { result := "ok" diff --git a/datastore/instrumented_sql_datastore.go b/datastore/instrumented_sql_datastore.go index b9650792..e80b9d55 100644 --- a/datastore/instrumented_sql_datastore.go +++ 
b/datastore/instrumented_sql_datastore.go @@ -95,7 +95,7 @@ func (_d SQLDatastoreWithPrometheus) GetItemCounts(tx *sqlx.Tx, chainID int64) ( } // GetUpdatesForType implements SQLDatastore -func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { +func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" @@ -105,7 +105,7 @@ func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, chainID, maxSize) + return _d.base.GetUpdatesForType(tx, dataType, clientToken, fetchFolders, chainID, maxSize) } // HasItem implements SQLDatastore diff --git a/datastore/interfaces.go b/datastore/interfaces.go index a3f2fe21..5a0c1383 100644 --- a/datastore/interfaces.go +++ b/datastore/interfaces.go @@ -9,7 +9,7 @@ type DynamoDatastore interface { // Insert a series of sync entities in a write transaction. InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error // Update an existing sync entity. - UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) + UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, err error) // Get updates for a specific type which are modified after the time of // client token for a given client. 
Besides the array of sync entities, a // boolean value indicating whether there are more updates to query in the @@ -27,11 +27,11 @@ type DynamoDatastore interface { DisableSyncChain(clientID string) error // IsSyncChainDisabled checks whether a given sync chain is deleted IsSyncChainDisabled(clientID string) (bool, error) - // Checks if sync item exists for a client + // HasItem checks if sync item exists for a client HasItem(clientID string, ID string) (bool, error) - // Deletes an existing item - DeleteEntity(entity *SyncEntity) (*SyncEntity, error) - // Deletes multiple existing items + // GetEntity gets an existing entity + GetEntity(query ItemQuery) (*SyncEntity, error) + // DeleteEntities deletes multiple existing items DeleteEntities(entities []*SyncEntity) error } @@ -46,7 +46,7 @@ type SQLDatastore interface { // GetAndLockChainID retrieves and locks a chain ID for a given client ID GetAndLockChainID(tx *sqlx.Tx, clientID string) (*int64, error) // GetUpdatesForType retrieves updates for a specific data type - GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) + GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) // GetDynamoMigrationStatuses retrieves migration statuses for specified data types GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (map[int]*MigrationStatus, error) // UpdateDynamoMigrationStatuses updates migration statuses in the database diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index 72102588..f8218826 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -28,7 +28,7 @@ type SQLVariations struct { func parseRollouts(envKey string) (map[int]float32, error) { rollouts := make(map[int]float32) - envVal := os.Getenv(sqlSaveRolloutsEnvKey) + envVal := os.Getenv(envKey) if len(envVal) > 0 { pairs := 
strings.Split(envVal, ",") @@ -46,7 +46,7 @@ func parseRollouts(envKey string) (map[int]float32, error) { value, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 32) if err != nil { - return nil, fmt.Errorf("Invalid float in %s: %s", sqlSaveRolloutsEnvKey, parts[1]) + return nil, fmt.Errorf("Invalid float in %s: %s", envKey, parts[1]) } rollouts[key] = float32(value) diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index f2e4b986..f3cd9c10 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -460,7 +460,7 @@ func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { } // UpdateSyncEntity updates a sync item in dynamoDB. -func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, bool, error) { +func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, error) { id := entity.ID if *entity.DataType == HistoryTypeID { id = *entity.ClientDefinedUniqueTag @@ -468,7 +468,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: id} key, err := dynamodbattribute.MarshalMap(primaryKey) if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + return false, fmt.Errorf("error marshalling key to update sync entity: %w", err) } // condition to ensure the request is update only... 
@@ -505,7 +505,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() if err != nil { - return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) + return false, fmt.Errorf("error building expression to update sync entity: %w", err) } // Soft-delete a sync item with a client tag, use a transaction to delete its @@ -515,7 +515,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} tagItemKey, err := dynamodbattribute.MarshalMap(pk) if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + return false, fmt.Errorf("error marshalling key to update sync entity: %w", err) } items := []*dynamodb.TransactWriteItem{} @@ -546,16 +546,16 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { for _, reason := range canceledException.CancellationReasons { if reason.Code != nil && *reason.Code == conditionalCheckFailed { - return true, false, nil + return true, nil } } } - return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) + return false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) } // Successfully soft-delete the sync item and delete the tag item. - return false, true, nil + return false, nil } // Not deleting a sync item with a client tag, do a normal update on sync @@ -575,27 +575,19 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo if aerr, ok := err.(awserr.Error); ok { // Return conflict if the write condition fails. 
if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return true, false, nil + return true, nil } } - return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) + return false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) } // Unmarshal out.Attributes oldEntity := &SyncEntity{} err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) if err != nil { - return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) - } - var deleted bool - if entity.Deleted == nil { // No updates on Deleted this time. - deleted = false - } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. - deleted = *entity.Deleted - } else { - deleted = !*oldEntity.Deleted && *entity.Deleted + return false, fmt.Errorf("error unmarshalling old sync entity: %w", err) } - return false, deleted, nil + return false, nil } // GetUpdatesForType returns sync entities of a data type where it's mtime is @@ -716,51 +708,33 @@ func (dynamo *Dynamo) GetUpdatesForType(dataType int, minMtime *int64, maxMtime return hasChangesRemaining, filteredSyncEntities, nil } -func (dynamo *Dynamo) DeleteEntity(entity *SyncEntity) (oldEntity *SyncEntity, err error) { - key, err := dynamodbattribute.MarshalMap(ItemQuery{ - ClientID: entity.ClientID, - ID: entity.ID, - }) +func (dynamo *Dynamo) GetEntity(query ItemQuery) (*SyncEntity, error) { + key, err := dynamodbattribute.MarshalMap(query) if err != nil { - return nil, fmt.Errorf("error marshalling key to get item for deletion: %w", err) + return nil, fmt.Errorf("error marshalling key for GetEntity: %w", err) } - returnValues := dynamodb.ReturnValueAllOld - input := &dynamodb.DeleteItemInput{ - TableName: aws.String(Table), - Key: key, - ReturnValues: &returnValues, + input := &dynamodb.GetItemInput{ + TableName: aws.String(Table), + Key: key, } - result, err := dynamo.DeleteItem(input) + result, err := dynamo.GetItem(input) if err != nil { - return nil, 
fmt.Errorf("failed to delete item: %w", err) + return nil, fmt.Errorf("error getting item: %w", err) } - if entity.ClientDefinedUniqueTag != nil && len(*entity.ClientDefinedUniqueTag) > 0 { - key, err = dynamodbattribute.MarshalMap(NewServerClientUniqueTagItemQuery(entity.ClientID, *entity.ClientDefinedUniqueTag, false)) - if err != nil { - return nil, fmt.Errorf("error marshalling client tag key for deletion: %w", err) - } - input = &dynamodb.DeleteItemInput{ - TableName: aws.String(Table), - Key: key, - } - _, err := dynamo.DeleteItem(input) - if err != nil { - return nil, fmt.Errorf("failed to delete client tag: %w", err) - } - } - - if result.Attributes == nil { + if result.Item == nil { return nil, nil } - if err = dynamodbattribute.UnmarshalMap(result.Attributes, &oldEntity); err != nil { - return nil, fmt.Errorf("failed to get old entity after deleting: %w", err) + var entity SyncEntity + err = dynamodbattribute.UnmarshalMap(result.Item, &entity) + if err != nil { + return nil, fmt.Errorf("error unmarshalling item: %w", err) } - return oldEntity, nil + return &entity, nil } func (dynamo *Dynamo) DeleteEntities(entities []*SyncEntity) error { diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index daf90871..89d666fb 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -144,7 +144,7 @@ func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *in return &id, nil } -func (sqlDB *SQLDB) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { +func (sqlDB *SQLDB) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { var additionalCondition string if !fetchFolders { additionalCondition = "AND folder = false " @@ -152,7 +152,7 @@ func (sqlDB *SQLDB) GetUpdatesForType(dataType 
int, clientToken int64, fetchFold query := `SELECT * FROM entities WHERE chain_id = $1 AND data_type = $2 AND mtime > $3 ` + additionalCondition + `ORDER BY mtime LIMIT $4` - if err := sqlDB.Select(&entities, query, chainID, dataType, clientToken, maxSize); err != nil { + if err := tx.Select(&entities, query, chainID, dataType, clientToken, maxSize); err != nil { return false, nil, fmt.Errorf("failed to get entity updates: %w", err) } return len(entities) == maxSize, entities, nil From c247b64292514d8c809392d797d1ddba893fea5b Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Tue, 17 Sep 2024 16:00:15 -0700 Subject: [PATCH 06/19] Fix SQL history update issue, add clear server data support for SQL --- command/command.go | 24 ++++++++++++--- command/helpers.go | 40 ++++++++++++------------- datastore/instrumented_sql_datastore.go | 14 +++++++++ datastore/interfaces.go | 2 ++ datastore/sync_entity_dynamo.go | 2 +- datastore/sync_entity_sql.go | 16 +++++++--- migrations/20240904202925_init.up.sql | 9 ++---- 7 files changed, 72 insertions(+), 35 deletions(-) diff --git a/command/command.go b/command/command.go index e0dc2292..a00c9dd1 100644 --- a/command/command.go +++ b/command/command.go @@ -378,18 +378,24 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c // handleClearServerDataRequest handles clearing user data from the datastore and cache // and fills the response -func handleClearServerDataRequest(cache *cache.Cache, db datastore.DynamoDatastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleClearServerDataRequest(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS var err error - err = db.DisableSyncChain(clientID) + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, nil, false) + if err != nil { + 
return nil, err + } + defer dbHelpers.Trx.Rollback() + + err = dynamoDB.DisableSyncChain(clientID) if err != nil { log.Error().Err(err).Msg("Failed to disable sync chain") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, err } - syncEntities, err := db.ClearServerData(clientID) + syncEntities, err := dynamoDB.ClearServerData(clientID) if err != nil { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, err @@ -411,6 +417,16 @@ func handleClearServerDataRequest(cache *cache.Cache, db datastore.DynamoDatasto } } + if err = dbHelpers.SqlDB.DeleteChain(dbHelpers.Trx, dbHelpers.ChainID); err != nil { + log.Error().Err(err).Msg("Failed to disable sync chain") + errCode = sync_pb.SyncEnums_TRANSIENT_ERROR + return &errCode, err + } + + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err + } + return &errCode, nil } @@ -458,7 +474,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_CLEAR_SERVER_DATA { csdRsp := &sync_pb.ClearServerDataResponse{} pbRsp.ClearServerData = csdRsp - pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, dynamoDB, pb.ClearServerData, clientID) + pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, dynamoDB, sqlDB, pb.ClearServerData, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) diff --git a/command/helpers.go b/command/helpers.go index b58b37ed..41b4e81f 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -12,7 +12,7 @@ import ( type DBHelpers struct { dynamoDB datastore.DynamoDatastore - sqlDB datastore.SQLDatastore + SqlDB datastore.SQLDatastore Trx *sqlx.Tx clientID string ChainID int64 @@ -44,7 +44,7 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatasto return &DBHelpers{ dynamoDB: dynamoDB, - sqlDB: sqlDB, + SqlDB: sqlDB, Trx: trx, clientID: clientID, ChainID: *chainID, @@ -55,8 +55,8 @@ func NewDBHelpers(dynamoDB 
datastore.DynamoDatastore, sqlDB datastore.SQLDatasto func (h *DBHelpers) hasItemInEitherDB(entity *datastore.SyncEntity) (exists bool, err error) { // Check if item exists using client_unique_tag - if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { - exists, err := h.sqlDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) + if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + exists, err := h.SqlDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) if err != nil { return false, err } @@ -72,8 +72,8 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo if curMaxSize == 0 { return false, nil, nil } - if h.sqlDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { - dynamoMigrationStatuses, err := h.sqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) + if h.SqlDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { + dynamoMigrationStatuses, err := h.SqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) if err != nil { return false, nil, err } @@ -91,7 +91,7 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo } if curMaxSize > 0 { - sqlHasChangesRemaining, sqlSyncEntities, err := h.sqlDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) + sqlHasChangesRemaining, sqlSyncEntities, err := h.SqlDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) if err != nil { return false, nil, err } @@ -107,9 +107,9 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo } func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict bool, err error) { - savedInSQL := h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + savedInSQL := h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) if savedInSQL { - conflict, err = 
h.sqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + conflict, err = h.SqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) } else { conflict, err = h.dynamoDB.InsertSyncEntity(entity) } @@ -134,8 +134,8 @@ func getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { } func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, migratedEntity *datastore.SyncEntity, err error) { - if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { - conflict, err := h.sqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion) + if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + conflict, err := h.SqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion) if err != nil { return false, nil, err } @@ -160,7 +160,7 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in return false, nil, err } entity.ID = migratedEntityId - conflict, err = h.sqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + conflict, err = h.SqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) if err != nil { return false, nil, err } @@ -178,12 +178,12 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in } func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) { - if rand.Float32() > h.sqlDB.MigrateIntervalPercent() { + if rand.Float32() > h.SqlDB.MigrateIntervalPercent() { return nil, nil } var applicableDataTypes []int for _, dataType := range dataTypes { - if !h.sqlDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { + if !h.SqlDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { continue } applicableDataTypes = append(applicableDataTypes, dataType) @@ -192,12 +192,12 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data return nil, nil } - migrationStatuses, err := 
h.sqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) + migrationStatuses, err := h.SqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) if err != nil { return nil, err } - currLimit := h.sqlDB.MigrateChunkSize() + currLimit := h.SqlDB.MigrateChunkSize() var updatedMigrationStatuses []*datastore.MigrationStatus for _, dataType := range applicableDataTypes { @@ -254,13 +254,13 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data } if len(syncEntitiesPtr) > 0 { - if _, err = h.sqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { + if _, err = h.SqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { return nil, err } } } if len(updatedMigrationStatuses) > 0 { - if err = h.sqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { + if err = h.SqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { return nil, err } } @@ -288,7 +288,7 @@ func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { var dynamoEntities []*datastore.SyncEntity var sqlEntities []*datastore.SyncEntity for _, entity := range entities { - if h.sqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { sqlEntities = append(sqlEntities, entity) } else { dynamoEntities = append(dynamoEntities, entity) @@ -303,7 +303,7 @@ func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { } if len(sqlEntities) > 0 { - _, err = h.sqlDB.InsertSyncEntities(h.Trx, sqlEntities) + _, err = h.SqlDB.InsertSyncEntities(h.Trx, sqlEntities) if err != nil { return fmt.Errorf("error inserting entities with server tags to SQL: %w", err) } diff --git a/datastore/instrumented_sql_datastore.go b/datastore/instrumented_sql_datastore.go index e80b9d55..3c3482a6 100644 --- a/datastore/instrumented_sql_datastore.go +++ b/datastore/instrumented_sql_datastore.go @@ -52,6 +52,20 @@ 
func (_d SQLDatastoreWithPrometheus) Beginx() (tp1 *sqlx.Tx, err error) { return _d.base.Beginx() } +// DeleteChain implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) DeleteChain(tx *sqlx.Tx, chainID int64) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteChain", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteChain(tx, chainID) +} + // GetAndLockChainID implements SQLDatastore func (_d SQLDatastoreWithPrometheus) GetAndLockChainID(tx *sqlx.Tx, clientID string) (ip1 *int64, err error) { _since := time.Now() diff --git a/datastore/interfaces.go b/datastore/interfaces.go index 5a0c1383..68fe2c57 100644 --- a/datastore/interfaces.go +++ b/datastore/interfaces.go @@ -61,4 +61,6 @@ type SQLDatastore interface { MigrateIntervalPercent() float32 // MigrateChunkSize returns the max entity count for each migration chunk MigrateChunkSize() int + // DeleteChain removes a chain and its associated data from the database + DeleteChain(tx *sqlx.Tx, chainID int64) error } diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index f3cd9c10..f13f081f 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -381,7 +381,7 @@ func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) { } // Fail delete if race condition detected (modified time has changed). 
- if item.Version != nil { + if item.Version != nil && item.Mtime != nil { cond := expression.Name("Mtime").Equal(expression.Value(*item.Mtime)) expr, err := expression.NewBuilder().WithCondition(cond).Build() if err != nil { diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 89d666fb..330600e9 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -56,13 +56,13 @@ func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (exist } func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, err error) { - var idColumn string + var idCondition string if *entity.DataType == HistoryTypeID { - idColumn = "client_defined_unique_tag" + idCondition = "client_defined_unique_tag = :client_defined_unique_tag" } else { - idColumn = "id" + idCondition = "id = :id" } - whereClause := " WHERE " + idColumn + " = :id AND chain_id = :chain_id AND deleted = false" + whereClause := " WHERE " + idCondition + " AND chain_id = :chain_id AND deleted = false" if *entity.DataType != HistoryTypeID { entity.OldVersion = &oldVersion whereClause += " AND version = :old_version" @@ -157,3 +157,11 @@ func (sqlDB *SQLDB) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int } return len(entities) == maxSize, entities, nil } + +func (sqlDB *SQLDB) DeleteChain(tx *sqlx.Tx, chainID int64) error { + _, err := tx.Exec(`DELETE FROM chains WHERE id = $1`, chainID) + if err != nil { + return fmt.Errorf("failed to delete chain with cascade: %w", err) + } + return nil +} diff --git a/migrations/20240904202925_init.up.sql b/migrations/20240904202925_init.up.sql index 7cd09795..a06dd6e3 100644 --- a/migrations/20240904202925_init.up.sql +++ b/migrations/20240904202925_init.up.sql @@ -6,7 +6,7 @@ CREATE TABLE chains ( ); CREATE TABLE dynamo_migration_statuses ( - chain_id BIGINT REFERENCES chains(id), + chain_id BIGINT REFERENCES chains(id) ON DELETE CASCADE, data_type INTEGER, -- null 
earliest_mtime indicates that all entities have been migrated earliest_mtime BIGINT, @@ -15,7 +15,7 @@ CREATE TABLE dynamo_migration_statuses ( CREATE TABLE entities ( id UUID, - chain_id BIGINT NOT NULL REFERENCES chains(id), + chain_id BIGINT NOT NULL REFERENCES chains(id) ON DELETE CASCADE, data_type INTEGER NOT NULL, ctime BIGINT NOT NULL, mtime BIGINT NOT NULL, @@ -34,7 +34,4 @@ CREATE TABLE entities ( PRIMARY KEY (id, chain_id), UNIQUE (chain_id, client_defined_unique_tag) ); -CREATE INDEX entities_chain_id_idx ON entities (chain_id); -CREATE INDEX entities_data_type_mtime_idx ON entities (data_type, mtime); --- or maybe make a partial index for history entities and mtime, while keeping the chainid datattype and mtime index --- CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); +CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); From 10af175805a4e43aacf2d0c2b826cc1eea0ffd34 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Tue, 17 Sep 2024 17:57:21 -0700 Subject: [PATCH 07/19] Add rollout confirmation/waiting to reduce risk of conflicts --- cache/instrumented_redis.go | 14 +++++++ cache/redis.go | 76 ++++++++++++++----------------------- command/command.go | 7 +++- command/helpers.go | 8 +++- datastore/sql_variations.go | 11 +++++- server/rollout.go | 45 ++++++++++++++++++++++ server/server.go | 2 + 7 files changed, 112 insertions(+), 51 deletions(-) create mode 100644 server/rollout.go diff --git a/cache/instrumented_redis.go b/cache/instrumented_redis.go index 17418f82..37fd9338 100755 --- a/cache/instrumented_redis.go +++ b/cache/instrumented_redis.go @@ -107,3 +107,17 @@ func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val str }() return _d.base.Set(ctx, key, val, ttl) } + +// SubscribeAndWait implements RedisClient +func (_d RedisClientWithPrometheus) SubscribeAndWait(ctx context.Context, channel string) (err error) { + _since := time.Now() + 
defer func() { + result := "ok" + if err != nil { + result = "error" + } + + redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "SubscribeAndWait", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.SubscribeAndWait(ctx, channel) +} diff --git a/cache/redis.go b/cache/redis.go index 5aa9fbf2..a41f4896 100644 --- a/cache/redis.go +++ b/cache/redis.go @@ -2,6 +2,7 @@ package cache import ( "context" + "fmt" "os" "strconv" "strings" @@ -18,14 +19,11 @@ type RedisClient interface { Get(ctx context.Context, key string, delete bool) (string, error) Del(ctx context.Context, keys ...string) error FlushAll(ctx context.Context) error + SubscribeAndWait(ctx context.Context, channel string) error } -type redisSimpleClient struct { - client *redis.Client -} - -type redisClusterClient struct { - client *redis.ClusterClient +type redisClientImpl struct { + client redis.UniversalClient } // NewRedisClient create a client for standalone redis or redis cluster. @@ -50,24 +48,24 @@ func NewRedisClient() RedisClient { client := redis.NewClient(&redis.Options{ Addr: addrs[0], }) - r = &redisSimpleClient{client} + r = &redisClientImpl{client} } else { client := redis.NewClusterClient(&redis.ClusterOptions{ Addrs: addrs, PoolSize: poolSize, ReadOnly: true, }) - r = &redisClusterClient{client} + r = &redisClientImpl{client} } return r } -func (r *redisSimpleClient) Set(ctx context.Context, key string, val string, ttl time.Duration) error { +func (r *redisClientImpl) Set(ctx context.Context, key string, val string, ttl time.Duration) error { return r.client.Set(ctx, key, val, ttl).Err() } -func (r *redisSimpleClient) Incr(ctx context.Context, key string, subtract bool) (int, error) { +func (r *redisClientImpl) Incr(ctx context.Context, key string, subtract bool) (int, error) { var res *redis.IntCmd if subtract { res = r.client.Decr(ctx, key) @@ -78,7 +76,7 @@ func (r *redisSimpleClient) Incr(ctx context.Context, key string, subtract bool) return int(val), err 
} -func (r *redisSimpleClient) Get(ctx context.Context, key string, delete bool) (string, error) { +func (r *redisClientImpl) Get(ctx context.Context, key string, delete bool) (string, error) { var res *redis.StringCmd if delete { res = r.client.GetDel(ctx, key) @@ -92,47 +90,31 @@ func (r *redisSimpleClient) Get(ctx context.Context, key string, delete bool) (s return val, err } -func (r *redisSimpleClient) Del(ctx context.Context, keys ...string) error { +func (r *redisClientImpl) Del(ctx context.Context, keys ...string) error { return r.client.Del(ctx, keys...).Err() } -func (r *redisSimpleClient) FlushAll(ctx context.Context) error { +func (r *redisClientImpl) FlushAll(ctx context.Context) error { return r.client.FlushAll(ctx).Err() } -func (r *redisClusterClient) Set(ctx context.Context, key string, val string, ttl time.Duration) error { - return r.client.Set(ctx, key, val, ttl).Err() -} - -func (r *redisClusterClient) Incr(ctx context.Context, key string, subtract bool) (int, error) { - var res *redis.IntCmd - if subtract { - res = r.client.Decr(ctx, key) - } else { - res = r.client.Incr(ctx, key) +func (r *redisClientImpl) SubscribeAndWait(ctx context.Context, channel string) error { + pubsub := r.client.Subscribe(ctx, channel) + defer pubsub.Close() + + ch := pubsub.Channel() + + for { + select { + case msg, ok := <-ch: + if !ok { + return fmt.Errorf("redis channel unexpectedly closed") + } + if msg != nil { + return nil + } + case <-ctx.Done(): + return ctx.Err() + } } - val, err := res.Result() - return int(val), err -} - -func (r *redisClusterClient) Get(ctx context.Context, key string, delete bool) (string, error) { - var res *redis.StringCmd - if delete { - res = r.client.GetDel(ctx, key) - } else { - res = r.client.Get(ctx, key) - } - val, err := res.Result() - if err == redis.Nil { - return "", nil - } - return val, err -} - -func (r *redisClusterClient) Del(ctx context.Context, keys ...string) error { - return r.client.Del(ctx, keys...).Err() -} - 
-func (r *redisClusterClient) FlushAll(ctx context.Context) error { - return r.client.FlushAll(ctx).Err() } diff --git a/command/command.go b/command/command.go index a00c9dd1..a2597074 100644 --- a/command/command.go +++ b/command/command.go @@ -75,7 +75,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } // Insert initial records if needed. - err := dbHelpers.InsertServerDefinedUniqueEntities() + err := dbHelpers.insertServerDefinedUniqueEntities() if err != nil { log.Error().Err(err).Msg("Create server defined unique entities failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -235,6 +235,11 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c return &errCode, nil } + if !sqlDB.Variations().Ready { + errCode = sync_pb.SyncEnums_TRANSIENT_ERROR + return &errCode, fmt.Errorf("SQL rollout not ready") + } + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, cache, true) if err != nil { return nil, err diff --git a/command/helpers.go b/command/helpers.go index 41b4e81f..98af0375 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -178,6 +178,9 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in } func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) { + if !h.SqlDB.Variations().Ready { + return nil, nil + } if rand.Float32() > h.SqlDB.MigrateIntervalPercent() { return nil, nil } @@ -269,7 +272,10 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data // InsertServerDefinedUniqueEntities inserts the server defined unique tag // entities if it is not in the DB yet for a specific client. -func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { +func (h *DBHelpers) insertServerDefinedUniqueEntities() error { + if !h.SqlDB.Variations().Ready { + return fmt.Errorf("SQL rollout not ready") + } // Check if they're existed already for this client. 
// If yes, just return directly. ready, err := h.dynamoDB.HasServerDefinedUniqueTag(h.clientID, nigoriTag) diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index f8218826..a87faffe 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -24,6 +24,7 @@ func VariationHashDecimal(input string) float32 { type SQLVariations struct { sqlSaveRollouts map[int]float32 sqlMigrateRollouts map[int]float32 + Ready bool } func parseRollouts(envKey string) (map[int]float32, error) { @@ -67,8 +68,9 @@ func LoadSQLVariations() (*SQLVariations, error) { } return &SQLVariations{ - sqlSaveRollouts, - sqlMigrateRollouts, + sqlSaveRollouts: sqlSaveRollouts, + sqlMigrateRollouts: sqlMigrateRollouts, + Ready: false, }, nil } @@ -82,6 +84,11 @@ func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHa return exists && variationHashDecimal <= rolloutPercent } +func (v *SQLVariations) GetStateDigest() string { + return sqlSaveRolloutsEnvKey + ":" + os.Getenv(sqlSaveRolloutsEnvKey) + ";" + + sqlMigrateRolloutsEnvKey + ":" + os.Getenv(sqlMigrateRolloutsEnvKey) +} + func (sqlDB *SQLDB) Variations() *SQLVariations { return sqlDB.variations } diff --git a/server/rollout.go b/server/rollout.go new file mode 100644 index 00000000..7606cc1c --- /dev/null +++ b/server/rollout.go @@ -0,0 +1,45 @@ +package server + +import ( + "context" + "os" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/rs/zerolog/log" +) + +const ( + lastRolloutStateCacheKey string = "last-rollout-state" + rolloutConfirmChannelKey string = "rollout-confirm" + sqlDisableRolloutConfirm string = "SQL_DISABLE_ROLLOUT_CONFIRM" +) + +func maybeWaitOnRolloutConfigChange(sqlVariations *datastore.SQLVariations, cache *cache.Cache) { + currentDigest := sqlVariations.GetStateDigest() + + lastDigest, err := cache.Get(context.Background(), lastRolloutStateCacheKey, false) + if err != nil { + log.Fatal().Msgf("failed to get last rollout 
state: %v", err) + return + } + + rolloutConfirmDisabled := os.Getenv(sqlDisableRolloutConfirm) != "" + if !rolloutConfirmDisabled && currentDigest != lastDigest { + log.Info().Msg("Rollout configuration detected. Commits/writes disabled until Redis confirmation event is received...") + err = cache.SubscribeAndWait(context.Background(), rolloutConfirmChannelKey) + if err != nil { + log.Fatal().Msgf("failed to subscribe and wait for rollout confirmation: %v", err) + return + } + + err = cache.Set(context.Background(), lastRolloutStateCacheKey, currentDigest, 0) + if err != nil { + log.Fatal().Msgf("failed to update last rollout state: %v", err) + return + } + log.Info().Msg("Confirmation event received") + } + + sqlVariations.Ready = true +} diff --git a/server/server.go b/server/server.go index f0b61693..2e32ce12 100644 --- a/server/server.go +++ b/server/server.go @@ -78,6 +78,8 @@ func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, redis := cache.NewRedisClient() cache := cache.NewCache(cache.NewRedisClientWithPrometheus(redis, "redis")) + go maybeWaitOnRolloutConfigChange(sqlDB.Variations(), cache) + // Provide datastore & cache via context ctx = context.WithValue(ctx, syncContext.ContextKeyDatastore, dynamoDB) ctx = context.WithValue(ctx, syncContext.ContextKeyCache, &cache) From 8c55bacb3a33b64fcad0787f9dcbd2c43bb9fda4 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 18 Sep 2024 14:21:05 -0700 Subject: [PATCH 08/19] Embed SQL migrations, fix existing tests --- Dockerfile | 1 - command/command.go | 2 +- command/command_test.go | 69 ++++++++-------- command/helpers.go | 15 +++- command/server_defined_unique_entity_test.go | 33 +++++--- controller/controller_test.go | 17 ++-- datastore/datastoretest/dynamo.go | 5 +- datastore/datastoretest/mock_datastore.go | 23 +++++- datastore/datastoretest/sql.go | 9 +++ datastore/dynamo.go | 21 ++++- datastore/item_count_dynamo_test.go | 4 +- .../migrations}/20240904202925_init.down.sql | 
0 .../migrations}/20240904202925_init.up.sql | 0 datastore/sql.go | 58 ++++++++++---- datastore/sync_entity_sql.go | 2 +- datastore/sync_entity_test.go | 79 +++++++++---------- docker-compose.yml | 5 ++ middleware/middleware_test.go | 3 + misc/create_additional_dbs.sql | 2 + server/server.go | 8 +- server/server_test.go | 2 +- 21 files changed, 235 insertions(+), 123 deletions(-) create mode 100644 datastore/datastoretest/sql.go rename {migrations => datastore/migrations}/20240904202925_init.down.sql (100%) rename {migrations => datastore/migrations}/20240904202925_init.up.sql (100%) create mode 100644 misc/create_additional_dbs.sql diff --git a/Dockerfile b/Dockerfile index 368231a9..3010b51d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,6 @@ RUN CGO_ENABLED=0 GOOS=linux go build \ FROM alpine:3.20 as artifact RUN apk add --update ca-certificates # Certificates for SSL COPY --from=builder /src/main main -COPY ./migrations/ ./migrations EXPOSE 8295 diff --git a/command/command.go b/command/command.go index a2597074..dcaa705a 100644 --- a/command/command.go +++ b/command/command.go @@ -75,7 +75,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } // Insert initial records if needed. 
- err := dbHelpers.insertServerDefinedUniqueEntities() + err := dbHelpers.InsertServerDefinedUniqueEntities() if err != nil { log.Error().Err(err).Msg("Create server defined unique entities failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR diff --git a/command/command_test.go b/command/command_test.go index 4c9066e4..e14826d2 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -27,8 +27,9 @@ const ( type CommandTestSuite struct { suite.Suite - dynamo *datastore.Dynamo - cache *cache.Cache + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB } type PBSyncAttrs struct { @@ -60,20 +61,24 @@ func NewPBSyncAttrs(name *string, version *int64, deleted *bool, folder *bool, s func (suite *CommandTestSuite) SetupSuite() { datastore.Table = "client-entity-test-command" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") suite.cache = cache.NewCache(cache.NewRedisClient()) } func (suite *CommandTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *CommandTestSuite) TearDownTest() { suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") suite.Require().NoError( suite.cache.FlushAll(context.Background()), "Failed to clear cache") } @@ -221,7 +226,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { // Commit and check response. 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -241,7 +246,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -265,7 +270,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) @@ -287,7 +292,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -312,7 +317,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { msg = getClientToServerCommitMsg(entries) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, 
rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -328,7 +333,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -345,7 +350,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_NewClient() { rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -389,7 +394,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_GUBatchSize() { // Commit and check response. 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -413,7 +418,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -438,7 +443,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -459,7 +464,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -476,7 +481,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - 
command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -496,7 +501,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -518,7 +523,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo msg := getClientToServerCommitMsg([]*sync_pb.SyncEntity{child0}) rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -547,7 +552,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(6, len(rsp.Commit.Entryresponse)) @@ -562,7 +567,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo marker, 
sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(6, len(rsp.GetUpdates.Entries)) @@ -590,9 +595,9 @@ func assertTypeMtimeCacheValue(suite *CommandTestSuite, key string, mtime int64, func insertSyncEntitiesWithoutUpdateCache( suite *CommandTestSuite, entries []*sync_pb.SyncEntity, clientID string) (ret []*datastore.SyncEntity) { for _, entry := range entries { - dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID) + dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID, 1) suite.Require().NoError(err, "Create db entity from pb entity should succeed") - _, err = suite.dynamo.InsertSyncEntity(dbEntry) + _, err = suite.dynamoDB.InsertSyncEntity(dbEntry) suite.Require().NoError(err, "Insert sync entity should succeed") val, err := suite.cache.Get(context.Background(), clientID+"#"+strconv.Itoa(*dbEntry.DataType), false) @@ -616,7 +621,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(3, len(rsp.Commit.Entryresponse)) @@ -657,7 +662,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), 
+ command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(0, len(rsp.GetUpdates.Entries)) @@ -680,7 +685,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -698,7 +703,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(2, len(rsp.GetUpdates.Entries)) @@ -719,7 +724,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -746,7 +751,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk marker, sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - 
command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) @@ -765,7 +770,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -788,7 +793,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch marker, sync_pb.SyncEnums_PERIODIC, true, &clientBatch) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(2, len(rsp.GetUpdates.Entries)) @@ -805,7 +810,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch marker, sync_pb.SyncEnums_PERIODIC, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) diff --git a/command/helpers.go b/command/helpers.go index 98af0375..550dd361 100644 --- 
a/command/helpers.go +++ b/command/helpers.go @@ -139,6 +139,8 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in if err != nil { return false, nil, err } + // Conflict might mean that the entity does not exist in SQL but exists in Dynamo. + // Check for a Dynamo entity and migrate it accordingly. if conflict { oldEntity, err := h.dynamoDB.GetEntity(datastore.ItemQuery{ ID: entity.ID, @@ -148,9 +150,13 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in return false, nil, err } if oldEntity == nil { + // The conflict is unrelated to a pending Dynamo to SQL migration. + // Return conflict error to client. return true, nil, nil } if oldEntity.Deleted == nil || !*oldEntity.Deleted { + // If the stored entity was not already deleted, decrement the + // Dynamo item count since we'll be migrating the entity to SQL. if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { return false, nil, err } @@ -165,6 +171,8 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in return false, nil, err } if !conflict && (entity.Deleted == nil || !*entity.Deleted) { + // If the new entity is not considered deleted, increment the + // SQL interim count. 
if err = h.ItemCounts.recordChange(*entity.DataType, false, true); err != nil { return false, nil, err } @@ -174,6 +182,11 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in return conflict, nil, err } conflict, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion) + if !conflict && entity.Deleted != nil && *entity.Deleted { + if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { + return false, nil, err + } + } return conflict, nil, err } @@ -272,7 +285,7 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data // InsertServerDefinedUniqueEntities inserts the server defined unique tag // entities if it is not in the DB yet for a specific client. -func (h *DBHelpers) insertServerDefinedUniqueEntities() error { +func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { if !h.SqlDB.Variations().Ready { return fmt.Errorf("SQL rollout not ready") } diff --git a/command/server_defined_unique_entity_test.go b/command/server_defined_unique_entity_test.go index 94832250..35165c8b 100644 --- a/command/server_defined_unique_entity_test.go +++ b/command/server_defined_unique_entity_test.go @@ -13,7 +13,8 @@ import ( type ServerDefinedUniqueEntityTestSuite struct { suite.Suite - dynamo *datastore.Dynamo + sqlDB *datastore.SQLDB + dynamoDB *datastore.Dynamo } type SyncAttrs struct { @@ -29,26 +30,34 @@ type SyncAttrs struct { func (suite *ServerDefinedUniqueEntityTestSuite) SetupSuite() { datastore.Table = "client-entity-test-command" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") } func (suite *ServerDefinedUniqueEntityTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + 
datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *ServerDefinedUniqueEntityTestSuite) TearDownTest() { suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") } func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEntities() { + dbHelpers, err := command.NewDBHelpers(suite.dynamoDB, suite.sqlDB, "client1", nil, false) + suite.Require().NoError(err, "NewDBHelpers should succeed") + defer dbHelpers.Trx.Rollback() + suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client1"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities should succeed") suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client1"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities again for a same client should succeed") expectedSyncAttrsMap := map[string]*SyncAttrs{ @@ -102,7 +111,7 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn expectedTagItems = append(expectedTagItems, datastore.ServerClientUniqueTagItem{ClientID: "client1", ID: "Server#" + key}) } - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + tagItems, err := datastoretest.ScanTagItems(suite.dynamoDB) suite.Require().NoError(err, "ScanTagItems should succeed") // Check that Ctime and Mtime have been set, reset to zero value for subsequent @@ -119,7 +128,7 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn sort.Sort(datastore.TagItemByClientIDID(expectedTagItems)) suite.Assert().Equal(tagItems, expectedTagItems) - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamoDB) 
suite.Require().NoError(err, "ScanSyncEntities should succeed") // Find bookmark root folder to update parentID of its subfolders. @@ -154,8 +163,14 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn } suite.Assert().Equal(0, len(expectedSyncAttrsMap)) + suite.Require().NoError(dbHelpers.Trx.Commit(), "Transaction commit should succeed") + + dbHelpers, err = command.NewDBHelpers(suite.dynamoDB, suite.sqlDB, "client2", nil, false) + suite.Require().NoError(err, "NewDBHelpers should succeed") + defer dbHelpers.Trx.Rollback() + suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client2"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities should succeed for another client") } diff --git a/controller/controller_test.go b/controller/controller_test.go index e30826d1..fef95392 100644 --- a/controller/controller_test.go +++ b/controller/controller_test.go @@ -24,27 +24,32 @@ import ( type ControllerTestSuite struct { suite.Suite - dynamo *datastore.Dynamo - cache *cache.Cache + sqlDB *datastore.SQLDB + dynamoDB *datastore.Dynamo + cache *cache.Cache } func (suite *ControllerTestSuite) SetupSuite() { datastore.Table = "client-entity-test-controllor" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") suite.cache = cache.NewCache(cache.NewRedisClient()) } func (suite *ControllerTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *ControllerTestSuite) TearDownTest() { suite.Require().NoError( - 
datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") suite.Require().NoError( suite.cache.FlushAll(context.Background()), "Failed to clear cache") } @@ -83,7 +88,7 @@ func (suite *ControllerTestSuite) TestCommand() { suite.Require().NoError(err, "NewRequest should succeed") req.Header.Set("Authorization", "Bearer token") - handler := controller.Command(suite.cache, suite.dynamo) + handler := controller.Command(suite.cache, suite.dynamoDB, suite.sqlDB) // Test unauthorized response. rr := httptest.NewRecorder() diff --git a/datastore/datastoretest/dynamo.go b/datastore/datastoretest/dynamo.go index 9bc1ae34..03c1b7e6 100644 --- a/datastore/datastoretest/dynamo.go +++ b/datastore/datastoretest/dynamo.go @@ -25,6 +25,7 @@ func DeleteTable(dynamo *datastore.Dynamo) error { if aerr.Code() == dynamodb.ErrCodeResourceNotFoundException { return nil } + return err } else { return fmt.Errorf("error deleting table: %w", err) } @@ -59,8 +60,8 @@ func CreateTable(dynamo *datastore.Dynamo) error { &dynamodb.DescribeTableInput{TableName: aws.String(datastore.Table)}) } -// ResetTable deletes and creates datastore.Table in dynamoDB. -func ResetTable(dynamo *datastore.Dynamo) error { +// ResetDynamoTable deletes and creates datastore.Table in dynamoDB. 
+func ResetDynamoTable(dynamo *datastore.Dynamo) error { if err := DeleteTable(dynamo); err != nil { return fmt.Errorf("error deleting table to reset table: %w", err) } diff --git a/datastore/datastoretest/mock_datastore.go b/datastore/datastoretest/mock_datastore.go index 70911fd5..95faf55c 100644 --- a/datastore/datastoretest/mock_datastore.go +++ b/datastore/datastoretest/mock_datastore.go @@ -23,14 +23,14 @@ func (m *MockDatastore) InsertSyncEntitiesWithServerTags(entities []*datastore.S } // UpdateSyncEntity mocks calls to UpdateSyncEntity -func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { +func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, err error) { args := m.Called(entity, oldVersion) - return args.Bool(0), args.Bool(1), args.Error(2) + return args.Bool(0), args.Error(1) } // GetUpdatesForType mocks calls to GetUpdatesForType -func (m *MockDatastore) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []datastore.SyncEntity, error) { - args := m.Called(dataType, clientToken, fetchFolders, clientID, maxSize) +func (m *MockDatastore) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []datastore.SyncEntity, error) { + args := m.Called(dataType, minMtime, maxMtime, fetchFolders, clientID, maxSize, ascOrder) return args.Bool(0), args.Get(1).([]datastore.SyncEntity), args.Error(2) } @@ -74,3 +74,18 @@ func (m *MockDatastore) IsSyncChainDisabled(clientID string) (bool, error) { args := m.Called(clientID) return args.Bool(0), args.Error(1) } + +// DeleteEntities mocks the deletion of sync entities +func (m *MockDatastore) DeleteEntities(entities []*datastore.SyncEntity) error { + args := m.Called(entities) + return args.Error(0) +} + +// GetEntity mocks the retrieval of a sync entity +func 
(m *MockDatastore) GetEntity(query datastore.ItemQuery) (*datastore.SyncEntity, error) { + args := m.Called(query) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*datastore.SyncEntity), args.Error(1) +} diff --git a/datastore/datastoretest/sql.go b/datastore/datastoretest/sql.go new file mode 100644 index 00000000..71f72967 --- /dev/null +++ b/datastore/datastoretest/sql.go @@ -0,0 +1,9 @@ +package datastoretest + +import "github.com/brave/go-sync/datastore" + +// ResetSQLTables clears SQL tables. +func ResetSQLTables(sqlDB *datastore.SQLDB) error { + _, err := sqlDB.Exec("DELETE FROM chains") + return err +} diff --git a/datastore/dynamo.go b/datastore/dynamo.go index 27ad05a1..f2416290 100644 --- a/datastore/dynamo.go +++ b/datastore/dynamo.go @@ -7,6 +7,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" ) @@ -25,7 +26,9 @@ const ( var ( // Table is the name of the table in dynamoDB, could be modified in tests. - Table = os.Getenv("TABLE_NAME") + Table = os.Getenv("TABLE_NAME") + defaultTestEndpoint = "http://localhost:8000" + defaultTestRegion = "us-west-2" ) // PrimaryKey struct is used to represent the primary key of our table. @@ -40,7 +43,7 @@ type Dynamo struct { } // NewDynamo returns a dynamoDB client to be used. 
-func NewDynamo() (*Dynamo, error) { +func NewDynamo(isTesting bool) (*Dynamo, error) { httpClient := &http.Client{ Timeout: 30 * time.Second, Transport: &http.Transport{ @@ -49,7 +52,19 @@ func NewDynamo() (*Dynamo, error) { }, } - awsConfig := aws.NewConfig().WithRegion(os.Getenv("AWS_REGION")).WithEndpoint(os.Getenv("AWS_ENDPOINT")).WithHTTPClient(httpClient) + endpoint := os.Getenv("AWS_ENDPOINT") + region := os.Getenv("AWS_REGION") + if endpoint == "" && region == "" && isTesting { + endpoint = defaultTestEndpoint + region = defaultTestRegion + } + + awsConfig := aws.NewConfig().WithRegion(region).WithEndpoint(endpoint).WithHTTPClient(httpClient) + + if isTesting { + awsConfig = awsConfig.WithCredentials(credentials.NewStaticCredentials("GOSYNC", "GOSYNC", "GOSYNC")) + } + sess, err := session.NewSession(awsConfig) if err != nil { diff --git a/datastore/item_count_dynamo_test.go b/datastore/item_count_dynamo_test.go index 513118c6..60255d7c 100644 --- a/datastore/item_count_dynamo_test.go +++ b/datastore/item_count_dynamo_test.go @@ -17,13 +17,13 @@ type ItemCountTestSuite struct { func (suite *ItemCountTestSuite) SetupSuite() { datastore.Table = "client-entity-test-datastore" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamo, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") } func (suite *ItemCountTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") } func (suite *ItemCountTestSuite) TearDownTest() { diff --git a/migrations/20240904202925_init.down.sql b/datastore/migrations/20240904202925_init.down.sql similarity index 100% rename from migrations/20240904202925_init.down.sql rename to datastore/migrations/20240904202925_init.down.sql diff --git a/migrations/20240904202925_init.up.sql b/datastore/migrations/20240904202925_init.up.sql similarity index 100% 
rename from migrations/20240904202925_init.up.sql rename to datastore/migrations/20240904202925_init.up.sql diff --git a/datastore/sql.go b/datastore/sql.go index 40ff423c..fb83f7f5 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -1,6 +1,7 @@ package datastore import ( + "embed" "errors" "fmt" "os" @@ -9,16 +10,25 @@ import ( "github.com/golang-migrate/migrate/v4" _ "github.com/golang-migrate/migrate/v4/database/postgres" _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/golang-migrate/migrate/v4/source/iofs" _ "github.com/jackc/pgx/stdlib" "github.com/jmoiron/sqlx" ) -const sqlURLEnvKey = "SQL_DATABASE_URL" -const sqlMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" -const sqlMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" +const ( + sqlURLEnvKey = "SQL_DATABASE_URL" + sqlTestURLEnvKey = "SQL_TEST_DATABASE_URL" + // Default value is defined here, since the .env file will not be loaded + // because tests are run in the subdirectories where the tests live + defaultSQLTestURL = "postgres://sync:password@localhost:5434/testing?sslmode=disable" + sqlMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" + sqlMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" + defaultMigrateUpdateInterval = 4 + defaultMigrateChunkSize = 100 +) -const defaultMigrateUpdateInterval = 4 -const defaultMigrateChunkSize = 100 +//go:embed migrations/* +var migrationFiles embed.FS // SQLDB is a Datastore wrapper around a SQL-based database. type SQLDB struct { @@ -30,32 +40,52 @@ type SQLDB struct { } // NewSQLDB returns a SQLDB client to be used. 
-func NewSQLDB() (*SQLDB, error) { +func NewSQLDB(isTesting bool) (*SQLDB, error) { variations, err := LoadSQLVariations() if err != nil { return nil, err } - sqlURL := os.Getenv(sqlURLEnvKey) - if len(sqlURL) == 0 { - return nil, fmt.Errorf("%s must be defined", sqlURLEnvKey) + var envKey string + if isTesting { + envKey = sqlTestURLEnvKey + } else { + envKey = sqlURLEnvKey + } + + sqlURL := os.Getenv(envKey) + if sqlURL == "" { + if isTesting { + sqlURL = defaultSQLTestURL + } else { + return nil, fmt.Errorf("%s must be defined", envKey) + } } - migration, err := migrate.New( - "file://./migrations", + iofsDriver, err := iofs.New(migrationFiles, "migrations") + if err != nil { + return nil, fmt.Errorf("failed to load iofs driver for migrations: %w", err) + } + migration, err := migrate.NewWithSourceInstance( + "iofs", + iofsDriver, sqlURL, ) if err != nil { - return nil, fmt.Errorf("Failed to init migrations: %v", err) + return nil, fmt.Errorf("Failed to init migrations: %w", err) } if err = migration.Up(); err != nil { if !errors.Is(err, migrate.ErrNoChange) { - return nil, fmt.Errorf("Failed to run migrations: %v", err) + return nil, fmt.Errorf("Failed to run migrations: %w", err) } } db, err := sqlx.Connect("pgx", sqlURL) if err != nil { - return nil, fmt.Errorf("Failed to connect to SQL DB: %v", err) + return nil, fmt.Errorf("Failed to connect to SQL DB: %w", err) + } + + if isTesting { + variations.Ready = true } migrateInterval, _ := strconv.Atoi(os.Getenv(sqlMigrateUpdateIntervalEnvKey)) diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 330600e9..a001f8c9 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -116,7 +116,7 @@ func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *in // Get chain ID and lock for updates clientIDBytes, err := hex.DecodeString(clientID) if err != nil { - return nil, fmt.Errorf("failed to decode clientID: %w", err) + clientIDBytes = []byte(clientID) } 
var id int64 diff --git a/datastore/sync_entity_test.go b/datastore/sync_entity_test.go index ec040c8f..7825f66d 100644 --- a/datastore/sync_entity_test.go +++ b/datastore/sync_entity_test.go @@ -24,13 +24,13 @@ type SyncEntityTestSuite struct { func (suite *SyncEntityTestSuite) SetupSuite() { datastore.Table = "client-entity-test-datastore" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamo, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") } func (suite *SyncEntityTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") } func (suite *SyncEntityTestSuite) TearDownTest() { @@ -339,10 +339,9 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { updateEntity1.Deleted = aws.Bool(true) updateEntity1.DataTypeMtime = aws.String("123#23456789") updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") // Update with optional fields. 
updateEntity2 := updateEntity1 @@ -353,30 +352,27 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { updateEntity2.ParentID = aws.String("parentID") updateEntity2.Name = aws.String("name") updateEntity2.NonUniqueName = aws.String("non_unique_name") - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") // Update with nil Folder and Deleted updateEntity3 := updateEntity1 updateEntity3.ID = "id3" updateEntity3.Folder = nil updateEntity3.Deleted = nil - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") // Reset these back to false because they will be the expected value in DB. updateEntity3.Folder = aws.Bool(false) updateEntity3.Deleted = aws.Bool(false) // Update entity again with the wrong old version as (version mismatch) // should return false. - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().True(conflict, "Update with the same version should return conflict") - suite.Assert().False(deleted, "Conflict operation should return false for delete") // Check sync entities are updated correctly in DB. 
syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) @@ -407,30 +403,28 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_HistoryType() { updateEntity1.Version = aws.Int64(2) updateEntity1.Folder = aws.Bool(true) updateEntity1.Mtime = aws.Int64(24242424) - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") // should still succeed with the same version number, // since the version number should be ignored updateEntity2 := updateEntity1 updateEntity2.Mtime = aws.Int64(42424242) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) suite.Require().NoError(err, "UpdateSyncEntity should not return an error") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") updateEntity3 := entity1 updateEntity3.Deleted = aws.Bool(true) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) suite.Require().NoError(err, "ScanSyncEntities should succeed") + updateEntity3.ID = *updateEntity3.ClientDefinedUniqueTag suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity3}) } @@ -465,24 +459,21 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_ReuseClientTag() { updateEntity1.Folder = aws.Bool(true) 
updateEntity1.DataTypeMtime = aws.String("123#23456789") updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") // Soft-delete the item with wrong version should get conflict. updateEntity1.Deleted = aws.Bool(true) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().True(conflict, "Version mismatched update should have conflict") - suite.Assert().False(deleted, "Failed delete operation should return false") // Soft-delete the item with matched version. updateEntity1.Version = aws.Int64(34567890) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) + conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") // Check tag item is deleted. tagItems, err = datastoretest.ScanTagItems(suite.dynamo) @@ -552,57 +543,59 @@ func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { suite.Require().NoError(err, "InsertSyncEntity should succeed") // Get all updates for type 123 and client1 using token = 0. 
- hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 100) + var token int64 = 0 + hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) suite.Assert().False(hasChangesRemaining) // Get all updates for type 124 and client1 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, 0, true, "client1", 100) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client1", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity3}) suite.Assert().False(hasChangesRemaining) // Get all updates for type 123 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client2", 100) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client2", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity4}) suite.Assert().False(hasChangesRemaining) // Get all updates for type 124 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, 0, true, "client2", 100) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client2", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(len(syncItems), 0) suite.Assert().False(hasChangesRemaining) // Test maxSize will limit the return entries size, and hasChangesRemaining // should be true when there are more updates available in the DB. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 1) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 1, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1}) suite.Assert().True(hasChangesRemaining) // Test when num of query items equal to the limit, hasChangesRemaining should // be true. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 2) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 2, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) suite.Assert().True(hasChangesRemaining) // Test fetchFolders will remove folder items if false - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, false, "client1", 100) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, false, "client1", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) suite.Assert().False(hasChangesRemaining) // Get all updates for a type for a client using mtime of one item as token. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 12345678, true, "client1", 100) + token = 12345678 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) suite.Assert().False(hasChangesRemaining) // Test batch is working correctly for over 100 items - err = datastoretest.ResetTable(suite.dynamo) + err = datastoretest.ResetDynamoTable(suite.dynamo) suite.Require().NoError(err, "Failed to reset table") expectedSyncItems := []datastore.SyncEntity{} @@ -629,7 +622,8 @@ func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { } // All items should be returned and sorted by Mtime. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 300) + token = 0 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 300, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") sort.Sort(datastore.SyncEntityByMtime(expectedSyncItems)) suite.Assert().Equal(syncItems, expectedSyncItems) @@ -637,7 +631,7 @@ func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { // Test that when maxGUBatchSize is smaller than total updates, the first n // items ordered by Mtime should be returned. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 200) + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 200, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, expectedSyncItems[0:200]) suite.Assert().True(hasChangesRemaining) @@ -670,8 +664,10 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { Specifics: specifics, UniquePosition: uniquePosition, } + var expectedChainID int64 = 1 expectedDBEntity := datastore.SyncEntity{ ClientID: "client1", + ChainID: &expectedChainID, ParentID: pbEntity.ParentIdString, Version: pbEntity.Version, Name: pbEntity.Name, @@ -688,7 +684,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { ExpirationTime: nil, } - dbEntity, err := datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err := datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") // Check ID is replaced with a server-generated ID. @@ -714,7 +710,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { pbEntity.Deleted = nil pbEntity.Folder = nil - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().False(*dbEntity.Deleted, "Default value should be set for Deleted for new entities") suite.Assert().False(*dbEntity.Folder, "Default value should be set for Deleted for new entities") @@ -723,14 +719,14 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Check the case when Ctime and Mtime are provided by the client. 
pbEntity.Ctime = aws.Int64(12345678) pbEntity.Mtime = aws.Int64(12345678) - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(*dbEntity.Ctime, *pbEntity.Ctime, "Client's Ctime should be respected") suite.Assert().NotEqual(*dbEntity.Mtime, *pbEntity.Mtime, "Client's Mtime should be replaced") suite.Assert().Nil(dbEntity.ExpirationTime) // When cacheGUID is nil, ID should be kept and no originator info are filled. - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, nil, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, nil, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(dbEntity.ID, *pbEntity.IdString) suite.Assert().Nil(dbEntity.OriginatorCacheGUID) @@ -740,7 +736,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Check that when updating from a previous version with guid, ID will not be // replaced. pbEntity.Version = aws.Int64(1) - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(dbEntity.ID, *pbEntity.IdString) suite.Assert().Nil(dbEntity.Deleted, "Deleted won't apply its default value for updated entities") @@ -749,7 +745,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Empty unique position should be marshalled to nil without error. 
pbEntity.UniquePosition = nil - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err) suite.Assert().Nil(dbEntity.UniquePosition) suite.Assert().Nil(dbEntity.ExpirationTime) @@ -758,16 +754,15 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // and an expiration time. historyEntitySpecific := &sync_pb.EntitySpecifics_History{} pbEntity.Specifics = &sync_pb.EntitySpecifics{SpecificsVariant: historyEntitySpecific} - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err) - suite.Assert().Equal(dbEntity.ID, "client_tag") expectedExpirationTime := time.Now().Unix() + datastore.HistoryExpirationIntervalSecs suite.Assert().Greater(*dbEntity.ExpirationTime+2, expectedExpirationTime) suite.Assert().Less(*dbEntity.ExpirationTime-2, expectedExpirationTime) // Empty specifics should report marshal error. pbEntity.Specifics = nil - _, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + _, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Assert().NotNil(err.Error(), "empty specifics should fail") } diff --git a/docker-compose.yml b/docker-compose.yml index 0b1090db..53e5161e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,6 +16,7 @@ services: depends_on: - dynamo-local - redis + - postgres networks: - sync environment: @@ -29,6 +30,7 @@ services: - AWS_ENDPOINT=http://dynamo-local:8000 - REDIS_URL=redis:6379 - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable + - SQL_TEST_DATABASE_URL=postgres://sync:password@postgres/testing?sslmode=disable web: build: context: . 
@@ -56,6 +58,7 @@ services: - TABLE_NAME=client-entity-dev - REDIS_URL=redis:6379 - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable + - SQL_TEST_DATABASE_URL=postgres://sync:password@postgres/testing?sslmode=disable dynamo-local: build: context: . @@ -81,3 +84,5 @@ services: - POSTGRES_PASSWORD=password networks: - sync + volumes: + - "./misc/create_additional_dbs.sql:/docker-entrypoint-initdb.d/create_additional_dbs.sql" diff --git a/middleware/middleware_test.go b/middleware/middleware_test.go index 21ebd8e4..8bc8f8cb 100644 --- a/middleware/middleware_test.go +++ b/middleware/middleware_test.go @@ -4,15 +4,18 @@ import ( "bytes" "context" "fmt" + "net/http" "net/http/httptest" "testing" + "time" "github.com/brave/go-sync/auth/authtest" syncContext "github.com/brave/go-sync/context" "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/middleware" + "github.com/brave/go-sync/utils" "github.com/stretchr/testify/suite" ) diff --git a/misc/create_additional_dbs.sql b/misc/create_additional_dbs.sql new file mode 100644 index 00000000..da88a0d9 --- /dev/null +++ b/misc/create_additional_dbs.sql @@ -0,0 +1,2 @@ +CREATE DATABASE testing; +GRANT ALL PRIVILEGES ON DATABASE testing TO sync; diff --git a/server/server.go b/server/server.go index 2e32ce12..46107bc3 100644 --- a/server/server.go +++ b/server/server.go @@ -44,7 +44,7 @@ func setupLogger(ctx context.Context) (context.Context, *zerolog.Logger) { return logging.SetupLogger(ctx) } -func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, *chi.Mux) { +func setupRouter(ctx context.Context, logger *zerolog.Logger, isTesting bool) (context.Context, *chi.Mux) { r := chi.NewRouter() r.Use(chiware.RequestID) @@ -63,13 +63,13 @@ func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, r.Use(batware.BearerToken) r.Use(middleware.CommonResponseHeaders) - dynamoDB, err := datastore.NewDynamo() + dynamoDB, err := 
datastore.NewDynamo(isTesting) if err != nil { sentry.CaptureException(err) log.Panic().Err(err).Msg("Must be able to init Dynamo datastore to start") } - sqlDB, err := datastore.NewSQLDB() + sqlDB, err := datastore.NewSQLDB(isTesting) if err != nil { sentry.CaptureException(err) log.Panic().Err(err).Msg("Must be able to init SQL datastore to start") @@ -131,7 +131,7 @@ func StartServer() { subLog := logger.Info().Str("prefix", "main") subLog.Msg("Starting server") - serverCtx, r := setupRouter(serverCtx, logger) + serverCtx, r := setupRouter(serverCtx, logger, false) port := ":8295" srv := http.Server{ diff --git a/server/server_test.go b/server/server_test.go index 22f4ad56..82f24acf 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -19,7 +19,7 @@ var ( func init() { testCtx, logger := server.SetupLogger(context.Background()) - serverCtx, mux = server.SetupRouter(testCtx, logger) + serverCtx, mux = server.SetupRouter(testCtx, logger, true) } func TestPing(t *testing.T) { From 430249a1df49b5c2a65c91d99d5571da55494486 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 18 Sep 2024 15:40:22 -0700 Subject: [PATCH 09/19] Fix lint errors --- command/command.go | 2 +- command/helpers.go | 48 +++++++++++++++---------------- command/item_count.go | 3 ++ datastore/datastoretest/dynamo.go | 3 +- datastore/interfaces.go | 2 +- datastore/sql.go | 3 +- datastore/sql_variations.go | 2 +- datastore/sync_entity_sql.go | 4 +-- datastore/sync_entity_test.go | 2 +- 9 files changed, 36 insertions(+), 33 deletions(-) diff --git a/command/command.go b/command/command.go index dcaa705a..8437272f 100644 --- a/command/command.go +++ b/command/command.go @@ -422,7 +422,7 @@ func handleClearServerDataRequest(cache *cache.Cache, dynamoDB datastore.DynamoD } } - if err = dbHelpers.SqlDB.DeleteChain(dbHelpers.Trx, dbHelpers.ChainID); err != nil { + if err = dbHelpers.SQLDB.DeleteChain(dbHelpers.Trx, dbHelpers.ChainID); err != nil { log.Error().Err(err).Msg("Failed to 
disable sync chain") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, err diff --git a/command/helpers.go b/command/helpers.go index 550dd361..d655a843 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -12,7 +12,7 @@ import ( type DBHelpers struct { dynamoDB datastore.DynamoDatastore - SqlDB datastore.SQLDatastore + SQLDB datastore.SQLDatastore Trx *sqlx.Tx clientID string ChainID int64 @@ -44,7 +44,7 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatasto return &DBHelpers{ dynamoDB: dynamoDB, - SqlDB: sqlDB, + SQLDB: sqlDB, Trx: trx, clientID: clientID, ChainID: *chainID, @@ -55,8 +55,8 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatasto func (h *DBHelpers) hasItemInEitherDB(entity *datastore.SyncEntity) (exists bool, err error) { // Check if item exists using client_unique_tag - if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { - exists, err := h.SqlDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) + if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + exists, err := h.SQLDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) if err != nil { return false, err } @@ -72,8 +72,8 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo if curMaxSize == 0 { return false, nil, nil } - if h.SqlDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { - dynamoMigrationStatuses, err := h.SqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) + if h.SQLDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { + dynamoMigrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) if err != nil { return false, nil, err } @@ -91,7 +91,7 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo } if curMaxSize > 0 { - sqlHasChangesRemaining, sqlSyncEntities, err := 
h.SqlDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) + sqlHasChangesRemaining, sqlSyncEntities, err := h.SQLDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) if err != nil { return false, nil, err } @@ -107,9 +107,9 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo } func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict bool, err error) { - savedInSQL := h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + savedInSQL := h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) if savedInSQL { - conflict, err = h.SqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + conflict, err = h.SQLDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) } else { conflict, err = h.dynamoDB.InsertSyncEntity(entity) } @@ -134,8 +134,8 @@ func getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { } func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, migratedEntity *datastore.SyncEntity, err error) { - if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { - conflict, err := h.SqlDB.UpdateSyncEntity(h.Trx, entity, oldVersion) + if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + conflict, err := h.SQLDB.UpdateSyncEntity(h.Trx, entity, oldVersion) if err != nil { return false, nil, err } @@ -161,12 +161,12 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in return false, nil, err } } - migratedEntityId, err := getMigratedEntityID(entity) + migratedEntityID, err := getMigratedEntityID(entity) if err != nil { return false, nil, err } - entity.ID = migratedEntityId - conflict, err = h.SqlDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + entity.ID = migratedEntityID + conflict, err = h.SQLDB.InsertSyncEntities(h.Trx, 
[]*datastore.SyncEntity{entity}) if err != nil { return false, nil, err } @@ -191,15 +191,15 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in } func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) { - if !h.SqlDB.Variations().Ready { + if !h.SQLDB.Variations().Ready { return nil, nil } - if rand.Float32() > h.SqlDB.MigrateIntervalPercent() { + if rand.Float32() > h.SQLDB.MigrateIntervalPercent() { return nil, nil } var applicableDataTypes []int for _, dataType := range dataTypes { - if !h.SqlDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { + if !h.SQLDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { continue } applicableDataTypes = append(applicableDataTypes, dataType) @@ -208,12 +208,12 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data return nil, nil } - migrationStatuses, err := h.SqlDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) + migrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) if err != nil { return nil, err } - currLimit := h.SqlDB.MigrateChunkSize() + currLimit := h.SQLDB.MigrateChunkSize() var updatedMigrationStatuses []*datastore.MigrationStatus for _, dataType := range applicableDataTypes { @@ -270,13 +270,13 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data } if len(syncEntitiesPtr) > 0 { - if _, err = h.SqlDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { + if _, err = h.SQLDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil { return nil, err } } } if len(updatedMigrationStatuses) > 0 { - if err = h.SqlDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { + if err = h.SQLDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil { return nil, err } } @@ -286,7 +286,7 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes 
[]int) (migratedEntities []*data // InsertServerDefinedUniqueEntities inserts the server defined unique tag // entities if it is not in the DB yet for a specific client. func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { - if !h.SqlDB.Variations().Ready { + if !h.SQLDB.Variations().Ready { return fmt.Errorf("SQL rollout not ready") } // Check if they're existed already for this client. @@ -307,7 +307,7 @@ func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { var dynamoEntities []*datastore.SyncEntity var sqlEntities []*datastore.SyncEntity for _, entity := range entities { - if h.SqlDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { sqlEntities = append(sqlEntities, entity) } else { dynamoEntities = append(dynamoEntities, entity) @@ -322,7 +322,7 @@ func (h *DBHelpers) InsertServerDefinedUniqueEntities() error { } if len(sqlEntities) > 0 { - _, err = h.SqlDB.InsertSyncEntities(h.Trx, sqlEntities) + _, err = h.SQLDB.InsertSyncEntities(h.Trx, sqlEntities) if err != nil { return fmt.Errorf("error inserting entities with server tags to SQL: %w", err) } diff --git a/command/item_count.go b/command/item_count.go index 672b1bef..cea3aa80 100644 --- a/command/item_count.go +++ b/command/item_count.go @@ -29,6 +29,9 @@ func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB } sqlItemCounts, err := sqlDB.GetItemCounts(tx, chainID) + if err != nil { + return nil, err + } itemCounts := ItemCounts{ cache: cache, diff --git a/datastore/datastoretest/dynamo.go b/datastore/datastoretest/dynamo.go index 03c1b7e6..9f935e33 100644 --- a/datastore/datastoretest/dynamo.go +++ b/datastore/datastoretest/dynamo.go @@ -26,9 +26,8 @@ func DeleteTable(dynamo *datastore.Dynamo) error { return nil } return err - } else { - return fmt.Errorf("error deleting table: %w", err) } + return fmt.Errorf("error deleting table: %w", err) } return 
dynamo.WaitUntilTableNotExists( diff --git a/datastore/interfaces.go b/datastore/interfaces.go index 68fe2c57..63a73c21 100644 --- a/datastore/interfaces.go +++ b/datastore/interfaces.go @@ -40,7 +40,7 @@ type SQLDatastore interface { // InsertSyncEntities inserts multiple sync entities into the database InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (bool, error) // HasItem checks if an item exists in the database - HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (bool, error) + HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (bool, error) // UpdateSyncEntity updates a sync entity in the database UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, error) // GetAndLockChainID retrieves and locks a chain ID for a given client ID diff --git a/datastore/sql.go b/datastore/sql.go index fb83f7f5..ccd01534 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -8,9 +8,10 @@ import ( "strconv" "github.com/golang-migrate/migrate/v4" + // import postgres package for migrations _ "github.com/golang-migrate/migrate/v4/database/postgres" - _ "github.com/golang-migrate/migrate/v4/source/file" "github.com/golang-migrate/migrate/v4/source/iofs" + // import pgx so it can be used with sqlx _ "github.com/jackc/pgx/stdlib" "github.com/jmoiron/sqlx" ) diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index a87faffe..1c22a688 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -84,7 +84,7 @@ func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHa return exists && variationHashDecimal <= rolloutPercent } -func (v *SQLVariations) GetStateDigest() string { +func (sqlVariations *SQLVariations) GetStateDigest() string { return sqlSaveRolloutsEnvKey + ":" + os.Getenv(sqlSaveRolloutsEnvKey) + ";" + sqlMigrateRolloutsEnvKey + ":" + os.Getenv(sqlMigrateRolloutsEnvKey) } diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index a001f8c9..2ae107a9 100644 --- 
a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -47,8 +47,8 @@ func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (con return int(rowsAffected) == len(entities), nil } -func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (exists bool, err error) { - err = tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND client_defined_unique_tag = $2)", chainId, clientTag).Scan(&exists) +func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exists bool, err error) { + err = tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND client_defined_unique_tag = $2)", chainID, clientTag).Scan(&exists) if err != nil { return false, fmt.Errorf("failed to check existence of item: %w", err) } diff --git a/datastore/sync_entity_test.go b/datastore/sync_entity_test.go index 7825f66d..4401046b 100644 --- a/datastore/sync_entity_test.go +++ b/datastore/sync_entity_test.go @@ -543,7 +543,7 @@ func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { suite.Require().NoError(err, "InsertSyncEntity should succeed") // Get all updates for type 123 and client1 using token = 0. 
- var token int64 = 0 + var token int64 hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) suite.Require().NoError(err, "GetUpdatesForType should succeed") suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) From 0c7f2d902ca2a684c613d10fb9647f6136d6cd36 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 18 Sep 2024 16:38:38 -0700 Subject: [PATCH 10/19] Restore deleted return value from update functions, to adjust item counts accordingly --- command/helpers.go | 17 +++++++----- datastore/datastoretest/mock_datastore.go | 4 +-- datastore/instrumented_dynamo_datastore.go | 4 +-- datastore/instrumented_sql_datastore.go | 8 +++--- datastore/interfaces.go | 8 +++--- datastore/sync_entity_dynamo.go | 29 ++++++++++++-------- datastore/sync_entity_sql.go | 8 +++--- datastore/sync_entity_test.go | 31 +++++++++++++++------- 8 files changed, 66 insertions(+), 43 deletions(-) diff --git a/command/helpers.go b/command/helpers.go index d655a843..e46b64aa 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -134,8 +134,10 @@ func getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { } func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, migratedEntity *datastore.SyncEntity, err error) { - if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { - conflict, err := h.SQLDB.UpdateSyncEntity(h.Trx, entity, oldVersion) + var deleted bool + shouldSaveInSQL := h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + if shouldSaveInSQL { + conflict, deleted, err = h.SQLDB.UpdateSyncEntity(h.Trx, entity, oldVersion) if err != nil { return false, nil, err } @@ -179,11 +181,14 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in } return conflict, oldEntity, err } - return conflict, nil, err + } else { + conflict, deleted, err = 
h.dynamoDB.UpdateSyncEntity(entity, oldVersion) + if err != nil { + return false, nil, err + } } - conflict, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion) - if !conflict && entity.Deleted != nil && *entity.Deleted { - if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { + if !conflict && deleted { + if err = h.ItemCounts.recordChange(*entity.DataType, true, shouldSaveInSQL); err != nil { return false, nil, err } } diff --git a/datastore/datastoretest/mock_datastore.go b/datastore/datastoretest/mock_datastore.go index 95faf55c..c64cf418 100644 --- a/datastore/datastoretest/mock_datastore.go +++ b/datastore/datastoretest/mock_datastore.go @@ -23,9 +23,9 @@ func (m *MockDatastore) InsertSyncEntitiesWithServerTags(entities []*datastore.S } // UpdateSyncEntity mocks calls to UpdateSyncEntity -func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, err error) { +func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { args := m.Called(entity, oldVersion) - return args.Bool(0), args.Error(1) + return args.Bool(0), args.Bool(1), args.Error(2) } // GetUpdatesForType mocks calls to GetUpdatesForType diff --git a/datastore/instrumented_dynamo_datastore.go b/datastore/instrumented_dynamo_datastore.go index 7fd352e4..014137cb 100644 --- a/datastore/instrumented_dynamo_datastore.go +++ b/datastore/instrumented_dynamo_datastore.go @@ -108,7 +108,7 @@ func (_d DynamoDatastoreWithPrometheus) GetEntity(query ItemQuery) (sp1 *SyncEnt } // GetUpdatesForType implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (b1 bool, sa1 []SyncEntity, err error) { +func (_d DynamoDatastoreWithPrometheus) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID 
string, maxSize int, ascOrder bool) (hasChangesRemaining bool, entities []SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" @@ -206,7 +206,7 @@ func (_d DynamoDatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItem } // UpdateSyncEntity implements DynamoDatastore -func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, err error) { +func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { _since := time.Now() defer func() { result := "ok" diff --git a/datastore/instrumented_sql_datastore.go b/datastore/instrumented_sql_datastore.go index 3c3482a6..6be6b70c 100644 --- a/datastore/instrumented_sql_datastore.go +++ b/datastore/instrumented_sql_datastore.go @@ -109,7 +109,7 @@ func (_d SQLDatastoreWithPrometheus) GetItemCounts(tx *sqlx.Tx, chainID int64) ( } // GetUpdatesForType implements SQLDatastore -func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (b1 bool, sa1 []SyncEntity, err error) { +func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { _since := time.Now() defer func() { result := "ok" @@ -123,7 +123,7 @@ func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(tx *sqlx.Tx, dataType int } // HasItem implements SQLDatastore -func (_d SQLDatastoreWithPrometheus) HasItem(tx *sqlx.Tx, chainId int64, clientTag string) (b1 bool, err error) { +func (_d SQLDatastoreWithPrometheus) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (b1 bool, err error) { _since := time.Now() defer func() { result := "ok" @@ -133,7 +133,7 @@ func (_d SQLDatastoreWithPrometheus) HasItem(tx *sqlx.Tx, chainId int64, clientT 
sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) }() - return _d.base.HasItem(tx, chainId, clientTag) + return _d.base.HasItem(tx, chainID, clientTag) } // InsertSyncEntities implements SQLDatastore @@ -185,7 +185,7 @@ func (_d SQLDatastoreWithPrometheus) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, } // UpdateSyncEntity implements SQLDatastore -func (_d SQLDatastoreWithPrometheus) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (b1 bool, err error) { +func (_d SQLDatastoreWithPrometheus) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { _since := time.Now() defer func() { result := "ok" diff --git a/datastore/interfaces.go b/datastore/interfaces.go index 63a73c21..654690f2 100644 --- a/datastore/interfaces.go +++ b/datastore/interfaces.go @@ -9,12 +9,12 @@ type DynamoDatastore interface { // Insert a series of sync entities in a write transaction. InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error // Update an existing sync entity. - UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, err error) + UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) // Get updates for a specific type which are modified after the time of // client token for a given client. Besides the array of sync entities, a // boolean value indicating whether there are more updates to query in the // next batch is returned. - GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []SyncEntity, error) + GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (hasChangesRemaining bool, entities []SyncEntity, err error) // Check if a server-defined unique tag is in the datastore. 
HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) // Get the count of sync items for a client. @@ -42,11 +42,11 @@ type SQLDatastore interface { // HasItem checks if an item exists in the database HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (bool, error) // UpdateSyncEntity updates a sync entity in the database - UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (bool, error) + UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) // GetAndLockChainID retrieves and locks a chain ID for a given client ID GetAndLockChainID(tx *sqlx.Tx, clientID string) (*int64, error) // GetUpdatesForType retrieves updates for a specific data type - GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (bool, []SyncEntity, error) + GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) // GetDynamoMigrationStatuses retrieves migration statuses for specified data types GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (map[int]*MigrationStatus, error) // UpdateDynamoMigrationStatuses updates migration statuses in the database diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go index f13f081f..d13200ed 100644 --- a/datastore/sync_entity_dynamo.go +++ b/datastore/sync_entity_dynamo.go @@ -460,7 +460,7 @@ func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { } // UpdateSyncEntity updates a sync item in dynamoDB. 
-func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, error) { +func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { id := entity.ID if *entity.DataType == HistoryTypeID { id = *entity.ClientDefinedUniqueTag @@ -468,7 +468,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: id} key, err := dynamodbattribute.MarshalMap(primaryKey) if err != nil { - return false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) } // condition to ensure the request is update only... @@ -505,7 +505,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() if err != nil { - return false, fmt.Errorf("error building expression to update sync entity: %w", err) + return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) } // Soft-delete a sync item with a client tag, use a transaction to delete its @@ -515,7 +515,7 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} tagItemKey, err := dynamodbattribute.MarshalMap(pk) if err != nil { - return false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) } items := []*dynamodb.TransactWriteItem{} @@ -546,16 +546,16 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { for _, reason := range canceledException.CancellationReasons { if reason.Code != nil && *reason.Code == 
conditionalCheckFailed { - return true, nil + return true, false, nil } } } - return false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) + return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) } // Successfully soft-delete the sync item and delete the tag item. - return false, nil + return false, true, nil } // Not deleting a sync item with a client tag, do a normal update on sync @@ -575,19 +575,26 @@ func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bo if aerr, ok := err.(awserr.Error); ok { // Return conflict if the write condition fails. if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return true, nil + return true, false, nil } } - return false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) + return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) } // Unmarshal out.Attributes oldEntity := &SyncEntity{} err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) if err != nil { - return false, fmt.Errorf("error unmarshalling old sync entity: %w", err) + return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) } - return false, nil + if entity.Deleted == nil { // No updates on Deleted this time. + deleted = false + } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. 
+ deleted = *entity.Deleted + } else { + deleted = !*oldEntity.Deleted && *entity.Deleted + } + return false, deleted, nil } // GetUpdatesForType returns sync entities of a data type where it's mtime is diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 2ae107a9..eb073af9 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -55,7 +55,7 @@ func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exist return exists, nil } -func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, err error) { +func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { var idCondition string if *entity.DataType == HistoryTypeID { idCondition = "client_defined_unique_tag = :client_defined_unique_tag" @@ -101,15 +101,15 @@ func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion result, err := tx.NamedExec(query, entity) if err != nil { - return false, fmt.Errorf("error updating entity: %w", err) + return false, false, fmt.Errorf("error updating entity: %w", err) } rowsAffected, err := result.RowsAffected() if err != nil { - return false, fmt.Errorf("error getting rows affected after update: %w", err) + return false, false, fmt.Errorf("error getting rows affected after update: %w", err) } - return rowsAffected == 0, nil + return rowsAffected == 0, entity.Deleted != nil && *entity.Deleted, nil } func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *int64, err error) { diff --git a/datastore/sync_entity_test.go b/datastore/sync_entity_test.go index 4401046b..f3a9369a 100644 --- a/datastore/sync_entity_test.go +++ b/datastore/sync_entity_test.go @@ -339,9 +339,10 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { updateEntity1.Deleted = aws.Bool(true) updateEntity1.DataTypeMtime = aws.String("123#23456789") updateEntity1.Specifics = 
[]byte{3, 4} - conflict, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") // Update with optional fields. updateEntity2 := updateEntity1 @@ -352,27 +353,31 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { updateEntity2.ParentID = aws.String("parentID") updateEntity2.Name = aws.String("name") updateEntity2.NonUniqueName = aws.String("non_unique_name") - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") // Update with nil Folder and Deleted updateEntity3 := updateEntity1 updateEntity3.ID = "id3" updateEntity3.Folder = nil updateEntity3.Deleted = nil - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") // Reset these back to false because they will be the expected value in DB. updateEntity3.Folder = aws.Bool(false) updateEntity3.Deleted = aws.Bool(false) // Update entity again with the wrong old version as (version mismatch) // should return false. 
- conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().True(conflict, "Update with the same version should return conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + // suite.Assert().False(deleted, "Successful update should not result in delete") // Check sync entities are updated correctly in DB. syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) @@ -403,24 +408,27 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_HistoryType() { updateEntity1.Version = aws.Int64(2) updateEntity1.Folder = aws.Bool(true) updateEntity1.Mtime = aws.Int64(24242424) - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") // should still succeed with the same version number, // since the version number should be ignored updateEntity2 := updateEntity1 updateEntity2.Mtime = aws.Int64(42424242) - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) suite.Require().NoError(err, "UpdateSyncEntity should not return an error") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") updateEntity3 := entity1 updateEntity3.Deleted = aws.Bool(true) - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) suite.Require().NoError(err, "UpdateSyncEntity should succeed") 
suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) suite.Require().NoError(err, "ScanSyncEntities should succeed") @@ -459,21 +467,24 @@ func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_ReuseClientTag() { updateEntity1.Folder = aws.Bool(true) updateEntity1.DataTypeMtime = aws.String("123#23456789") updateEntity1.Specifics = []byte{3, 4} - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") // Soft-delete the item with wrong version should get conflict. updateEntity1.Deleted = aws.Bool(true) - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().True(conflict, "Version mismatched update should have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") // Soft-delete the item with matched version. updateEntity1.Version = aws.Int64(34567890) - conflict, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) suite.Require().NoError(err, "UpdateSyncEntity should succeed") suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") // Check tag item is deleted. 
tagItems, err = datastoretest.ScanTagItems(suite.dynamo) From f15ac946202b5d7fc0da2a82e7b64381425ba750 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 18 Sep 2024 21:49:56 -0700 Subject: [PATCH 11/19] Add additional sync entity tests --- command/command.go | 8 +- datastore/sync_entity_dynamo_test.go | 718 ++++++++++++++++++++++++++ datastore/sync_entity_sql.go | 2 +- datastore/sync_entity_sql_test.go | 389 +++++++++++++++ datastore/sync_entity_test.go | 720 +-------------------------- 5 files changed, 1133 insertions(+), 704 deletions(-) create mode 100644 datastore/sync_entity_dynamo_test.go create mode 100644 datastore/sync_entity_sql_test.go diff --git a/command/command.go b/command/command.go index 8437272f..2f291ed4 100644 --- a/command/command.go +++ b/command/command.go @@ -306,14 +306,16 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c // so the client can continue to sync other entities. var conflict bool conflict, err = dbHelpers.insertSyncEntity(entityToCommit) - if err != nil { - log.Error().Err(err).Msg("Insert sync entity failed") + if err != nil || conflict { + if err != nil { + log.Error().Err(err).Msg("Insert sync entity failed") + } rspType := sync_pb.CommitResponse_TRANSIENT_ERROR if conflict { rspType = sync_pb.CommitResponse_CONFLICT } entryRsp.ResponseType = &rspType - entryRsp.ErrorMessage = aws.String(fmt.Sprintf("Insert sync entity failed: %v", err.Error())) + entryRsp.ErrorMessage = aws.String("Insert sync entity failed") continue } diff --git a/datastore/sync_entity_dynamo_test.go b/datastore/sync_entity_dynamo_test.go new file mode 100644 index 00000000..8666d1fd --- /dev/null +++ b/datastore/sync_entity_dynamo_test.go @@ -0,0 +1,718 @@ +package datastore_test + +import ( + "sort" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/brave/go-sync/utils" + 
"github.com/stretchr/testify/suite" +) + +type SyncEntityDynamoTestSuite struct { + suite.Suite + dynamo *datastore.Dynamo +} + +func (suite *SyncEntityDynamoTestSuite) SetupSuite() { + datastore.Table = "client-entity-test-datastore" + var err error + suite.dynamo, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") +} + +func (suite *SyncEntityDynamoTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") +} + +func (suite *SyncEntityDynamoTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") +} + +func (suite *SyncEntityDynamoTestSuite) TestInsertSyncEntity() { + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + } + entity2 := entity1 + entity2.ID = "id2" + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity with other ID should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().Error(err, "InsertSyncEntity with the same ClientID and ID should fail") + + // Each InsertSyncEntity without client tag should result in one sync item saved. 
+ tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal( + 0, len(tagItems), "Insert without client tag should not insert tag items") + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + + // Insert entity with client tag should result in one sync item and one tag + // item saved. + entity3 := entity1 + entity3.ID = "id3" + entity3.ClientDefinedUniqueTag = aws.String("tag1") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Insert entity with different tag for same ClientID should succeed. + entity4 := entity3 + entity4.ID = "id4" + entity4.ClientDefinedUniqueTag = aws.String("tag2") + _, err = suite.dynamo.InsertSyncEntity(&entity4) + suite.Require().NoError(err, "InsertSyncEntity with different server tag should succeed") + + // Insert entity with the same client tag and ClientID should fail with conflict. + entity4Copy := entity4 + entity4Copy.ID = "id4_copy" + conflict, err := suite.dynamo.InsertSyncEntity(&entity4Copy) + suite.Require().Error(err, "InsertSyncEntity with the same client tag and ClientID should fail") + suite.Assert().True(conflict, "Return conflict for duplicate client tag") + + // Insert entity with the same client tag for other client should not fail. + entity5 := entity3 + entity5.ClientID = "client2" + entity5.ID = "id5" + _, err = suite.dynamo.InsertSyncEntity(&entity5) + suite.Require().NoError(err, + "InsertSyncEntity with the same client tag for another client should succeed") + + // Check sync items are saved for entity1, entity2, entity3, entity4, entity5. 
+ syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3, entity4, entity5} + sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + + // Check tag items should be saved for entity3, entity4, entity5. + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + + // Check that Ctime and Mtime have been set, reset to zero value for subsequent + // tests + for i := 0; i < len(tagItems); i++ { + suite.Assert().NotNil(tagItems[i].Ctime) + suite.Assert().NotNil(tagItems[i].Mtime) + + tagItems[i].Ctime = nil + tagItems[i].Mtime = nil + } + + suite.Require().NoError(err, "ScanTagItems should succeed") + expectedTagItems := []datastore.ServerClientUniqueTagItem{ + {ClientID: "client1", ID: "Client#tag1"}, + {ClientID: "client1", ID: "Client#tag2"}, + {ClientID: "client2", ID: "Client#tag1"}, + } + sort.Sort(datastore.TagItemByClientIDID(tagItems)) + suite.Assert().Equal(expectedTagItems, tagItems) +} + +func (suite *SyncEntityDynamoTestSuite) TestHasServerDefinedUniqueTag() { + // Insert entities with server tags using InsertSyncEntitiesWithServerTags. 
+ tag1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(true), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + ServerDefinedUniqueTag: aws.String("tag1"), + } + tag2 := tag1 + tag2.ClientID = "client2" + tag2.ID = "id2" + tag2.ServerDefinedUniqueTag = aws.String("tag2") + entities := []*datastore.SyncEntity{&tag1, &tag2} + + err := suite.dynamo.InsertSyncEntitiesWithServerTags(entities) + suite.Require().NoError(err, "Insert sync entities should succeed") + + hasTag, err := suite.dynamo.HasServerDefinedUniqueTag("client1", "tag1") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client1", "tag2") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag1") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag2") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, true) +} + +func (suite *SyncEntityDynamoTestSuite) TestHasItem() { + // Insert entity which will be checked later + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + entity2 := entity1 + entity2.ClientID = "client2" + entity2.ID = "id2" + + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") 
+ _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + hasTag, err := suite.dynamo.HasItem("client1", "id1") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasItem("client2", "id2") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasItem("client2", "id3") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasItem("client3", "id2") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, false) +} + +func (suite *SyncEntityDynamoTestSuite) TestInsertSyncEntitiesWithServerTags() { + // Insert with same ClientID and server tag would fail. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + ServerDefinedUniqueTag: aws.String("tag1"), + } + entity2 := entity1 + entity2.ID = "id2" + entities := []*datastore.SyncEntity{&entity1, &entity2} + suite.Require().Error( + suite.dynamo.InsertSyncEntitiesWithServerTags(entities), + "Insert with same ClientID and server tag would fail") + + // Check nothing is written to DB when it fails. 
+ syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(0, len(syncItems), "No items should be written if fail") + tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(tagItems), "No items should be written if fail") + + entity2.ServerDefinedUniqueTag = aws.String("tag2") + entity3 := entity1 + entity3.ClientID = "client2" + entity3.ID = "id3" + entities = []*datastore.SyncEntity{&entity1, &entity2, &entity3} + suite.Require().NoError( + suite.dynamo.InsertSyncEntitiesWithServerTags(entities), + "InsertSyncEntitiesWithServerTags should succeed") + + // Scan DB and check all items are saved + syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3} + sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + + // Check that Ctime and Mtime have been set, reset to zero value for subsequent + // tests + for i := 0; i < len(tagItems); i++ { + suite.Assert().NotNil(tagItems[i].Ctime) + suite.Assert().NotNil(tagItems[i].Mtime) + + tagItems[i].Ctime = nil + tagItems[i].Mtime = nil + } + + expectedTagItems := []datastore.ServerClientUniqueTagItem{ + {ClientID: "client1", ID: "Server#tag1"}, + {ClientID: "client1", ID: "Server#tag2"}, + {ClientID: "client2", ID: "Server#tag1"}, + } + sort.Sort(datastore.TagItemByClientIDID(tagItems)) + suite.Assert().Equal(expectedTagItems, tagItems) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_Basic() { + // Insert three new items. 
+ entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + entity2 := entity1 + entity2.ID = "id2" + entity3 := entity1 + entity3.ID = "id3" + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + // Check sync entities are inserted correctly in DB. + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2, entity3}) + + // Update without optional fields. + updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(23456789) + updateEntity1.Mtime = aws.Int64(23456789) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.Deleted = aws.Bool(true) + updateEntity1.DataTypeMtime = aws.String("123#23456789") + updateEntity1.Specifics = []byte{3, 4} + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + // Update with optional fields. 
+	updateEntity2 := updateEntity1
+	updateEntity2.ID = "id2"
+	updateEntity2.Deleted = aws.Bool(false)
+	updateEntity2.Folder = aws.Bool(false)
+	updateEntity2.UniquePosition = []byte{5, 6}
+	updateEntity2.ParentID = aws.String("parentID")
+	updateEntity2.Name = aws.String("name")
+	updateEntity2.NonUniqueName = aws.String("non_unique_name")
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().False(conflict, "Successful update should not have conflict")
+	suite.Assert().False(deleted, "Successful update should not result in delete")
+
+	// Update with nil Folder and Deleted
+	updateEntity3 := updateEntity1
+	updateEntity3.ID = "id3"
+	updateEntity3.Folder = nil
+	updateEntity3.Deleted = nil
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().False(conflict, "Successful update should not have conflict")
+	suite.Assert().False(deleted, "Successful update should not result in delete")
+	// Reset these back to false because they will be the expected value in DB.
+	updateEntity3.Folder = aws.Bool(false)
+	updateEntity3.Deleted = aws.Bool(false)
+
+	// Updating the entity again with a stale old version (version mismatch)
+	// should report a conflict and must not delete anything.
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().True(conflict, "Update with a mismatched version should return conflict")
+	suite.Assert().False(deleted, "Conflicting update should not result in delete")
+
+	// Check sync entities are updated correctly in DB.
+ syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity1, updateEntity2, updateEntity3}) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_HistoryType() { + // Insert a history item + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + ClientDefinedUniqueTag: aws.String("client_tag1"), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(963985), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + conflict, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(2) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.Mtime = aws.Int64(24242424) + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // should still succeed with the same version number, + // since the version number should be ignored + updateEntity2 := updateEntity1 + updateEntity2.Mtime = aws.Int64(42424242) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) + suite.Require().NoError(err, "UpdateSyncEntity should not return an error") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + updateEntity3 := entity1 + updateEntity3.Deleted = aws.Bool(true) + + conflict, deleted, err = 
suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + updateEntity3.ID = *updateEntity3.ClientDefinedUniqueTag + suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity3}) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_ReuseClientTag() { + // Insert an item with client tag. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + ClientDefinedUniqueTag: aws.String("client_tag"), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + conflict, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + // Check a tag item is inserted. + tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") + + // Update it to version 23456789. 
+ updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(23456789) + updateEntity1.Mtime = aws.Int64(23456789) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.DataTypeMtime = aws.String("123#23456789") + updateEntity1.Specifics = []byte{3, 4} + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // Soft-delete the item with wrong version should get conflict. + updateEntity1.Deleted = aws.Bool(true) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Version mismatched update should have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // Soft-delete the item with matched version. + updateEntity1.Version = aws.Int64(34567890) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + // Check tag item is deleted. + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(tagItems), "Tag item should be deleted") + + // Insert another item with the same client tag again. + entity2 := entity1 + entity2.ID = "id2" + conflict, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + // Check a tag item is inserted. 
+ tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") +} + +func (suite *SyncEntityDynamoTestSuite) TestGetUpdatesForType() { + // Insert items for testing. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(true), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + + entity2 := entity1 + entity2.ID = "id2" + entity2.Folder = aws.Bool(false) + entity2.Mtime = aws.Int64(12345679) + entity2.DataTypeMtime = aws.String("123#12345679") + + entity3 := entity2 + entity3.ID = "id3" + entity3.DataType = aws.Int(124) + entity3.DataTypeMtime = aws.String("124#12345679") + + // non-expired item + entity4 := entity2 + entity4.ClientID = "client2" + entity4.ID = "id4" + entity4.ExpirationTime = aws.Int64(time.Now().Unix() + 300) + + // expired item + entity5 := entity2 + entity5.ClientID = "client2" + entity5.ID = "id5" + entity5.ExpirationTime = aws.Int64(time.Now().Unix() - 300) + + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity4) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity5) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Get all updates for type 123 and client1 using token = 0. 
+ var token int64 + hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 124 and client1 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity3}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 123 and client2 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client2", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity4}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 124 and client2 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client2", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(len(syncItems), 0) + suite.Assert().False(hasChangesRemaining) + + // Test maxSize will limit the return entries size, and hasChangesRemaining + // should be true when there are more updates available in the DB. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 1, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1}) + suite.Assert().True(hasChangesRemaining) + + // Test when num of query items equal to the limit, hasChangesRemaining should + // be true. 
+ hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 2, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + suite.Assert().True(hasChangesRemaining) + + // Test fetchFolders will remove folder items if false + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, false, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for a type for a client using mtime of one item as token. + token = 12345678 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) + suite.Assert().False(hasChangesRemaining) + + // Test batch is working correctly for over 100 items + err = datastoretest.ResetDynamoTable(suite.dynamo) + suite.Require().NoError(err, "Failed to reset table") + + expectedSyncItems := []datastore.SyncEntity{} + entity1 = datastore.SyncEntity{ + ClientID: "client1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + Specifics: []byte{1, 2}, + } + + mtime := utils.UnixMilli(time.Now()) + for i := 1; i <= 250; i++ { + mtime = mtime + 1 + entity := entity1 + entity.ID = "id" + strconv.Itoa(i) + entity.Mtime = aws.Int64(mtime) + entity.DataTypeMtime = aws.String("123#" + strconv.FormatInt(*entity.Mtime, 10)) + _, err := suite.dynamo.InsertSyncEntity(&entity) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + expectedSyncItems = append(expectedSyncItems, entity) + } + + // All items should be returned and sorted by Mtime. 
+ token = 0 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 300, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + sort.Sort(datastore.SyncEntityByMtime(expectedSyncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + suite.Assert().False(hasChangesRemaining) + + // Test that when maxGUBatchSize is smaller than total updates, the first n + // items ordered by Mtime should be returned. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 200, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, expectedSyncItems[0:200]) + suite.Assert().True(hasChangesRemaining) +} + +func (suite *SyncEntityDynamoTestSuite) TestDisableSyncChain() { + clientID := "client1" + id := "disabled_chain" + err := suite.dynamo.DisableSyncChain(clientID) + suite.Require().NoError(err, "DisableSyncChain should succeed") + e, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(e)) + suite.Assert().Equal(clientID, e[0].ClientID) + suite.Assert().Equal(id, e[0].ID) +} + +func (suite *SyncEntityDynamoTestSuite) TestIsSyncChainDisabled() { + clientID := "client1" + + disabled, err := suite.dynamo.IsSyncChainDisabled(clientID) + suite.Require().NoError(err, "IsSyncChainDisabled should succeed") + suite.Assert().Equal(false, disabled) + + err = suite.dynamo.DisableSyncChain(clientID) + suite.Require().NoError(err, "DisableSyncChain should succeed") + disabled, err = suite.dynamo.IsSyncChainDisabled(clientID) + suite.Require().NoError(err, "IsSyncChainDisabled should succeed") + suite.Assert().Equal(true, disabled) +} + +func (suite *SyncEntityDynamoTestSuite) TestClearServerData() { + // Test clear sync entities + entity := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: 
aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + } + _, err := suite.dynamo.InsertSyncEntity(&entity) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + e, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(1, len(e)) + + e, err = suite.dynamo.ClearServerData(entity.ClientID) + suite.Require().NoError(err, "ClearServerData should succeed") + suite.Assert().Equal(1, len(e)) + + e, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(0, len(e)) + + // Test clear tagged items + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + ServerDefinedUniqueTag: aws.String("tag1"), + } + entity2 := entity1 + entity2.ID = "id2" + entity2.ServerDefinedUniqueTag = aws.String("tag2") + entities := []*datastore.SyncEntity{&entity1, &entity2} + suite.Require().NoError( + suite.dynamo.InsertSyncEntitiesWithServerTags(entities), + "InsertSyncEntitiesWithServerTags should succeed") + + e, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(2, len(e), "No items should be written if fail") + + t, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(2, len(t), "No items should be written if fail") + + e, err = suite.dynamo.ClearServerData(entity.ClientID) + suite.Require().NoError(err, "ClearServerData should succeed") + suite.Assert().Equal(4, len(e)) + + e, err = 
datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(0, len(e), "No items should be written if fail") + + t, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(t), "No items should be written if fail") +} + +func TestSyncEntityDynamoTestSuite(t *testing.T) { + suite.Run(t, new(SyncEntityDynamoTestSuite)) +} diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index eb073af9..4f4bc100 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -44,7 +44,7 @@ func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (con } // if rows affected is not len(entities), then there must be a conflict. return true to indicate this condition. - return int(rowsAffected) == len(entities), nil + return int(rowsAffected) != len(entities), nil } func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exists bool, err error) { diff --git a/datastore/sync_entity_sql_test.go b/datastore/sync_entity_sql_test.go new file mode 100644 index 00000000..f7ccbedc --- /dev/null +++ b/datastore/sync_entity_sql_test.go @@ -0,0 +1,389 @@ +package datastore_test + +import ( + "testing" + "time" + + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/google/uuid" + "github.com/stretchr/testify/suite" +) + +type SyncEntitySQLTestSuite struct { + suite.Suite + sqlDB *datastore.SQLDB +} + +func (suite *SyncEntitySQLTestSuite) SetupSuite() { + var err error + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to create SQL database") +} + +func (suite *SyncEntitySQLTestSuite) SetupTest() { + err := datastoretest.ResetSQLTables(suite.sqlDB) + suite.Require().NoError(err, "Failed to reset SQL tables") +} + +func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { + id, _ := 
uuid.NewV7() + entity := datastore.SyncEntity{ + ID: id.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2}, + ClientDefinedUniqueTag: &[]string{"tag1"}[0], + } + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + entity.ChainID = chainID + + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Insert should not conflict") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Try to insert the same entity again + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + id, _ = uuid.NewV7() + entity.ID = id.String() + conflict, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().True(conflict, "Insert should conflict") + + err = tx.Rollback() + suite.Require().NoError(err, "Rollback should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestHasItem() { + id, _ := uuid.NewV7() + entity := datastore.SyncEntity{ + ID: id.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2}, + ClientDefinedUniqueTag: &[]string{"tag1"}[0], + } + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + 
entity.ChainID = chainID + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + exists, err := suite.sqlDB.HasItem(tx, *chainID, *entity.ClientDefinedUniqueTag) + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().True(exists, "Item should exist") + + exists, err = suite.sqlDB.HasItem(tx, *chainID, "non_existent_tag") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().False(exists, "Item should not exist") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestUpdateSyncEntity() { + id, _ := uuid.NewV7() + entity := datastore.SyncEntity{ + ID: id.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2}, + } + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + entity.ChainID = chainID + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Test normal update + updatedEntity := entity + updatedEntity.Version = &[]int64{2}[0] + updatedEntity.Mtime = &[]int64{23456789}[0] + updatedEntity.Folder = &[]bool{true}[0] + + // Test updating with wrong chain ID + wrongChainEntity := updatedEntity + wrongChainEntity.ChainID = &[]int64{*chainID + 1}[0] + conflict, deleted, err := suite.sqlDB.UpdateSyncEntity(tx, &wrongChainEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict due to wrong chain ID") + suite.Assert().False(deleted, "Entity should not 
be deleted") + + // Valid update + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Update should not conflict") + suite.Assert().False(deleted, "Entity should not be deleted") + + *entity.Version = *updatedEntity.Version + + *updatedEntity.Version = 3 + + // Test updating with wrong version + conflictEntity := updatedEntity + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &conflictEntity, 99) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict due to version mismatch") + suite.Assert().False(deleted, "Entity should not be deleted") + + // Test updating to deleted state + *updatedEntity.Deleted = true + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Update should not conflict") + suite.Assert().True(deleted, "Entity should be deleted") + + *entity.Version = *updatedEntity.Version + + // Test updating a deleted entity + *updatedEntity.Version = 4 + *updatedEntity.Deleted = false + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict") + suite.Assert().False(deleted, "Entity should not be deleted") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestGetUpdatesForType() { + id1, _ := uuid.NewV7() + id2, _ := uuid.NewV7() + id3, _ := uuid.NewV7() + id4, _ := uuid.NewV7() + entities := []datastore.SyncEntity{ + { + ID: id1.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + 
Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2}, + }, + { + ID: id2.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345679}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{true}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{3, 4}, + }, + { + ID: id3.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345679}[0], + Mtime: &[]int64{12345680}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{true}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{3, 4}, + }, + { + ID: id4.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345680}[0], + Mtime: &[]int64{12345680}[0], + DataType: &[]int{124}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{5, 6}, + }, + } + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + + for i := range entities { + entities[i].ChainID = chainID + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entities[i]}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + } + + hasChangesRemaining, syncItems, err := suite.sqlDB.GetUpdatesForType(tx, 123, 0, true, *chainID, 100) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().False(hasChangesRemaining, "Should not have changes remaining") + suite.Assert().Equal(entities[:3], syncItems) + + hasChangesRemaining, syncItems, err = suite.sqlDB.GetUpdatesForType(tx, 123, 12345678, true, *chainID, 100) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().False(hasChangesRemaining, "Should not have changes remaining") + suite.Assert().Equal(entities[1:3], syncItems) + + hasChangesRemaining, syncItems, err = suite.sqlDB.GetUpdatesForType(tx, 123, 0, true, *chainID, 2) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + 
suite.Assert().True(hasChangesRemaining, "Should have changes remaining") + suite.Assert().Equal(entities[:2], syncItems) + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestDeleteChain() { + id1, _ := uuid.NewV7() + id2, _ := uuid.NewV7() + entity1 := datastore.SyncEntity{ + ID: id1.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2}, + } + entity2 := datastore.SyncEntity{ + ID: id2.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &[]int64{12345678}[0], + DataType: &[]int{123}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{3, 4}, + } + + // Insert data for two chains + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + chainID1, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed for client1") + entity1.ChainID = chainID1 + + chainID2, err := suite.sqlDB.GetAndLockChainID(tx, "client2") + suite.Require().NoError(err, "GetAndLockChainID should succeed for client2") + entity2.ChainID = chainID2 + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity1, &entity2}) + suite.Require().NoError(err, "InsertSyncEntities should succeed") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Delete chain for client1 + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + + err = suite.sqlDB.DeleteChain(tx, *chainID1) + suite.Require().NoError(err, "DeleteChain should succeed") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Verify that the chain and its entities are deleted for client1 + suite.checkChainExistence(*chainID1, false) 
+ + // Verify that data still exists for client2 + suite.checkChainExistence(*chainID2, true) +} + +func (suite *SyncEntitySQLTestSuite) checkChainExistence(chainID int64, shouldExist bool) { + var expectedCount int + var count int + if shouldExist { + expectedCount = 1 + } + err := suite.sqlDB.QueryRow("SELECT COUNT(*) FROM entities WHERE chain_id = $1", chainID).Scan(&count) + suite.Require().NoError(err, "Count query should succeed for entities") + suite.Assert().Equal(expectedCount, count, "Entities for chain should be correct amount") + + err = suite.sqlDB.QueryRow("SELECT COUNT(*) FROM chains WHERE id = $1", chainID).Scan(&count) + suite.Require().NoError(err, "Count query should succeed") + suite.Assert().Equal(expectedCount, count, "Chain entry should be correct amount") +} + +func (suite *SyncEntitySQLTestSuite) TestConcurrentGetAndLockChainID() { + clientID := "testClient" + + // Start first transaction + tx1, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction 1 should succeed") + + // Get and lock chain ID in first transaction + chainID1, err := suite.sqlDB.GetAndLockChainID(tx1, clientID) + suite.Require().NoError(err, "GetAndLockChainID should succeed for tx1") + + // Try to get and lock chain ID in second transaction + // This should block until the first transaction is committed + stepChan := make(chan bool) + go func() { + // Start second transaction + tx2, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction 2 should succeed") + + stepChan <- true + chainID2, err := suite.sqlDB.GetAndLockChainID(tx2, clientID) + suite.Require().NoError(err, "GetAndLockChainID should succeed for tx2") + suite.Assert().Equal(*chainID1, *chainID2, "Chain IDs should be the same") + + err = tx2.Commit() + suite.Require().NoError(err, "Commit transaction 2 should succeed") + stepChan <- true + }() + + // Wait until second transaction has started + <-stepChan + + select { + case <-stepChan: + suite.FailNow("Second 
transaction goroutine exited prematurely") + case <-time.After(200 * time.Millisecond): + } + + // Commit the first transaction + err = tx1.Commit() + suite.Require().NoError(err, "Commit transaction 1 should succeed") + + // Wait for the second transaction to complete + select { + case <-stepChan: + // Success, second transaction completed + case <-time.After(5 * time.Second): + suite.Fail("Second transaction did not complete in time") + } + +} + +func TestSyncEntitySQLTestSuite(t *testing.T) { + suite.Run(t, new(SyncEntitySQLTestSuite)) +} diff --git a/datastore/sync_entity_test.go b/datastore/sync_entity_test.go index f3a9369a..eb3e1748 100644 --- a/datastore/sync_entity_test.go +++ b/datastore/sync_entity_test.go @@ -2,40 +2,20 @@ package datastore_test import ( "encoding/json" - "sort" "strconv" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/brave/go-sync/datastore" - "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/schema/protobuf/sync_pb" - "github.com/brave/go-sync/utils" + "github.com/google/uuid" "github.com/stretchr/testify/suite" "google.golang.org/protobuf/proto" ) type SyncEntityTestSuite struct { suite.Suite - dynamo *datastore.Dynamo -} - -func (suite *SyncEntityTestSuite) SetupSuite() { - datastore.Table = "client-entity-test-datastore" - var err error - suite.dynamo, err = datastore.NewDynamo(true) - suite.Require().NoError(err, "Failed to get dynamoDB session") -} - -func (suite *SyncEntityTestSuite) SetupTest() { - suite.Require().NoError( - datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") -} - -func (suite *SyncEntityTestSuite) TearDownTest() { - suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") } func (suite *SyncEntityTestSuite) TestNewServerClientUniqueTagItem() { @@ -66,588 +46,6 @@ func (suite *SyncEntityTestSuite) TestNewServerClientUniqueTagItem() { suite.Assert().Equal(expectedClientTag, actualClientTag) } -func (suite 
*SyncEntityTestSuite) TestInsertSyncEntity() { - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - } - entity2 := entity1 - entity2.ID = "id2" - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity with other ID should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().Error(err, "InsertSyncEntity with the same ClientID and ID should fail") - - // Each InsertSyncEntity without client tag should result in one sync item saved. - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal( - 0, len(tagItems), "Insert without client tag should not insert tag items") - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - - // Insert entity with client tag should result in one sync item and one tag - // item saved. - entity3 := entity1 - entity3.ID = "id3" - entity3.ClientDefinedUniqueTag = aws.String("tag1") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - // Insert entity with different tag for same ClientID should succeed. - entity4 := entity3 - entity4.ID = "id4" - entity4.ClientDefinedUniqueTag = aws.String("tag2") - _, err = suite.dynamo.InsertSyncEntity(&entity4) - suite.Require().NoError(err, "InsertSyncEntity with different server tag should succeed") - - // Insert entity with the same client tag and ClientID should fail with conflict. 
- entity4Copy := entity4 - entity4Copy.ID = "id4_copy" - conflict, err := suite.dynamo.InsertSyncEntity(&entity4Copy) - suite.Require().Error(err, "InsertSyncEntity with the same client tag and ClientID should fail") - suite.Assert().True(conflict, "Return conflict for duplicate client tag") - - // Insert entity with the same client tag for other client should not fail. - entity5 := entity3 - entity5.ClientID = "client2" - entity5.ID = "id5" - _, err = suite.dynamo.InsertSyncEntity(&entity5) - suite.Require().NoError(err, - "InsertSyncEntity with the same client tag for another client should succeed") - - // Check sync items are saved for entity1, entity2, entity3, entity4, entity5. - syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3, entity4, entity5} - sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - - // Check tag items should be saved for entity3, entity4, entity5. - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - - // Check that Ctime and Mtime have been set, reset to zero value for subsequent - // tests - for i := 0; i < len(tagItems); i++ { - suite.Assert().NotNil(tagItems[i].Ctime) - suite.Assert().NotNil(tagItems[i].Mtime) - - tagItems[i].Ctime = nil - tagItems[i].Mtime = nil - } - - suite.Require().NoError(err, "ScanTagItems should succeed") - expectedTagItems := []datastore.ServerClientUniqueTagItem{ - {ClientID: "client1", ID: "Client#tag1"}, - {ClientID: "client1", ID: "Client#tag2"}, - {ClientID: "client2", ID: "Client#tag1"}, - } - sort.Sort(datastore.TagItemByClientIDID(tagItems)) - suite.Assert().Equal(expectedTagItems, tagItems) -} - -func (suite *SyncEntityTestSuite) TestHasServerDefinedUniqueTag() { - // Insert entities with server tags using InsertSyncEntitiesWithServerTags. 
- tag1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(true), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - tag2 := tag1 - tag2.ClientID = "client2" - tag2.ID = "id2" - tag2.ServerDefinedUniqueTag = aws.String("tag2") - entities := []*datastore.SyncEntity{&tag1, &tag2} - - err := suite.dynamo.InsertSyncEntitiesWithServerTags(entities) - suite.Require().NoError(err, "Insert sync entities should succeed") - - hasTag, err := suite.dynamo.HasServerDefinedUniqueTag("client1", "tag1") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client1", "tag2") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag1") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag2") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, true) -} - -func (suite *SyncEntityTestSuite) TestHasItem() { - // Insert entity which will be checked later - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - entity2 := entity1 - entity2.ClientID = "client2" - entity2.ID = "id2" - - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, 
err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - hasTag, err := suite.dynamo.HasItem("client1", "id1") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasItem("client2", "id2") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasItem("client2", "id3") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasItem("client3", "id2") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, false) -} - -func (suite *SyncEntityTestSuite) TestInsertSyncEntitiesWithServerTags() { - // Insert with same ClientID and server tag would fail. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - entity2 := entity1 - entity2.ID = "id2" - entities := []*datastore.SyncEntity{&entity1, &entity2} - suite.Require().Error( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "Insert with same ClientID and server tag would fail") - - // Check nothing is written to DB when it fails. 
- syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(syncItems), "No items should be written if fail") - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(0, len(tagItems), "No items should be written if fail") - - entity2.ServerDefinedUniqueTag = aws.String("tag2") - entity3 := entity1 - entity3.ClientID = "client2" - entity3.ID = "id3" - entities = []*datastore.SyncEntity{&entity1, &entity2, &entity3} - suite.Require().NoError( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "InsertSyncEntitiesWithServerTags should succeed") - - // Scan DB and check all items are saved - syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3} - sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - - // Check that Ctime and Mtime have been set, reset to zero value for subsequent - // tests - for i := 0; i < len(tagItems); i++ { - suite.Assert().NotNil(tagItems[i].Ctime) - suite.Assert().NotNil(tagItems[i].Mtime) - - tagItems[i].Ctime = nil - tagItems[i].Mtime = nil - } - - expectedTagItems := []datastore.ServerClientUniqueTagItem{ - {ClientID: "client1", ID: "Server#tag1"}, - {ClientID: "client1", ID: "Server#tag2"}, - {ClientID: "client2", ID: "Server#tag1"}, - } - sort.Sort(datastore.TagItemByClientIDID(tagItems)) - suite.Assert().Equal(expectedTagItems, tagItems) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { - // Insert three new items. 
- entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - entity2 := entity1 - entity2.ID = "id2" - entity3 := entity1 - entity3.ID = "id3" - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - // Check sync entities are inserted correctly in DB. - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2, entity3}) - - // Update without optional fields. - updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(23456789) - updateEntity1.Mtime = aws.Int64(23456789) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.Deleted = aws.Bool(true) - updateEntity1.DataTypeMtime = aws.String("123#23456789") - updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Successful update should result in delete") - - // Update with optional fields. 
- updateEntity2 := updateEntity1 - updateEntity2.ID = "id2" - updateEntity2.Deleted = aws.Bool(false) - updateEntity2.Folder = aws.Bool(false) - updateEntity2.UniquePosition = []byte{5, 6} - updateEntity2.ParentID = aws.String("parentID") - updateEntity2.Name = aws.String("name") - updateEntity2.NonUniqueName = aws.String("non_unique_name") - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - - // Update with nil Folder and Deleted - updateEntity3 := updateEntity1 - updateEntity3.ID = "id3" - updateEntity3.Folder = nil - updateEntity3.Deleted = nil - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - // Reset these back to false because they will be the expected value in DB. - updateEntity3.Folder = aws.Bool(false) - updateEntity3.Deleted = aws.Bool(false) - - // Update entity again with the wrong old version as (version mismatch) - // should return false. - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().True(conflict, "Update with the same version should return conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - // suite.Assert().False(deleted, "Successful update should not result in delete") - - // Check sync entities are updated correctly in DB. 
- syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity1, updateEntity2, updateEntity3}) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_HistoryType() { - // Insert a history item - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - ClientDefinedUniqueTag: aws.String("client_tag1"), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(963985), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - conflict, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(2) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.Mtime = aws.Int64(24242424) - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - - // should still succeed with the same version number, - // since the version number should be ignored - updateEntity2 := updateEntity1 - updateEntity2.Mtime = aws.Int64(42424242) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) - suite.Require().NoError(err, "UpdateSyncEntity should not return an error") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - - updateEntity3 := entity1 - updateEntity3.Deleted = aws.Bool(true) - - conflict, deleted, err = 
suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Successful update should result in delete") - - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - updateEntity3.ID = *updateEntity3.ClientDefinedUniqueTag - suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity3}) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_ReuseClientTag() { - // Insert an item with client tag. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - ClientDefinedUniqueTag: aws.String("client_tag"), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - conflict, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - // Check a tag item is inserted. - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") - - // Update it to version 23456789. 
- updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(23456789) - updateEntity1.Mtime = aws.Int64(23456789) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.DataTypeMtime = aws.String("123#23456789") - updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - - // Soft-delete the item with wrong version should get conflict. - updateEntity1.Deleted = aws.Bool(true) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().True(conflict, "Version mismatched update should have conflict") - suite.Assert().False(deleted, "Successful update should not result in delete") - - // Soft-delete the item with matched version. - updateEntity1.Version = aws.Int64(34567890) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Successful update should result in delete") - - // Check tag item is deleted. - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(0, len(tagItems), "Tag item should be deleted") - - // Insert another item with the same client tag again. - entity2 := entity1 - entity2.ID = "id2" - conflict, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - // Check a tag item is inserted. 
- tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") -} - -func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { - // Insert items for testing. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(true), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - - entity2 := entity1 - entity2.ID = "id2" - entity2.Folder = aws.Bool(false) - entity2.Mtime = aws.Int64(12345679) - entity2.DataTypeMtime = aws.String("123#12345679") - - entity3 := entity2 - entity3.ID = "id3" - entity3.DataType = aws.Int(124) - entity3.DataTypeMtime = aws.String("124#12345679") - - // non-expired item - entity4 := entity2 - entity4.ClientID = "client2" - entity4.ID = "id4" - entity4.ExpirationTime = aws.Int64(time.Now().Unix() + 300) - - // expired item - entity5 := entity2 - entity5.ClientID = "client2" - entity5.ID = "id5" - entity5.ExpirationTime = aws.Int64(time.Now().Unix() - 300) - - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity4) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity5) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - // Get all updates for type 123 and client1 using token = 0. 
- var token int64 - hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 124 and client1 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client1", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity3}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 123 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client2", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity4}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 124 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client2", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(len(syncItems), 0) - suite.Assert().False(hasChangesRemaining) - - // Test maxSize will limit the return entries size, and hasChangesRemaining - // should be true when there are more updates available in the DB. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 1, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1}) - suite.Assert().True(hasChangesRemaining) - - // Test when num of query items equal to the limit, hasChangesRemaining should - // be true. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 2, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - suite.Assert().True(hasChangesRemaining) - - // Test fetchFolders will remove folder items if false - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, false, "client1", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for a type for a client using mtime of one item as token. - token = 12345678 - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) - suite.Assert().False(hasChangesRemaining) - - // Test batch is working correctly for over 100 items - err = datastoretest.ResetDynamoTable(suite.dynamo) - suite.Require().NoError(err, "Failed to reset table") - - expectedSyncItems := []datastore.SyncEntity{} - entity1 = datastore.SyncEntity{ - ClientID: "client1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - Specifics: []byte{1, 2}, - } - - mtime := utils.UnixMilli(time.Now()) - for i := 1; i <= 250; i++ { - mtime = mtime + 1 - entity := entity1 - entity.ID = "id" + strconv.Itoa(i) - entity.Mtime = aws.Int64(mtime) - entity.DataTypeMtime = aws.String("123#" + strconv.FormatInt(*entity.Mtime, 10)) - _, err := suite.dynamo.InsertSyncEntity(&entity) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - expectedSyncItems = append(expectedSyncItems, entity) - } - - // All items should be returned and sorted by Mtime. 
- token = 0 - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 300, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - sort.Sort(datastore.SyncEntityByMtime(expectedSyncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - suite.Assert().False(hasChangesRemaining) - - // Test that when maxGUBatchSize is smaller than total updates, the first n - // items ordered by Mtime should be returned. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 200, true) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, expectedSyncItems[0:200]) - suite.Assert().True(hasChangesRemaining) -} - func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { nigoriSpecific := &sync_pb.NigoriSpecifics{} nigoriEntitySpecific := &sync_pb.EntitySpecifics_Nigori{Nigori: nigoriSpecific} @@ -702,6 +100,9 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { suite.Assert().NotEqual( dbEntity.ID, *pbEntity.IdString, "ID should be a server-generated ID and not equal to the passed IdString") + _, err = uuid.Parse(dbEntity.ID) + suite.Assert().NoError(err, "dbEntity.ID should be a valid UUID") + expectedDBEntity.ID = dbEntity.ID // Check Mtime and Ctime should be provided by the server if client does not @@ -790,9 +191,11 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { uniquePositionBytes, err := proto.Marshal(uniquePosition) suite.Require().NoError(err, "Marshal unique position should succeed") + id, _ := uuid.NewV7() + dbEntity := datastore.SyncEntity{ ClientID: "client1", - ID: "id1", + ID: id.String(), ParentID: aws.String("parent_id"), Version: aws.Int64(10), Mtime: aws.Int64(12345678), @@ -838,6 +241,19 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { suite.Require().NoError(err, "json.Marshal should succeed") suite.Assert().Equal(s1, s2) + // Ensure ID is the client 
tag for history items + expectedPBEntity.IdString = expectedPBEntity.ClientTagHash + *dbEntity.DataType = datastore.HistoryTypeID + pbEntity, err = datastore.CreatePBSyncEntity(&dbEntity) + suite.Require().NoError(err, "CreatePBSyncEntity should succeed") + + // Marshal to json to ignore protobuf internal fields when checking equality. + s1, err = json.Marshal(pbEntity) + suite.Require().NoError(err, "json.Marshal should succeed") + s2, err = json.Marshal(&expectedPBEntity) + suite.Require().NoError(err, "json.Marshal should succeed") + suite.Assert().Equal(s1, s2) + // Nil UniquePosition should be unmarshalled as nil without error. dbEntity.UniquePosition = nil pbEntity, err = datastore.CreatePBSyncEntity(&dbEntity) @@ -851,102 +267,6 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { suite.Assert().Nil(pbEntity.Specifics) } -func (suite *SyncEntityTestSuite) TestDisableSyncChain() { - clientID := "client1" - id := "disabled_chain" - err := suite.dynamo.DisableSyncChain(clientID) - suite.Require().NoError(err, "DisableSyncChain should succeed") - e, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(e)) - suite.Assert().Equal(clientID, e[0].ClientID) - suite.Assert().Equal(id, e[0].ID) -} - -func (suite *SyncEntityTestSuite) TestIsSyncChainDisabled() { - clientID := "client1" - - disabled, err := suite.dynamo.IsSyncChainDisabled(clientID) - suite.Require().NoError(err, "IsSyncChainDisabled should succeed") - suite.Assert().Equal(false, disabled) - - err = suite.dynamo.DisableSyncChain(clientID) - suite.Require().NoError(err, "DisableSyncChain should succeed") - disabled, err = suite.dynamo.IsSyncChainDisabled(clientID) - suite.Require().NoError(err, "IsSyncChainDisabled should succeed") - suite.Assert().Equal(true, disabled) -} - -func (suite *SyncEntityTestSuite) TestClearServerData() { - // Test clear sync entities - entity := datastore.SyncEntity{ - ClientID: 
"client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - } - _, err := suite.dynamo.InsertSyncEntity(&entity) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - e, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(1, len(e)) - - e, err = suite.dynamo.ClearServerData(entity.ClientID) - suite.Require().NoError(err, "ClearServerData should succeed") - suite.Assert().Equal(1, len(e)) - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(e)) - - // Test clear tagged items - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - entity2 := entity1 - entity2.ID = "id2" - entity2.ServerDefinedUniqueTag = aws.String("tag2") - entities := []*datastore.SyncEntity{&entity1, &entity2} - suite.Require().NoError( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "InsertSyncEntitiesWithServerTags should succeed") - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(2, len(e), "No items should be written if fail") - - t, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(2, len(t), "No items should be written if fail") - - e, err = suite.dynamo.ClearServerData(entity.ClientID) - suite.Require().NoError(err, "ClearServerData should succeed") - 
suite.Assert().Equal(4, len(e)) - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(e), "No items should be written if fail") - - t, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(0, len(t), "No items should be written if fail") -} - func TestSyncEntityTestSuite(t *testing.T) { suite.Run(t, new(SyncEntityTestSuite)) } From 69d56f9654eeecf301388d1fdfd3fb15fee7d9bf Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Thu, 19 Sep 2024 17:26:42 -0700 Subject: [PATCH 12/19] Run command test suite for both Dynamo and SQL --- command/command.go | 12 ++++++++ command/command_test.go | 58 +++++++++++++++++++++++++++++++++---- command/item_count.go | 4 +-- datastore/item_count_sql.go | 9 +++--- datastore/sql_variations.go | 12 ++++---- 5 files changed, 76 insertions(+), 19 deletions(-) diff --git a/command/command.go b/command/command.go index 2f291ed4..58ee68ec 100644 --- a/command/command.go +++ b/command/command.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "fmt" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/brave/go-sync/cache" @@ -266,6 +267,8 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c continue } + createTime := time.Now() + // Check if ParentID is a client-generated ID which appears in previous // commit entries, if so, replace with corresponding server-generated ID. if entityToCommit.ParentID != nil { @@ -356,6 +359,15 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.IdString = aws.String(entityToCommit.ID) entryRsp.Version = entityToCommit.Version entryRsp.Mtime = entityToCommit.Mtime + + if time.Since(createTime) < time.Millisecond { + // To ensure that all entities are in perfect order (sorted by mtime), + // we should ensure that the mtime for each entity is unique. 
+ // CreateDBSyncEntity sets the mtime to the current time. + // If processing the entity took less than a millisecond, + // wait a little longer. + time.Sleep(time.Millisecond - time.Since(createTime)) + } } err = dbHelpers.ItemCounts.save() diff --git a/command/command_test.go b/command/command_test.go index e14826d2..c28df45b 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -15,6 +15,8 @@ import ( "github.com/brave/go-sync/datastore" "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/schema/protobuf/sync_pb" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" "github.com/stretchr/testify/suite" ) @@ -27,9 +29,16 @@ const ( type CommandTestSuite struct { suite.Suite - dynamoDB *datastore.Dynamo - cache *cache.Cache - sqlDB *datastore.SQLDB + storeInSQL bool + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func NewCommandTestSuite(storeInSQL bool) *CommandTestSuite { + return &CommandTestSuite{ + storeInSQL: storeInSQL, + } } type PBSyncAttrs struct { @@ -59,6 +68,13 @@ func NewPBSyncAttrs(name *string, version *int64, deleted *bool, folder *bool, s } func (suite *CommandTestSuite) SetupSuite() { + var rollouts string + if suite.storeInSQL { + rollouts = strconv.Itoa(int(bookmarkType)) + "=1.0," + strconv.Itoa(int(nigoriType)) + "=1.0" + } + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) + datastore.Table = "client-entity-test-command" var err error suite.dynamoDB, err = datastore.NewDynamo(true) @@ -594,11 +610,32 @@ func assertTypeMtimeCacheValue(suite *CommandTestSuite, key string, mtime int64, func insertSyncEntitiesWithoutUpdateCache( suite *CommandTestSuite, entries []*sync_pb.SyncEntity, clientID string) (ret []*datastore.SyncEntity) { + var chainID *int64 + var tx *sqlx.Tx + if suite.storeInSQL { + var err error + tx, err = suite.sqlDB.DB.Beginx() + suite.Require().NoError(err, "should be able to 
begin transaction") + chainID, err = suite.sqlDB.GetAndLockChainID(tx, clientID) + } for _, entry := range entries { dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID, 1) suite.Require().NoError(err, "Create db entity from pb entity should succeed") - _, err = suite.dynamoDB.InsertSyncEntity(dbEntry) - suite.Require().NoError(err, "Insert sync entity should succeed") + + if suite.storeInSQL { + id, _ := uuid.NewV7() + dbEntry.ID = id.String() + dbEntry.ChainID = chainID + + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{dbEntry}) + suite.Require().NoError(err, "Insert sync entity should succeed") + suite.Require().False(conflict, "Insert should not conflict") + + } else { + _, err = suite.dynamoDB.InsertSyncEntity(dbEntry) + suite.Require().NoError(err, "Insert sync entity should succeed") + } + val, err := suite.cache.Get(context.Background(), clientID+"#"+strconv.Itoa(*dbEntry.DataType), false) suite.Require().NoError(err, "Get from cache should succeed") @@ -606,6 +643,10 @@ func insertSyncEntitiesWithoutUpdateCache( "Cache should not be updated") ret = append(ret, dbEntry) } + if tx != nil { + err := tx.Commit() + suite.Require().NoError(err, "Commit transaction should succeed") + } return } @@ -821,5 +862,10 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch } func TestCommandTestSuite(t *testing.T) { - suite.Run(t, new(CommandTestSuite)) + t.Run("Dynamo", func(t *testing.T) { + suite.Run(t, NewCommandTestSuite(false)) + }) + t.Run("SQL", func(t *testing.T) { + suite.Run(t, NewCommandTestSuite(true)) + }) } diff --git a/command/item_count.go b/command/item_count.go index cea3aa80..c472fa20 100644 --- a/command/item_count.go +++ b/command/item_count.go @@ -97,9 +97,9 @@ func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool, isStored } func (itemCounts *ItemCounts) sumCounts(historyOnly bool) int { - sum := itemCounts.dynamoItemCounts.SumHistoryCounts() + 
itemCounts.sqlTxNewHistoryCount + itemCounts.cacheNewHistoryCount + sum := itemCounts.dynamoItemCounts.SumHistoryCounts() + itemCounts.sqlItemCounts.HistoryItemCount + itemCounts.sqlTxNewHistoryCount + itemCounts.cacheNewHistoryCount if !historyOnly { - sum += itemCounts.dynamoItemCounts.ItemCount + itemCounts.sqlTxNewNormalCount + itemCounts.cacheNewNormalCount + sum += itemCounts.dynamoItemCounts.ItemCount + itemCounts.sqlItemCounts.NormalItemCount + itemCounts.sqlTxNewNormalCount + itemCounts.cacheNewNormalCount } return sum } diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go index 95a5e059..08f40962 100644 --- a/datastore/item_count_sql.go +++ b/datastore/item_count_sql.go @@ -2,7 +2,6 @@ package datastore import ( "fmt" - "strconv" "github.com/jmoiron/sqlx" ) @@ -16,11 +15,11 @@ func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, e counts := SQLItemCounts{} err := tx.Get(&counts, ` SELECT - COUNT(*) FILTER (WHERE data_type != `+strconv.Itoa(HistoryTypeID)+`) normal_item_count, - COUNT(*) FILTER (WHERE data_type = `+strconv.Itoa(HistoryTypeID)+`) history_item_count + COUNT(*) FILTER (WHERE data_type != $1) AS normal_item_count, + COUNT(*) FILTER (WHERE data_type = $1) AS history_item_count FROM entities - WHERE chain_id = $1 - `, chainID) + WHERE chain_id = $2 AND deleted = false + `, HistoryTypeID, chainID) if err != nil { return nil, fmt.Errorf("failed to get item counts: %w", err) } diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index 1c22a688..219fce76 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -9,8 +9,8 @@ import ( "strings" ) -const sqlSaveRolloutsEnvKey = "SQL_SAVE_ROLLOUTS" -const sqlMigrateRolloutsEnvKey = "SQL_MIGRATE_ROLLOUTS" +const SQLSaveRolloutsEnvKey = "SQL_SAVE_ROLLOUTS" +const SQLMigrateRolloutsEnvKey = "SQL_MIGRATE_ROLLOUTS" func VariationHashDecimal(input string) float32 { h := fnv.New32a() @@ -58,11 +58,11 @@ func parseRollouts(envKey 
string) (map[int]float32, error) { } func LoadSQLVariations() (*SQLVariations, error) { - sqlSaveRollouts, err := parseRollouts(sqlSaveRolloutsEnvKey) + sqlSaveRollouts, err := parseRollouts(SQLSaveRolloutsEnvKey) if err != nil { return nil, err } - sqlMigrateRollouts, err := parseRollouts(sqlMigrateRolloutsEnvKey) + sqlMigrateRollouts, err := parseRollouts(SQLMigrateRolloutsEnvKey) if err != nil { return nil, err } @@ -85,8 +85,8 @@ func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHa } func (sqlVariations *SQLVariations) GetStateDigest() string { - return sqlSaveRolloutsEnvKey + ":" + os.Getenv(sqlSaveRolloutsEnvKey) + ";" + - sqlMigrateRolloutsEnvKey + ":" + os.Getenv(sqlMigrateRolloutsEnvKey) + return SQLSaveRolloutsEnvKey + ":" + os.Getenv(SQLSaveRolloutsEnvKey) + ";" + + SQLMigrateRolloutsEnvKey + ":" + os.Getenv(SQLMigrateRolloutsEnvKey) } func (sqlDB *SQLDB) Variations() *SQLVariations { From 9d5f4fea1c09394ad2aa2092c7eb4f5f09ec9a84 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Thu, 19 Sep 2024 22:28:54 -0700 Subject: [PATCH 13/19] Add item count test --- command/command.go | 6 +- command/command_test.go | 3 + command/helpers.go | 10 +- command/item_count.go | 14 +- command/item_count_test.go | 272 ++++++++++++++++++++++++++++ datastore/item_count_dynamo_test.go | 14 +- datastore/item_count_sql.go | 7 +- datastore/sync_entity_sql_test.go | 9 + 8 files changed, 310 insertions(+), 25 deletions(-) create mode 100644 command/item_count_test.go diff --git a/command/command.go b/command/command.go index 58ee68ec..bdbc6ab8 100644 --- a/command/command.go +++ b/command/command.go @@ -295,7 +295,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } if !isUpdateOp { // Create - totalItemCount := dbHelpers.ItemCounts.sumCounts(false) + totalItemCount := dbHelpers.ItemCounts.SumCounts(false) if totalItemCount >= maxClientObjectQuota { rspType := sync_pb.CommitResponse_OVER_QUOTA entryRsp.ResponseType 
= &rspType @@ -303,7 +303,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c continue } - if !isHistoryRelatedItem || dbHelpers.ItemCounts.sumCounts(true) < maxClientHistoryObjectQuota { + if !isHistoryRelatedItem || dbHelpers.ItemCounts.SumCounts(true) < maxClientHistoryObjectQuota { // Insert all non-history items. For history items, ignore any items above history quoto // and lie to the client about the objects being synced instead of returning OVER_QUOTA // so the client can continue to sync other entities. @@ -370,7 +370,7 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } } - err = dbHelpers.ItemCounts.save() + err = dbHelpers.ItemCounts.Save() if err != nil { log.Error().Err(err).Msg("Get interim item counts failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR diff --git a/command/command_test.go b/command/command_test.go index c28df45b..f519348f 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -616,7 +616,10 @@ func insertSyncEntitiesWithoutUpdateCache( var err error tx, err = suite.sqlDB.DB.Beginx() suite.Require().NoError(err, "should be able to begin transaction") + defer tx.Rollback() + chainID, err = suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "should be able to get chain ID") } for _, entry := range entries { dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID, 1) diff --git a/command/helpers.go b/command/helpers.go index e46b64aa..407c9bd0 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -35,7 +35,7 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatasto var itemCounts *ItemCounts if initItemCounts { - itemCounts, err = getItemCounts(cache, dynamoDB, sqlDB, trx, clientID, *chainID) + itemCounts, err = GetItemCounts(cache, dynamoDB, sqlDB, trx, clientID, *chainID) if err != nil { trx.Rollback() return nil, err @@ -114,7 +114,7 @@ func (h *DBHelpers) insertSyncEntity(entity 
*datastore.SyncEntity) (conflict boo conflict, err = h.dynamoDB.InsertSyncEntity(entity) } if err == nil && !conflict && (entity.Deleted == nil || !*entity.Deleted) { - if err = h.ItemCounts.recordChange(*entity.DataType, false, savedInSQL); err != nil { + if err = h.ItemCounts.RecordChange(*entity.DataType, false, savedInSQL); err != nil { return false, err } } @@ -159,7 +159,7 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in if oldEntity.Deleted == nil || !*oldEntity.Deleted { // If the stored entity was not already deleted, decrement the // Dynamo item count since we'll be migrating the entity to SQL. - if err = h.ItemCounts.recordChange(*entity.DataType, true, false); err != nil { + if err = h.ItemCounts.RecordChange(*entity.DataType, true, false); err != nil { return false, nil, err } } @@ -175,7 +175,7 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in if !conflict && (entity.Deleted == nil || !*entity.Deleted) { // If the new entity is not considered deleted, increment the // SQL interim count. 
- if err = h.ItemCounts.recordChange(*entity.DataType, false, true); err != nil { + if err = h.ItemCounts.RecordChange(*entity.DataType, false, true); err != nil { return false, nil, err } } @@ -188,7 +188,7 @@ func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion in } } if !conflict && deleted { - if err = h.ItemCounts.recordChange(*entity.DataType, true, shouldSaveInSQL); err != nil { + if err = h.ItemCounts.RecordChange(*entity.DataType, true, shouldSaveInSQL); err != nil { return false, nil, err } } diff --git a/command/item_count.go b/command/item_count.go index c472fa20..ef4ea2d6 100644 --- a/command/item_count.go +++ b/command/item_count.go @@ -22,7 +22,7 @@ type ItemCounts struct { sqlTxNewHistoryCount int } -func getItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { +func GetItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { dynamoItemCounts, err := dynamoDB.GetClientItemCount(clientID) if err != nil { return nil, err @@ -66,8 +66,8 @@ func (itemCounts *ItemCounts) updateInterimItemCounts(clear bool) error { return nil } -func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool, isStoredInSQL bool) error { - isHistory := dataType == datastore.HistoryTypeID +func (itemCounts *ItemCounts) RecordChange(dataType int, subtract bool, isStoredInSQL bool) error { + isHistory := dataType == datastore.HistoryTypeID || dataType == datastore.HistoryDeleteDirectiveTypeID if isStoredInSQL { delta := 1 if subtract { @@ -88,15 +88,15 @@ func (itemCounts *ItemCounts) recordChange(dataType int, subtract bool, isStored return fmt.Errorf("failed to increment history cache count") } if isHistory { - itemCounts.cacheNewNormalCount = newCount - } else { itemCounts.cacheNewHistoryCount = newCount + } else { + 
itemCounts.cacheNewNormalCount = newCount } } return nil } -func (itemCounts *ItemCounts) sumCounts(historyOnly bool) int { +func (itemCounts *ItemCounts) SumCounts(historyOnly bool) int { sum := itemCounts.dynamoItemCounts.SumHistoryCounts() + itemCounts.sqlItemCounts.HistoryItemCount + itemCounts.sqlTxNewHistoryCount + itemCounts.cacheNewHistoryCount if !historyOnly { sum += itemCounts.dynamoItemCounts.ItemCount + itemCounts.sqlItemCounts.NormalItemCount + itemCounts.sqlTxNewNormalCount + itemCounts.cacheNewNormalCount @@ -104,7 +104,7 @@ func (itemCounts *ItemCounts) sumCounts(historyOnly bool) int { return sum } -func (itemCounts *ItemCounts) save() error { +func (itemCounts *ItemCounts) Save() error { err := itemCounts.updateInterimItemCounts(true) if err != nil { return fmt.Errorf("error getting interim item count: %w", err) diff --git a/command/item_count_test.go b/command/item_count_test.go new file mode 100644 index 00000000..09ad52e0 --- /dev/null +++ b/command/item_count_test.go @@ -0,0 +1,272 @@ +package command_test + +import ( + "context" + "testing" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/command" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/suite" +) + +type ItemCountTestSuite struct { + suite.Suite + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func (suite *ItemCountTestSuite) SetupSuite() { + var rollouts string + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) + + datastore.Table = "client-entity-test-command" + var err error + suite.dynamoDB, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") + + suite.cache = 
cache.NewCache(cache.NewRedisClient()) +} + +func (suite *ItemCountTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") +} + +func (suite *ItemCountTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") + suite.Require().NoError( + suite.cache.FlushAll(context.Background()), "Failed to clear cache") +} + +func (suite *ItemCountTestSuite) insertSyncEntity(tx *sqlx.Tx, itemCounts *command.ItemCounts, insertInSQL bool, dataType int, clientID string, chainID int64) *datastore.SyncEntity { + id, err := uuid.NewV7() + suite.Require().NoError(err, "Failed to generate UUID") + + entity := &datastore.SyncEntity{ + ChainID: &chainID, + ClientID: clientID, + ID: id.String(), + DataType: &dataType, + Version: &[]int64{1}[0], + Mtime: &[]int64{123}[0], + Ctime: &[]int64{123}[0], + Specifics: []byte{1, 2}, + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + ClientDefinedUniqueTag: &[]string{id.String()}[0], + DataTypeMtime: &[]string{"123#12345678"}[0], + } + + if insertInSQL { + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{entity}) + suite.Require().NoError(err, "Failed to insert sync entity in SQL") + suite.Require().False(conflict, "Unexpected conflict when inserting sync entity in SQL") + } else { + conflict, err := suite.dynamoDB.InsertSyncEntity(entity) + suite.Require().NoError(err, "Failed to insert sync entity in DynamoDB") + suite.Require().False(conflict, "Unexpected conflict when inserting sync entity in DynamoDB") + } + suite.Require().NoError(itemCounts.RecordChange(dataType, false, insertInSQL), "Should be able record change") + return entity +} + +func (suite *ItemCountTestSuite) deleteSyncEntity(tx *sqlx.Tx, itemCounts *command.ItemCounts, deleteInSQL bool, entity 
*datastore.SyncEntity) { + *entity.Version = 2 + *entity.Deleted = true + if deleteInSQL { + conflict, deleted, err := suite.sqlDB.UpdateSyncEntity(tx, entity, 1) + suite.Require().NoError(err, "Failed to delete sync entity in SQL") + suite.Require().False(conflict, "Unexpected conflict when deleting sync entity in SQL") + suite.Require().True(deleted, "Expected entity to be marked as deleted in SQL") + } else { + conflict, deleted, err := suite.dynamoDB.UpdateSyncEntity(entity, 1) + suite.Require().NoError(err, "Failed to delete sync entity in DynamoDB") + suite.Require().False(conflict, "Unexpected conflict when deleting sync entity in DynamoDB") + suite.Require().True(deleted, "Expected entity to be marked as deleted in DynamoDB") + } + suite.Require().NoError(itemCounts.RecordChange(*entity.DataType, true, deleteInSQL), "Should be able to record change") +} + +func (suite *ItemCountTestSuite) TestPreloaded() { + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err) + + suite.Equal(0, itemCounts.SumCounts(false), "Expected initial sum of item counts to be zero") + suite.Equal(0, itemCounts.SumCounts(true), "Expected initial sum of item counts to be zero") +} + +func (suite *ItemCountTestSuite) TestInsertAndCountItems() { + clientID := "client1" + + // Start a new transaction for insertions + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for insertions") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, 
*chainID) + + // Insert items + suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 123, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID) + + suite.Equal(4, itemCounts.SumCounts(true), "Expected history total count of 4") + suite.Equal(7, itemCounts.SumCounts(false), "Expected total count of 7") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + // Start a new transaction for counting + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for counting") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts") + + suite.Equal(4, itemCounts.SumCounts(true), "Expected history total count of 4") + suite.Equal(7, itemCounts.SumCounts(false), "Expected total count of 7") + + clientID = "client2" + chainID, err = suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID for other client") + + otherItemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts for other client") + + suite.Equal(0, otherItemCounts.SumCounts(true), "Expected history total count of 0 for other client") + suite.Equal(0, otherItemCounts.SumCounts(false), "Expected total count of 0 for other client") +} + +func (suite *ItemCountTestSuite) 
TestDeleteAfterInsertCommit() { + clientID := "client1" + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get initial item counts") + + var sqlEntitiesToDelete []*datastore.SyncEntity + var dynamoEntitiesToDelete []*datastore.SyncEntity + + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID)) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + for _, entity := range sqlEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, true, entity) + } + for _, entity := range dynamoEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, false, entity) + } + + suite.Equal(2, itemCounts.SumCounts(true), "Expected history count of 2 after deletions") + suite.Equal(4, 
itemCounts.SumCounts(false), "Expected total count of 4 after deletions") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for final count") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get final item counts") + + suite.Equal(2, itemCounts.SumCounts(true), "Expected history count of 2 after deletions") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 after deletions") +} + +func (suite *ItemCountTestSuite) TestDeleteBeforeInsertCommit() { + clientID := "client1" + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get initial item counts") + + var sqlEntitiesToDelete []*datastore.SyncEntity + var dynamoEntitiesToDelete []*datastore.SyncEntity + + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID)) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, 
false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + + for _, entity := range sqlEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, true, entity) + } + for _, entity := range dynamoEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, false, entity) + } + + // Check counts before commit + suite.Equal(2, itemCounts.SumCounts(true), "Expected SQL count of 2 before commit") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 before commit") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + // Start a new transaction for final count + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for final count") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get final item counts") + + // Check counts after commit + suite.Equal(2, itemCounts.SumCounts(true), "Expected SQL count of 2 after commit") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 after commit") +} + +func TestItemCountTestSuite(t *testing.T) { + suite.Run(t, new(ItemCountTestSuite)) +} diff --git a/datastore/item_count_dynamo_test.go b/datastore/item_count_dynamo_test.go index 60255d7c..f7a62cba 100644 --- a/datastore/item_count_dynamo_test.go +++ b/datastore/item_count_dynamo_test.go @@ -9,29 +9,29 @@ import ( "github.com/stretchr/testify/suite" ) -type ItemCountTestSuite struct { +type ItemCountDynamoTestSuite struct { suite.Suite dynamo *datastore.Dynamo } -func (suite *ItemCountTestSuite) SetupSuite() { +func (suite *ItemCountDynamoTestSuite) SetupSuite() { datastore.Table = "client-entity-test-datastore" var err error suite.dynamo, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") } -func (suite 
*ItemCountTestSuite) SetupTest() { +func (suite *ItemCountDynamoTestSuite) SetupTest() { suite.Require().NoError( datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") } -func (suite *ItemCountTestSuite) TearDownTest() { +func (suite *ItemCountDynamoTestSuite) TearDownTest() { suite.Require().NoError( datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") } -func (suite *ItemCountTestSuite) TestGetClientItemCount() { +func (suite *ItemCountDynamoTestSuite) TestGetClientItemCount() { // Insert two items for test. items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 5}, @@ -55,7 +55,7 @@ func (suite *ItemCountTestSuite) TestGetClientItemCount() { suite.Assert().Equal(count.ItemCount, 0) } -func (suite *ItemCountTestSuite) TestUpdateClientItemCount() { +func (suite *ItemCountDynamoTestSuite) TestUpdateClientItemCount() { items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 1}, {ClientID: "client1", ID: "client1", ItemCount: 5}, @@ -85,5 +85,5 @@ func (suite *ItemCountTestSuite) TestUpdateClientItemCount() { } func TestItemCountTestSuite(t *testing.T) { - suite.Run(t, new(ItemCountTestSuite)) + suite.Run(t, new(ItemCountDynamoTestSuite)) } diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go index 08f40962..d3831e0e 100644 --- a/datastore/item_count_sql.go +++ b/datastore/item_count_sql.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jmoiron/sqlx" + "github.com/lib/pq" ) type SQLItemCounts struct { @@ -15,11 +16,11 @@ func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, e counts := SQLItemCounts{} err := tx.Get(&counts, ` SELECT - COUNT(*) FILTER (WHERE data_type != $1) AS normal_item_count, - COUNT(*) FILTER (WHERE data_type = $1) AS history_item_count + COUNT(*) FILTER (WHERE NOT (data_type = ANY($1))) AS normal_item_count, + COUNT(*) FILTER (WHERE data_type = ANY($1)) AS history_item_count FROM entities WHERE chain_id = $2 AND 
deleted = false - `, HistoryTypeID, chainID) + `, pq.Array([]int{HistoryTypeID, HistoryDeleteDirectiveTypeID}), chainID) if err != nil { return nil, fmt.Errorf("failed to get item counts: %w", err) } diff --git a/datastore/sync_entity_sql_test.go b/datastore/sync_entity_sql_test.go index f7ccbedc..d988ae3e 100644 --- a/datastore/sync_entity_sql_test.go +++ b/datastore/sync_entity_sql_test.go @@ -42,6 +42,7 @@ func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "GetAndLockChainID should succeed") @@ -57,6 +58,7 @@ func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { // Try to insert the same entity again tx, err = suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() id, _ = uuid.NewV7() entity.ID = id.String() @@ -84,6 +86,7 @@ func (suite *SyncEntitySQLTestSuite) TestHasItem() { tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "GetAndLockChainID should succeed") @@ -119,6 +122,7 @@ func (suite *SyncEntitySQLTestSuite) TestUpdateSyncEntity() { tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "GetAndLockChainID should succeed") @@ -229,6 +233,7 @@ func (suite *SyncEntitySQLTestSuite) TestGetUpdatesForType() { tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "GetAndLockChainID should succeed") @@ -285,6 +290,7 @@ func 
(suite *SyncEntitySQLTestSuite) TestDeleteChain() { // Insert data for two chains tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() chainID1, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "GetAndLockChainID should succeed for client1") @@ -303,6 +309,7 @@ func (suite *SyncEntitySQLTestSuite) TestDeleteChain() { // Delete chain for client1 tx, err = suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() err = suite.sqlDB.DeleteChain(tx, *chainID1) suite.Require().NoError(err, "DeleteChain should succeed") @@ -338,6 +345,7 @@ func (suite *SyncEntitySQLTestSuite) TestConcurrentGetAndLockChainID() { // Start first transaction tx1, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction 1 should succeed") + defer tx1.Rollback() // Get and lock chain ID in first transaction chainID1, err := suite.sqlDB.GetAndLockChainID(tx1, clientID) @@ -350,6 +358,7 @@ func (suite *SyncEntitySQLTestSuite) TestConcurrentGetAndLockChainID() { // Start second transaction tx2, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction 2 should succeed") + defer tx2.Rollback() stepChan <- true chainID2, err := suite.sqlDB.GetAndLockChainID(tx2, clientID) From fa45895cfe964af293a668c8c0645838789c7fe2 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Fri, 20 Sep 2024 17:05:02 -0600 Subject: [PATCH 14/19] Add migration and SQL variations test, improve other tests --- command/command_test.go | 236 +++++++++++++++------- command/item_count_test.go | 3 +- command/migrate_test.go | 314 ++++++++++++++++++++++++++++++ datastore/item_count_sql.go | 9 +- datastore/sql.go | 8 +- datastore/sql_variations_test.go | 57 ++++++ datastore/sync_entity_sql_test.go | 127 +++--------- 7 files changed, 573 insertions(+), 181 deletions(-) create mode 100644 command/migrate_test.go create mode 100644 
datastore/sql_variations_test.go diff --git a/command/command_test.go b/command/command_test.go index f519348f..5b54db9c 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -4,12 +4,15 @@ import ( "context" "encoding/binary" "encoding/json" + "fmt" "sort" "strconv" "strings" "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/expression" "github.com/brave/go-sync/cache" "github.com/brave/go-sync/command" "github.com/brave/go-sync/datastore" @@ -17,16 +20,26 @@ import ( "github.com/brave/go-sync/schema/protobuf/sync_pb" "github.com/google/uuid" "github.com/jmoiron/sqlx" + "github.com/lib/pq" "github.com/stretchr/testify/suite" ) const ( - clientID string = "client" - bookmarkType int32 = 32904 - nigoriType int32 = 47745 - cacheGUID string = "cache_guid" + testClientID string = "client" + bookmarkType int32 = 32904 + nigoriType int32 = 47745 + cacheGUID string = "cache_guid" + testDynamoTable = "client-entity-test-command" ) +func buildRolloutConfigString(dataTypes []int32) string { + var configParts []string + for _, dataType := range dataTypes { + configParts = append(configParts, fmt.Sprintf("%d=1.0", dataType)) + } + return strings.Join(configParts, ",") +} + type CommandTestSuite struct { suite.Suite storeInSQL bool @@ -70,12 +83,11 @@ func NewPBSyncAttrs(name *string, version *int64, deleted *bool, folder *bool, s func (suite *CommandTestSuite) SetupSuite() { var rollouts string if suite.storeInSQL { - rollouts = strconv.Itoa(int(bookmarkType)) + "=1.0," + strconv.Itoa(int(nigoriType)) + "=1.0" + rollouts = buildRolloutConfigString([]int32{bookmarkType, nigoriType}) } suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) - suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) - datastore.Table = "client-entity-test-command" + datastore.Table = testDynamoTable var err error suite.dynamoDB, err = datastore.NewDynamo(true) 
suite.Require().NoError(err, "Failed to get dynamoDB session") @@ -93,6 +105,9 @@ func (suite *CommandTestSuite) SetupTest() { } func (suite *CommandTestSuite) TearDownTest() { + isEmpty, err := verifyNoDataInOtherDB(suite.storeInSQL, suite.dynamoDB, suite.sqlDB) + suite.Require().NoError(err, "Empty table verification should succeed") + suite.Require().True(isEmpty, "Other datastore should be empty") suite.Require().NoError( datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") suite.Require().NoError( @@ -147,16 +162,28 @@ func getClientToServerCommitMsg(entries []*sync_pb.SyncEntity) *sync_pb.ClientTo } } -func getMarker(suite *CommandTestSuite, tokens []int64) []*sync_pb.DataTypeProgressMarker { - types := []int32{nigoriType, bookmarkType} // hard-coded types used in tests. - suite.Assert().Equal(len(types), len(tokens)) +type MarkerTokens struct { + Nigori *int64 + Bookmark *int64 +} + +func getMarker(tokens MarkerTokens) []*sync_pb.DataTypeProgressMarker { marker := []*sync_pb.DataTypeProgressMarker{} - for i, token := range tokens { - tokenBytes := make([]byte, binary.MaxVarintLen64) - binary.PutVarint(tokenBytes, token) - marker = append(marker, &sync_pb.DataTypeProgressMarker{ - DataTypeId: aws.Int32(types[i]), Token: tokenBytes}) + + createMarker := func(tokenPtr *int64, dataTypeID int32) { + if tokenPtr != nil { + tokenBytes := make([]byte, binary.MaxVarintLen64) + binary.PutVarint(tokenBytes, *tokenPtr) + marker = append(marker, &sync_pb.DataTypeProgressMarker{ + DataTypeId: aws.Int32(dataTypeID), + Token: tokenBytes, + }) + } } + + createMarker(tokens.Nigori, nigoriType) + createMarker(tokens.Bookmark, bookmarkType) + return marker } @@ -175,12 +202,15 @@ func getClientToServerGUMsg(marker []*sync_pb.DataTypeProgressMarker, } } -func getTokensFromNewMarker(suite *CommandTestSuite, newMarker []*sync_pb.DataTypeProgressMarker) (int64, int64) { +func getTokensFromNewMarker(suite *CommandTestSuite, newMarker 
[]*sync_pb.DataTypeProgressMarker) MarkerTokens { nigoriToken, n := binary.Varint(newMarker[0].Token) suite.Assert().Greater(n, 0) bookmarkToken, n := binary.Varint(newMarker[1].Token) suite.Assert().Greater(n, 0) - return nigoriToken, bookmarkToken + return MarkerTokens{ + Nigori: &nigoriToken, + Bookmark: &bookmarkToken, + } } func assertCommonResponse(suite *CommandTestSuite, rsp *sync_pb.ClientToServerResponse, isCommit bool) { @@ -242,7 +272,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { // Commit and check response. suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -257,12 +287,16 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { } // GetUpdates with token 0 should get all of them. 
- marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) + msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -286,7 +320,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) @@ -302,13 +336,12 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { // GetUpdates again with previous returned mtimes and check the result, it // should include update items and newly commit items. 
- nigoriToken, bookmarkToken := getTokensFromNewMarker(suite, newMarker) - marker = getMarker(suite, []int64{nigoriToken, bookmarkToken}) + marker = getMarker(getTokensFromNewMarker(suite, newMarker)) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -333,7 +366,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { msg = getClientToServerCommitMsg(entries) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -343,13 +376,12 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { } // GetUpdates again with previous returned tokens should return 0 updates. 
- nigoriToken, bookmarkToken = getTokensFromNewMarker(suite, newMarker) - marker = getMarker(suite, []int64{nigoriToken, bookmarkToken}) + marker = getMarker(getTokensFromNewMarker(suite, newMarker)) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -360,13 +392,17 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { func (suite *CommandTestSuite) TestHandleClientToServerMessage_NewClient() { // Prepare input message for NEW_CLIENT get updates request. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) + msg := getClientToServerGUMsg( marker, sync_pb.SyncEnums_NEW_CLIENT, true, nil) rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -410,7 +446,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_GUBatchSize() { // Commit and check response. 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -434,7 +470,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -459,7 +495,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -480,7 +516,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -497,7 +533,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { 
rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -517,7 +553,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -539,7 +575,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo msg := getClientToServerCommitMsg([]*sync_pb.SyncEntity{child0}) rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -568,7 +604,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(6, 
len(rsp.Commit.Entryresponse)) @@ -578,12 +614,15 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo // Get updates to check if child's parent ID is replaced with the server // generated ID of its parent. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(6, len(rsp.GetUpdates.Entries)) @@ -665,7 +704,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(3, len(rsp.Commit.Entryresponse)) @@ -683,10 +722,10 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba } // Latest mtime of each type in the commit should be stored in the cache. 
- assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Successful commit should write the latest mtime into cache") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "Successful commit should write the latest mtime into cache") @@ -697,29 +736,32 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba getCommitEntity("id4_bookmark", 0, false, getBookmarkSpecifics()), getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), }, - clientID) + testClientID) // GU request with the same or newer token should be short circuited, so // should return no updates. - marker := getMarker(suite, []int64{latestNigoriMtime, latestBookmarkMtime + 1}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(latestNigoriMtime), + Bookmark: aws.Int64(latestBookmarkMtime + 1), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(0, len(rsp.GetUpdates.Entries)) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache is not updated when short circuited") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "cache is not updated when 
short circuited") // Manually update cache for our DB insert. latestBookmarkMtime = *dbEntries[0].Mtime latestNigoriMtime = *dbEntries[1].Mtime - suite.cache.SetTypeMtime(context.Background(), clientID, int(bookmarkType), latestBookmarkMtime) - suite.cache.SetTypeMtime(context.Background(), clientID, int(nigoriType), latestNigoriMtime) + suite.cache.SetTypeMtime(context.Background(), testClientID, int(bookmarkType), latestBookmarkMtime) + suite.cache.SetTypeMtime(context.Background(), testClientID, int(nigoriType), latestNigoriMtime) // Commit another entry and check if cache is updated. entries = []*sync_pb.SyncEntity{ @@ -729,7 +771,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -737,25 +779,28 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba suite.Assert().Equal(commitSuccess, *entryRsp.ResponseType) latestBookmarkMtime = *entryRsp.Mtime - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Successful commit should update the cache") // Send GU with an old token will get updates immediately. // Check the cache value again, should be the same as the latest mtime in rsp. 
- marker = getMarker(suite, []int64{latestNigoriMtime - 1, latestBookmarkMtime - 1}) + marker = getMarker(MarkerTokens{ + Nigori: aws.Int64(latestNigoriMtime - 1), + Bookmark: aws.Int64(latestBookmarkMtime - 1), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(2, len(rsp.GetUpdates.Entries)) suite.Assert().Equal(latestNigoriMtime, *rsp.GetUpdates.Entries[0].Mtime) suite.Assert().Equal(latestBookmarkMtime, *rsp.GetUpdates.Entries[1].Mtime) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Cached token should be equal to latest mtime") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "Cached token should be equal to latest mtime") } @@ -768,14 +813,14 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) commitSuccess := sync_pb.CommitResponse_SUCCESS suite.Assert().Equal(commitSuccess, *rsp.Commit.Entryresponse[0].ResponseType) latestBookmarkMtime := 
*rsp.Commit.Entryresponse[0].Mtime - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Commit should write the latest mtime into cache") @@ -787,20 +832,23 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk []*sync_pb.SyncEntity{ getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), }, - clientID) + testClientID) // Check that we will receive the manually inserted item from DB immediately. - marker := getMarker(suite, []int64{0, latestBookmarkMtime}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(latestBookmarkMtime), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) suite.Require().Equal(dbEntries[0].Mtime, rsp.GetUpdates.Entries[0].Mtime) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), *dbEntries[0].Mtime, "Successful commit should update the cache") } @@ -814,7 +862,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, 
true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -825,45 +873,93 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch suite.Assert().NotEqual(latestBookmarkMtime, *entryRsp.Mtime) latestBookmarkMtime = *entryRsp.Mtime } - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Commit should write the latest mtime into cache") // Send a GU with batch size set to 1, changesRemaining in rsp should be 1 // and cache should not be updated. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) clientBatch := int32(2) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, true, &clientBatch) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(2, len(rsp.GetUpdates.Entries)) suite.Require().Equal(int64(0), *rsp.GetUpdates.ChangesRemaining) mtime := *rsp.GetUpdates.Entries[0].Mtime - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache should not be updated when changes remaining = 1") // Send a second GU with changesRemaining in rsp = 0 and check cache is now // updated. 
- marker = getMarker(suite, []int64{0, mtime}) + marker = getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(mtime), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) suite.Require().Equal(int64(0), *rsp.GetUpdates.ChangesRemaining) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache should be updated when changes remaining = 0") } +func getDatastoreCount(checkSQL bool, dynamoDB *datastore.Dynamo, sqlDB *datastore.SQLDB, dataTypes []int32) (int64, error) { + var count int64 + if !checkSQL { + filt := expression.Name("DataType").Equal(expression.Value(dataTypes[0])) + for _, dataType := range dataTypes[1:] { + filt = filt.Or(expression.Name("DataType").Equal(expression.Value(dataType))) + } + + expr, err := expression.NewBuilder().WithFilter(filt).Build() + if err != nil { + return 0, err + } + + input := &dynamodb.ScanInput{ + TableName: aws.String(datastore.Table), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + FilterExpression: expr.Filter(), + } + result, err := dynamoDB.Scan(input) + if err != nil { + return 0, err + } + count = *result.Count + } else { + query := "SELECT COUNT(*) FROM entities WHERE data_type = ANY($1)" + err := sqlDB.QueryRow(query, pq.Array(dataTypes)).Scan(&count) + if err != nil { + return 0, err + } + } + return count, nil +} + +func verifyNoDataInOtherDB(storeInSQL bool, dynamoDB *datastore.Dynamo, sqlDB 
*datastore.SQLDB) (bool, error) { + count, err := getDatastoreCount(!storeInSQL, dynamoDB, sqlDB, []int32{nigoriType, bookmarkType}) + if err != nil { + return false, err + } + return count == 0, nil +} + func TestCommandTestSuite(t *testing.T) { t.Run("Dynamo", func(t *testing.T) { suite.Run(t, NewCommandTestSuite(false)) diff --git a/command/item_count_test.go b/command/item_count_test.go index 09ad52e0..2ca6472a 100644 --- a/command/item_count_test.go +++ b/command/item_count_test.go @@ -106,7 +106,7 @@ func (suite *ItemCountTestSuite) TestPreloaded() { chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") suite.Require().NoError(err, "Failed to get chain ID") - itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, testClientID, *chainID) suite.Require().NoError(err) suite.Equal(0, itemCounts.SumCounts(false), "Expected initial sum of item counts to be zero") @@ -125,6 +125,7 @@ func (suite *ItemCountTestSuite) TestInsertAndCountItems() { suite.Require().NoError(err, "Failed to get chain ID") itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts") // Insert items suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID) diff --git a/command/migrate_test.go b/command/migrate_test.go new file mode 100644 index 00000000..38dc92e0 --- /dev/null +++ b/command/migrate_test.go @@ -0,0 +1,314 @@ +package command_test + +import ( + "context" + "math" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/command" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/brave/go-sync/schema/protobuf/sync_pb" + "github.com/stretchr/testify/suite" +) + +type CommandMigrateTestSuite struct { + suite.Suite 
+ dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func (suite *CommandMigrateTestSuite) SetupSuite() { + datastore.Table = testDynamoTable + var err error + suite.dynamoDB, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") + + suite.cache = cache.NewCache(cache.NewRedisClient()) +} + +type ExpectedCounts struct { + SQLNigori int64 + SQLBookmark int64 + DynamoNigori int64 + DynamoBookmark int64 +} + +func (suite *CommandMigrateTestSuite) assertDatastoreCounts(expected ExpectedCounts) { + sqlNigoriCount, err := getDatastoreCount(true, suite.dynamoDB, suite.sqlDB, []int32{nigoriType}) + suite.Require().NoError(err, "Failed to get SQL nigori count") + + sqlBookmarkCount, err := getDatastoreCount(true, suite.dynamoDB, suite.sqlDB, []int32{bookmarkType}) + suite.Require().NoError(err, "Failed to get SQL bookmark count") + + dynamoNigoriCount, err := getDatastoreCount(false, suite.dynamoDB, suite.sqlDB, []int32{nigoriType}) + suite.Require().NoError(err, "Failed to get DynamoDB nigori count") + + dynamoBookmarkCount, err := getDatastoreCount(false, suite.dynamoDB, suite.sqlDB, []int32{bookmarkType}) + suite.Require().NoError(err, "Failed to get DynamoDB bookmark count") + + suite.Assert().Equal(expected.SQLNigori, sqlNigoriCount, "SQL nigori count mismatch") + suite.Assert().Equal(expected.SQLBookmark, sqlBookmarkCount, "SQL bookmark count mismatch") + suite.Assert().Equal(expected.DynamoNigori, dynamoNigoriCount, "DynamoDB nigori count mismatch") + suite.Assert().Equal(expected.DynamoBookmark, dynamoBookmarkCount, "DynamoDB bookmark count mismatch") +} + +func (suite *CommandMigrateTestSuite) assertSQLMigrationStatus(dataType int32, checkForFullMigration bool, shouldExist bool) { + var count int + query := ` + SELECT COUNT(*) + FROM dynamo_migration_statuses + WHERE data_type = $1` + + if checkForFullMigration { + query += ` AND earliest_mtime IS NULL` + } + + err := suite.sqlDB.QueryRow(query, 
dataType).Scan(&count) + + var expectedCount int + if shouldExist { + expectedCount = 1 + } + + suite.Require().NoError(err, "Failed to query dynamo_migration_statuses") + suite.Assert().Equal(expectedCount, count, "Migration status row count should match") +} + +func (suite *CommandMigrateTestSuite) createSQLDB(migrateDataTypes []int32) { + rollouts := buildRolloutConfigString(migrateDataTypes) + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateChunkSizeEnvKey, "2") + suite.T().Setenv(datastore.SQLMigrateUpdateIntervalEnvKey, "1") + + isFirstRun := suite.sqlDB == nil + + var err error + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") + + if isFirstRun { + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") + } +} + +func (suite *CommandMigrateTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") +} + +func (suite *CommandMigrateTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") + suite.Require().NoError( + suite.cache.FlushAll(context.Background()), "Failed to clear cache") + suite.sqlDB = nil +} + +func (suite *CommandMigrateTestSuite) sendMessageAndAssertEmptyResponse(msg *sync_pb.ClientToServerMessage) { + rsp := &sync_pb.ClientToServerResponse{} + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + suite.Assert().Equal(sync_pb.SyncEnums_SUCCESS, *rsp.ErrorCode, "errorCode should match") + suite.Assert().NotNil(rsp.GetUpdates) + suite.Assert().Empty(rsp.GetUpdates.Entries) +} + +func (suite *CommandMigrateTestSuite) TestBasicMigrate() { + 
suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id6_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id7_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit and check response. + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // GetUpdates should return nothing. + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + + msg = getClientToServerGUMsg( + marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) + suite.sendMessageAndAssertEmptyResponse(msg) + + isSQLEmpty, err := verifyNoDataInOtherDB(false, suite.dynamoDB, suite.sqlDB) + suite.Require().NoError(err, "Empty database verification should succeed") + suite.Assert().True(isSQLEmpty, "SQL database should be empty") + + suite.createSQLDB([]int32{nigoriType, bookmarkType}) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 2, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 3, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, false) + suite.assertSQLMigrationStatus(nigoriType, false, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 0, + DynamoNigori: 0, + DynamoBookmark: 3, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, false) + suite.assertSQLMigrationStatus(nigoriType, 
false, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 2, + DynamoNigori: 0, + DynamoBookmark: 1, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 3, + DynamoNigori: 0, + DynamoBookmark: 0, + }) + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 3, + DynamoNigori: 0, + DynamoBookmark: 0, + }) + // fully migrated + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) +} + +func (suite *CommandMigrateTestSuite) TestBookmarkOnlyMigration() { + suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit initial entities + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // Enable migration for bookmarks only + suite.createSQLDB([]int32{bookmarkType}) + + // GetUpdates message + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + msg = getClientToServerGUMsg(marker, sync_pb.SyncEnums_GU_TRIGGER, 
false, nil) + + // Initial counts + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 3, + }) + + // Migrate bookmarks + for i := 0; i < 4; i++ { + suite.sendMessageAndAssertEmptyResponse(msg) + if i == 0 { + suite.assertSQLMigrationStatus(bookmarkType, false, true) + } + } + + // Final counts + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 3, + DynamoNigori: 2, + DynamoBookmark: 0, + }) + + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, false, false) +} + +func (suite *CommandMigrateTestSuite) TestMigrateDisabled() { + suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit initial entities + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // GetUpdates message + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + msg = getClientToServerGUMsg(marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) + + // Initial counts + initialCounts := ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 2, + } + suite.assertDatastoreCounts(initialCounts) + + // Send multiple GetUpdates messages + for i := 0; i < 5; i++ { + suite.sendMessageAndAssertEmptyResponse(msg) + + // Assert that counts haven't changed + suite.assertDatastoreCounts(initialCounts) + } + + suite.assertSQLMigrationStatus(bookmarkType, false, false) + 
suite.assertSQLMigrationStatus(nigoriType, false, false) +} + +// test migration of only one type + +func TestCommandMigrateTestSuite(t *testing.T) { + suite.Run(t, new(CommandMigrateTestSuite)) +} diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go index d3831e0e..d203912b 100644 --- a/datastore/item_count_sql.go +++ b/datastore/item_count_sql.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/jmoiron/sqlx" - "github.com/lib/pq" ) type SQLItemCounts struct { @@ -16,11 +15,11 @@ func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, e counts := SQLItemCounts{} err := tx.Get(&counts, ` SELECT - COUNT(*) FILTER (WHERE NOT (data_type = ANY($1))) AS normal_item_count, - COUNT(*) FILTER (WHERE data_type = ANY($1)) AS history_item_count + COUNT(*) FILTER (WHERE data_type NOT IN ($1, $2)) AS normal_item_count, + COUNT(*) FILTER (WHERE data_type IN ($1, $2)) AS history_item_count FROM entities - WHERE chain_id = $2 AND deleted = false - `, pq.Array([]int{HistoryTypeID, HistoryDeleteDirectiveTypeID}), chainID) + WHERE chain_id = $3 AND deleted = false + `, HistoryTypeID, HistoryDeleteDirectiveTypeID, chainID) if err != nil { return nil, fmt.Errorf("failed to get item counts: %w", err) } diff --git a/datastore/sql.go b/datastore/sql.go index ccd01534..fafd842c 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -22,8 +22,8 @@ const ( // Default value is defined here, since the .env file will not be loaded // because tests are run in the subdirectories where the tests live defaultSQLTestURL = "postgres://sync:password@localhost:5434/testing?sslmode=disable" - sqlMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" - sqlMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" + SQLMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" + SQLMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" defaultMigrateUpdateInterval = 4 defaultMigrateChunkSize = 100 ) @@ -89,8 +89,8 @@ func NewSQLDB(isTesting bool) (*SQLDB, error) { 
variations.Ready = true } - migrateInterval, _ := strconv.Atoi(os.Getenv(sqlMigrateUpdateIntervalEnvKey)) - migrateChunkSize, _ := strconv.Atoi(os.Getenv(sqlMigrateChunkSizeEnvKey)) + migrateInterval, _ := strconv.Atoi(os.Getenv(SQLMigrateUpdateIntervalEnvKey)) + migrateChunkSize, _ := strconv.Atoi(os.Getenv(SQLMigrateChunkSizeEnvKey)) if migrateInterval <= 0 { migrateInterval = defaultMigrateUpdateInterval diff --git a/datastore/sql_variations_test.go b/datastore/sql_variations_test.go new file mode 100644 index 00000000..3f629fed --- /dev/null +++ b/datastore/sql_variations_test.go @@ -0,0 +1,57 @@ +package datastore_test + +import ( + "os" + "testing" + + "github.com/brave/go-sync/datastore" + "github.com/stretchr/testify/suite" +) + +type SQLVariationsSuite struct { + suite.Suite + variations *datastore.SQLVariations +} + +func (s *SQLVariationsSuite) SetupTest() { + s.T().Setenv(datastore.SQLSaveRolloutsEnvKey, "1=0.5,2=0.75") + s.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, "1=0.25,3=1.0") + var err error + s.variations, err = datastore.LoadSQLVariations() + s.Require().NoError(err) +} + +func (s *SQLVariationsSuite) TestShouldSaveToSQL() { + s.True(s.variations.ShouldSaveToSQL(1, 0.4)) + s.False(s.variations.ShouldSaveToSQL(1, 0.6)) + s.True(s.variations.ShouldSaveToSQL(2, 0.7)) + s.False(s.variations.ShouldSaveToSQL(2, 0.8)) + s.False(s.variations.ShouldSaveToSQL(3, 0.5)) // Non-existent key +} + +func (s *SQLVariationsSuite) TestShouldMigrateToSQL() { + s.True(s.variations.ShouldMigrateToSQL(1, 0.2)) + s.False(s.variations.ShouldMigrateToSQL(1, 0.3)) + s.True(s.variations.ShouldMigrateToSQL(3, 0.9)) + s.False(s.variations.ShouldMigrateToSQL(2, 0.5)) // Non-existent key +} + +func (s *SQLVariationsSuite) TestVariationHashDecimal() { + hash1 := datastore.VariationHashDecimal("test1") + hash2 := datastore.VariationHashDecimal("test2") + s.NotEqual(hash1, hash2) + s.Less(hash1, float32(1.0)) + s.Less(hash2, float32(1.0)) + s.GreaterOrEqual(hash1, 
float32(0.0)) + s.GreaterOrEqual(hash2, float32(0.0)) +} + +func (s *SQLVariationsSuite) TestParseRolloutsError() { + os.Setenv(datastore.SQLSaveRolloutsEnvKey, "invalid=format") + _, err := datastore.LoadSQLVariations() + s.Error(err) +} + +func TestSQLVariationsSuite(t *testing.T) { + suite.Run(t, new(SQLVariationsSuite)) +} diff --git a/datastore/sync_entity_sql_test.go b/datastore/sync_entity_sql_test.go index d988ae3e..de10e3de 100644 --- a/datastore/sync_entity_sql_test.go +++ b/datastore/sync_entity_sql_test.go @@ -26,19 +26,23 @@ func (suite *SyncEntitySQLTestSuite) SetupTest() { suite.Require().NoError(err, "Failed to reset SQL tables") } -func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { +func createSyncEntity(dataType int32, mtime int64) datastore.SyncEntity { id, _ := uuid.NewV7() - entity := datastore.SyncEntity{ - ID: id.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{1, 2}, - ClientDefinedUniqueTag: &[]string{"tag1"}[0], + return datastore.SyncEntity{ + ID: id.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &mtime, + DataType: &[]int{int(dataType)}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2, 3}, } +} + +func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { + entity := createSyncEntity(123, 12345678) + entity.ClientDefinedUniqueTag = &[]string{"test1"}[0] tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") @@ -60,7 +64,7 @@ func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { suite.Require().NoError(err, "Begin transaction should succeed") defer tx.Rollback() - id, _ = uuid.NewV7() + id, _ := uuid.NewV7() entity.ID = id.String() conflict, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) suite.Require().NoError(err, 
"InsertSyncEntity should succeed") @@ -71,18 +75,8 @@ func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { } func (suite *SyncEntitySQLTestSuite) TestHasItem() { - id, _ := uuid.NewV7() - entity := datastore.SyncEntity{ - ID: id.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{1, 2}, - ClientDefinedUniqueTag: &[]string{"tag1"}[0], - } + entity := createSyncEntity(123, 12345678) + entity.ClientDefinedUniqueTag = &[]string{"test1"}[0] tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") @@ -108,17 +102,8 @@ func (suite *SyncEntitySQLTestSuite) TestHasItem() { } func (suite *SyncEntitySQLTestSuite) TestUpdateSyncEntity() { - id, _ := uuid.NewV7() - entity := datastore.SyncEntity{ - ID: id.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{1, 2}, - } + entity := createSyncEntity(123, 12345678) + entity.Specifics = []byte{1, 2} tx, err := suite.sqlDB.Beginx() suite.Require().NoError(err, "Begin transaction should succeed") @@ -184,51 +169,11 @@ func (suite *SyncEntitySQLTestSuite) TestUpdateSyncEntity() { } func (suite *SyncEntitySQLTestSuite) TestGetUpdatesForType() { - id1, _ := uuid.NewV7() - id2, _ := uuid.NewV7() - id3, _ := uuid.NewV7() - id4, _ := uuid.NewV7() entities := []datastore.SyncEntity{ - { - ID: id1.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{1, 2}, - }, - { - ID: id2.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345679}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{true}[0], - 
Deleted: &[]bool{false}[0], - Specifics: []byte{3, 4}, - }, - { - ID: id3.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345679}[0], - Mtime: &[]int64{12345680}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{true}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{3, 4}, - }, - { - ID: id4.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345680}[0], - Mtime: &[]int64{12345680}[0], - DataType: &[]int{124}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{5, 6}, - }, + createSyncEntity(123, 12345678), + createSyncEntity(123, 12345679), + createSyncEntity(123, 12345680), + createSyncEntity(124, 12345680), } tx, err := suite.sqlDB.Beginx() @@ -264,28 +209,8 @@ func (suite *SyncEntitySQLTestSuite) TestGetUpdatesForType() { } func (suite *SyncEntitySQLTestSuite) TestDeleteChain() { - id1, _ := uuid.NewV7() - id2, _ := uuid.NewV7() - entity1 := datastore.SyncEntity{ - ID: id1.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{1, 2}, - } - entity2 := datastore.SyncEntity{ - ID: id2.String(), - Version: &[]int64{1}[0], - Ctime: &[]int64{12345678}[0], - Mtime: &[]int64{12345678}[0], - DataType: &[]int{123}[0], - Folder: &[]bool{false}[0], - Deleted: &[]bool{false}[0], - Specifics: []byte{3, 4}, - } + entity1 := createSyncEntity(123, 12345678) + entity2 := createSyncEntity(123, 12345678) // Insert data for two chains tx, err := suite.sqlDB.Beginx() From efdf198557210ed702596489f09c04df95f9d618 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Fri, 20 Sep 2024 18:03:14 -0600 Subject: [PATCH 15/19] Update SQL related comments --- command/helpers.go | 26 +++++++++++++++++++++++++- command/item_count.go | 4 ++++ datastore/dynamo_migration_status.go | 2 ++ datastore/interfaces.go | 5 +++-- datastore/item_count_sql.go | 1 + datastore/sql.go | 15 +++++++++++---- 
datastore/sql_variations.go | 13 +++++++++++++ datastore/sync_entity_sql.go | 6 ++++++ 8 files changed, 65 insertions(+), 7 deletions(-) diff --git a/command/helpers.go b/command/helpers.go index 407c9bd0..52fb62a8 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -3,6 +3,7 @@ package command import ( "fmt" "math/rand/v2" + "time" "github.com/brave/go-sync/cache" "github.com/brave/go-sync/datastore" @@ -31,6 +32,7 @@ func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatasto trx.Rollback() return nil, err } + // Get this value to determine if the client should be included in SQL rollouts variationHashDecimal := datastore.VariationHashDecimal(clientID) var itemCounts *ItemCounts @@ -73,11 +75,14 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo return false, nil, nil } if h.SQLDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { + // Get the earliest mtime for entities migrated from Dynamo to SQL, if available. dynamoMigrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) if err != nil { return false, nil, err } + // First, get all entities from Dynamo that are past the given token. + // Only query up until the earliest mtime within SQL. if migrationStatus := dynamoMigrationStatuses[dataType]; migrationStatus == nil || (migrationStatus.EarliestMtime != nil && *migrationStatus.EarliestMtime > token) { var earliestMtime *int64 if migrationStatus != nil { @@ -90,6 +95,8 @@ func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bo curMaxSize -= len(syncEntities) } + // Then get all entities from SQL. We can append the items to syncEntities because + // all Dynamo entities are guaranteed to be older (by mtime) than SQL entities. 
if curMaxSize > 0 { sqlHasChangesRemaining, sqlSyncEntities, err := h.SQLDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) if err != nil { @@ -124,6 +131,8 @@ func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict boo func getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { id := entity.ID if *entity.DataType == datastore.HistoryTypeID { + // In Dynamo, History entities are stored with the client tag as the ID. + // Since the SQL table uses a UUID for the id, generate a new ID here. newID, err := uuid.NewV7() if err != nil { return "", err @@ -203,6 +212,7 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data return nil, nil } var applicableDataTypes []int + // Get all applicable data types for migration for a given chain. for _, dataType := range dataTypes { if !h.SQLDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { continue @@ -213,6 +223,8 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data return nil, nil } + // Get the earliest mtime for entities that were already migrated. + // We use this so we can apply a max mtime filter to our Dynamo query. migrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) if err != nil { return nil, err @@ -227,6 +239,8 @@ func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*data } migrationStatus := migrationStatuses[dataType] if migrationStatus != nil && migrationStatus.EarliestMtime == nil { + // earliest_mtime = null in migration status means that all entities + // for the data type have already been migrated. 
Skip this data type
+// RecordChange updates the interim count according to the addition or deletion of an entity
`db:"earliest_mtime"` } +// GetDynamoMigrationStatuses retrieves migration statuses for specified data types func (sqlDB *SQLDB) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (dataTypeToStatusMap map[int]*MigrationStatus, err error) { dataTypeToStatusMap = make(map[int]*MigrationStatus) @@ -34,6 +35,7 @@ func (sqlDB *SQLDB) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataT return dataTypeToStatusMap, nil } +// UpdateDynamoMigrationStatuses updates migration statuses in the database func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) error { _, err := tx.NamedExec(` INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) diff --git a/datastore/interfaces.go b/datastore/interfaces.go index 654690f2..787f7251 100644 --- a/datastore/interfaces.go +++ b/datastore/interfaces.go @@ -57,9 +57,10 @@ type SQLDatastore interface { Beginx() (*sqlx.Tx, error) // Variations returns the SQLVariations utility Variations() *SQLVariations - // MigrateIntervalPercent returns migration update interval percentage + // MigrateIntervalPercent returns the percentage of update requests that will perform + // a chunked migration MigrateIntervalPercent() float32 - // MigrateChunkSize returns the max entity count for each migration chunk + // MigrateChunkSize returns the max chunk size of migration attempts MigrateChunkSize() int // DeleteChain removes a chain and its associated data from the database DeleteChain(tx *sqlx.Tx, chainID int64) error diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go index d203912b..53ddae51 100644 --- a/datastore/item_count_sql.go +++ b/datastore/item_count_sql.go @@ -11,6 +11,7 @@ type SQLItemCounts struct { HistoryItemCount int `db:"history_item_count"` } +// GetItemCounts returns the counts of items in the SQL database for a given chain ID func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, error) { counts := 
SQLItemCounts{} err := tx.Get(&counts, ` diff --git a/datastore/sql.go b/datastore/sql.go index fafd842c..45adcaef 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -21,11 +21,15 @@ const ( sqlTestURLEnvKey = "SQL_TEST_DATABASE_URL" // Default value is defined here, since the .env file will not be loaded // because tests are run in the subdirectories where the tests live - defaultSQLTestURL = "postgres://sync:password@localhost:5434/testing?sslmode=disable" + defaultSQLTestURL = "postgres://sync:password@localhost:5434/testing?sslmode=disable" + // SQLMigrateUpdateIntervalEnvKey is the env var name used to define the frequency + // of chunked migration within "get update" requests SQLMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" - SQLMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" - defaultMigrateUpdateInterval = 4 - defaultMigrateChunkSize = 100 + // SQLMigrateChunkSizeEnvKey is the env var name used to define the max migration + // chunk size + SQLMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" + defaultMigrateUpdateInterval = 4 + defaultMigrateChunkSize = 100 ) //go:embed migrations/* @@ -104,10 +108,13 @@ func NewSQLDB(isTesting bool) (*SQLDB, error) { return &wrappedDB, nil } +// MigrateIntervalPercent returns the percentage of update requests that will perform +// a chunked migration func (db *SQLDB) MigrateIntervalPercent() float32 { return db.migrateIntervalPercent } +// MigrateChunkSize returns the max chunk size of migration attempts func (db *SQLDB) MigrateChunkSize() int { return db.migrateChunkSize } diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go index 219fce76..c648e0ff 100644 --- a/datastore/sql_variations.go +++ b/datastore/sql_variations.go @@ -9,9 +9,17 @@ import ( "strings" ) +// SQLSaveRolloutsEnvKey defines the data types and rollout percentages for saving +// new items into the SQL database, instead of Dynamo. 
const SQLSaveRolloutsEnvKey = "SQL_SAVE_ROLLOUTS" + +// SQLMigrateRolloutsEnvKey defines the data types and rollout percentages for periodic +// chunked migration from Dynamo to SQL. const SQLMigrateRolloutsEnvKey = "SQL_MIGRATE_ROLLOUTS" +// VariationHashDecimal returns a decimal from 0.0 to 1.0 for a given client ID. +// The decimal is typically checked against a rollout percentage to determine if a user +// should be included in a rollout. func VariationHashDecimal(input string) float32 { h := fnv.New32a() h.Write([]byte(input)) @@ -21,6 +29,7 @@ func VariationHashDecimal(input string) float32 { return float32(hashValue) / math.MaxUint32 } +// SQLVariations handles SQL variation rollout functions type SQLVariations struct { sqlSaveRollouts map[int]float32 sqlMigrateRollouts map[int]float32 @@ -57,6 +66,7 @@ func parseRollouts(envKey string) (map[int]float32, error) { return rollouts, nil } +// LoadSQLVariations creates a SQLVariations struct, configured by env vars func LoadSQLVariations() (*SQLVariations, error) { sqlSaveRollouts, err := parseRollouts(SQLSaveRolloutsEnvKey) if err != nil { @@ -74,16 +84,19 @@ func LoadSQLVariations() (*SQLVariations, error) { }, nil } +// ShouldSaveToSQL returns true if a client should save the entity to the SQL database for a given data type func (sqlVariations *SQLVariations) ShouldSaveToSQL(dataType int, variationHashDecimal float32) bool { rolloutPercent, exists := sqlVariations.sqlSaveRollouts[dataType] return exists && variationHashDecimal <= rolloutPercent } +// ShouldMigrateToSQL returns true if chunked migration from Dynamo to SQL should occur for a given data type func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHashDecimal float32) bool { rolloutPercent, exists := sqlVariations.sqlMigrateRollouts[dataType] return exists && variationHashDecimal <= rolloutPercent } +// GetStateDigest returns a string that combines the env vars related to variations func (sqlVariations *SQLVariations) 
GetStateDigest() string { return SQLSaveRolloutsEnvKey + ":" + os.Getenv(SQLSaveRolloutsEnvKey) + ";" + SQLMigrateRolloutsEnvKey + ":" + os.Getenv(SQLMigrateRolloutsEnvKey) diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go index 4f4bc100..7d7508b5 100644 --- a/datastore/sync_entity_sql.go +++ b/datastore/sync_entity_sql.go @@ -33,6 +33,7 @@ func buildInsertQuery() string { joinedSetValues + ` WHERE entities.deleted = true` } +// InsertSyncEntities inserts multiple sync entities into the database func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (conflict bool, err error) { res, err := tx.NamedExec(sqlDB.insertQuery, entities) if err != nil { @@ -47,6 +48,7 @@ func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (con return int(rowsAffected) != len(entities), nil } +// HasItem checks if an item exists in the database func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exists bool, err error) { err = tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND client_defined_unique_tag = $2)", chainID, clientTag).Scan(&exists) if err != nil { @@ -55,6 +57,7 @@ func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exist return exists, nil } +// UpdateSyncEntity updates a sync entity in the database func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { var idCondition string if *entity.DataType == HistoryTypeID { @@ -112,6 +115,7 @@ func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion return rowsAffected == 0, entity.Deleted != nil && *entity.Deleted, nil } +// GetAndLockChainID retrieves and locks a chain ID for a given client ID func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *int64, err error) { // Get chain ID and lock for updates clientIDBytes, err := hex.DecodeString(clientID) @@ -144,6 +148,7 @@ func 
(sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *in return &id, nil } +// GetUpdatesForType retrieves updates for a specific data type func (sqlDB *SQLDB) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { var additionalCondition string if !fetchFolders { @@ -158,6 +163,7 @@ func (sqlDB *SQLDB) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int return len(entities) == maxSize, entities, nil } +// DeleteChain removes a chain and its associated data from the database func (sqlDB *SQLDB) DeleteChain(tx *sqlx.Tx, chainID int64) error { _, err := tx.Exec(`DELETE FROM chains WHERE id = $1`, chainID) if err != nil { From 156289300967c3b6a9acb9cbfc6e66175303c7e7 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Wed, 9 Oct 2024 13:25:23 -0700 Subject: [PATCH 16/19] Reorder columns for data alignment --- datastore/migrations/20240904202925_init.up.sql | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/datastore/migrations/20240904202925_init.up.sql b/datastore/migrations/20240904202925_init.up.sql index a06dd6e3..31333505 100644 --- a/datastore/migrations/20240904202925_init.up.sql +++ b/datastore/migrations/20240904202925_init.up.sql @@ -1,36 +1,36 @@ CREATE TABLE chains ( id BIGSERIAL PRIMARY KEY, - client_id BYTEA NOT NULL, last_usage_time TIMESTAMP NOT NULL, + client_id BYTEA NOT NULL, UNIQUE (client_id) ); CREATE TABLE dynamo_migration_statuses ( chain_id BIGINT REFERENCES chains(id) ON DELETE CASCADE, - data_type INTEGER, -- null earliest_mtime indicates that all entities have been migrated earliest_mtime BIGINT, + data_type INTEGER, PRIMARY KEY (chain_id, data_type) ); CREATE TABLE entities ( id UUID, chain_id BIGINT NOT NULL REFERENCES chains(id) ON DELETE CASCADE, - data_type INTEGER NOT NULL, ctime BIGINT NOT NULL, mtime BIGINT NOT NULL, + version BIGINT NOT NULL, + data_type 
INTEGER NOT NULL, specifics BYTEA STORAGE EXTERNAL NOT NULL, - deleted BOOL NOT NULL, client_defined_unique_tag TEXT STORAGE PLAIN, server_defined_unique_tag TEXT STORAGE PLAIN, - folder BOOLEAN, - version BIGINT NOT NULL, name TEXT STORAGE PLAIN, originator_cache_guid TEXT STORAGE PLAIN, originator_client_item_id TEXT STORAGE PLAIN, parent_id TEXT STORAGE PLAIN, non_unique_name TEXT STORAGE PLAIN, unique_position BYTEA STORAGE PLAIN, + folder BOOLEAN, + deleted BOOLEAN NOT NULL, PRIMARY KEY (id, chain_id), UNIQUE (chain_id, client_defined_unique_tag) ); From c75155521c27351a909aaddf8331c3658ff785e9 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Tue, 8 Oct 2024 17:36:44 -0700 Subject: [PATCH 17/19] Add partitioning --- .../migrations/20240904202925_init.down.sql | 4 ++++ datastore/migrations/20240904202925_init.up.sql | 16 +++++++++++++++- docker-compose.yml | 4 +++- pg.Dockerfile | 9 +++++++++ 4 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 pg.Dockerfile diff --git a/datastore/migrations/20240904202925_init.down.sql b/datastore/migrations/20240904202925_init.down.sql index 9eb17df2..072e325f 100644 --- a/datastore/migrations/20240904202925_init.down.sql +++ b/datastore/migrations/20240904202925_init.down.sql @@ -1,3 +1,7 @@ DROP TABLE entities; DROP TABLE dynamo_migration_statuses; DROP TABLE chains; + +DROP EXTENSION pg_partman; +DROP EXTENSION pg_cron; +DROP SCHEMA partman CASCADE; diff --git a/datastore/migrations/20240904202925_init.up.sql b/datastore/migrations/20240904202925_init.up.sql index 31333505..9485753b 100644 --- a/datastore/migrations/20240904202925_init.up.sql +++ b/datastore/migrations/20240904202925_init.up.sql @@ -1,3 +1,6 @@ +CREATE SCHEMA IF NOT EXISTS partman; +CREATE EXTENSION IF NOT EXISTS pg_partman SCHEMA partman; + CREATE TABLE chains ( id BIGSERIAL PRIMARY KEY, last_usage_time TIMESTAMP NOT NULL, @@ -33,5 +36,16 @@ CREATE TABLE entities ( deleted BOOLEAN NOT NULL, PRIMARY KEY (id, chain_id), UNIQUE (chain_id, 
client_defined_unique_tag) -); +) +PARTITION BY RANGE (chain_id); CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); + +SELECT partman.create_parent( + p_parent_table := 'public.entities', + p_control := 'chain_id', + p_interval := '3500' +); + +CREATE EXTENSION IF NOT EXISTS pg_cron; + +SELECT cron.schedule('@hourly', $$CALL partman.run_maintenance_proc()$$); diff --git a/docker-compose.yml b/docker-compose.yml index 53e5161e..237de6a5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -76,7 +76,8 @@ services: networks: - sync postgres: - image: public.ecr.aws/docker/library/postgres:16 + build: + dockerfile: pg.Dockerfile ports: - "5434:5432" environment: @@ -84,5 +85,6 @@ services: - POSTGRES_PASSWORD=password networks: - sync + command: ["postgres", "-c", "shared_preload_libraries=pg_cron"] volumes: - "./misc/create_additional_dbs.sql:/docker-entrypoint-initdb.d/create_additional_dbs.sql" diff --git a/pg.Dockerfile b/pg.Dockerfile new file mode 100644 index 00000000..0a7d3475 --- /dev/null +++ b/pg.Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/docker/library/postgres:16 + +RUN apt update && apt install -y git make gcc postgresql-server-dev-16 + +RUN git clone https://github.com/pgpartman/pg_partman +RUN cd pg_partman && make NO_BGW=1 install + +RUN git clone https://github.com/citusdata/pg_cron +RUN cd pg_cron && make && make install From 61bcf2122ffc871a5fefc0a7253ea47dffc268f7 Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Thu, 17 Oct 2024 16:29:48 -0700 Subject: [PATCH 18/19] Add RDS IAM authentication support --- .../migrations/20240904202925_init.up.sql | 52 ++++++++--- datastore/rds.go | 93 +++++++++++++++++++ datastore/sql.go | 26 +++++- go.mod | 14 +++ go.sum | 28 ++++++ 5 files changed, 196 insertions(+), 17 deletions(-) create mode 100644 datastore/rds.go diff --git a/datastore/migrations/20240904202925_init.up.sql b/datastore/migrations/20240904202925_init.up.sql index 9485753b..001ecb9d 
100644 --- a/datastore/migrations/20240904202925_init.up.sql +++ b/datastore/migrations/20240904202925_init.up.sql @@ -23,28 +23,52 @@ CREATE TABLE entities ( mtime BIGINT NOT NULL, version BIGINT NOT NULL, data_type INTEGER NOT NULL, - specifics BYTEA STORAGE EXTERNAL NOT NULL, - client_defined_unique_tag TEXT STORAGE PLAIN, - server_defined_unique_tag TEXT STORAGE PLAIN, - name TEXT STORAGE PLAIN, - originator_cache_guid TEXT STORAGE PLAIN, - originator_client_item_id TEXT STORAGE PLAIN, - parent_id TEXT STORAGE PLAIN, - non_unique_name TEXT STORAGE PLAIN, - unique_position BYTEA STORAGE PLAIN, + specifics BYTEA NOT NULL, + client_defined_unique_tag TEXT, + server_defined_unique_tag TEXT, + name TEXT, + originator_cache_guid TEXT, + originator_client_item_id TEXT, + parent_id TEXT, + non_unique_name TEXT, + unique_position BYTEA, folder BOOLEAN, deleted BOOLEAN NOT NULL, PRIMARY KEY (id, chain_id), UNIQUE (chain_id, client_defined_unique_tag) ) PARTITION BY RANGE (chain_id); + +ALTER TABLE entities ALTER specifics SET STORAGE EXTERNAL; +ALTER TABLE entities ALTER client_defined_unique_tag SET STORAGE PLAIN; +ALTER TABLE entities ALTER server_defined_unique_tag SET STORAGE PLAIN; +ALTER TABLE entities ALTER name SET STORAGE PLAIN; +ALTER TABLE entities ALTER originator_cache_guid SET STORAGE PLAIN; +ALTER TABLE entities ALTER originator_client_item_id SET STORAGE PLAIN; +ALTER TABLE entities ALTER parent_id SET STORAGE PLAIN; +ALTER TABLE entities ALTER non_unique_name SET STORAGE PLAIN; +ALTER TABLE entities ALTER unique_position SET STORAGE PLAIN; + CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); -SELECT partman.create_parent( - p_parent_table := 'public.entities', - p_control := 'chain_id', - p_interval := '3500' -); +DO $$ +BEGIN + -- for vanilla postgres + PERFORM partman.create_parent( + p_parent_table := 'public.entities', + p_control := 'chain_id', + p_interval := '3500', + p_type := 'range' + ); +EXCEPTION WHEN 
OTHERS THEN + -- for Aurora + PERFORM partman.create_parent( + p_parent_table := 'public.entities', + p_control := 'chain_id', + p_interval := '3500', + p_type := 'native' + ); +END $$; CREATE EXTENSION IF NOT EXISTS pg_cron; diff --git a/datastore/rds.go b/datastore/rds.go new file mode 100644 index 00000000..be3c2eda --- /dev/null +++ b/datastore/rds.go @@ -0,0 +1,93 @@ +package datastore + +import ( + "context" + "database/sql/driver" + "fmt" + "net/url" + "os" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/rds/auth" + "github.com/jackc/pgx/stdlib" +) + +const defaultRegion = "us-west-2" + +const ( + rdsPortKey = "RDS_DATABASE_PORT" + rdsHostKey = "RDS_WRITER_ENDPOINT" + rdsUserKey = "RDS_USER" + rdsDbNameKey = "RDS_DATABASE_NAME" + regionKey = "AWS_REGION" +) + +type rdsConnector struct { + hostAndPort string + dbName string + user string + token string + region string + tokenCacheTime time.Time + mu sync.Mutex +} + +func newRDSConnector() *rdsConnector { + port := os.Getenv(rdsPortKey) + host := os.Getenv(rdsHostKey) + user := os.Getenv(rdsUserKey) + dbName := os.Getenv(rdsDbNameKey) + region := os.Getenv(regionKey) + + if region == "" { + region = defaultRegion + } + hostAndPort := fmt.Sprintf("%s:%s", host, port) + return &rdsConnector{ + hostAndPort: hostAndPort, + dbName: dbName, + user: user, + region: region, + } +} + +func (c *rdsConnector) getConnectionString(ctx context.Context) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if time.Since(c.tokenCacheTime) > 10*time.Minute { + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return "", fmt.Errorf("failed to load AWS config") + } + + token, err := auth.BuildAuthToken( + ctx, c.hostAndPort, c.region, c.user, cfg.Credentials) + if err != nil { + return "", fmt.Errorf("failed to create authentication token: %w", err) + } + c.token = url.QueryEscape(token) + c.tokenCacheTime = time.Now() + } + + return 
fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", c.user, c.token, c.hostAndPort, c.dbName), nil +} + +func (c *rdsConnector) Connect(ctx context.Context) (driver.Conn, error) { + connStr, err := c.getConnectionString(ctx) + if err != nil { + return nil, err + } + + return stdlib.GetDefaultDriver().Open(connStr) +} + +func (c *rdsConnector) Driver() driver.Driver { + return c +} + +func (c *rdsConnector) Open(_ string) (driver.Conn, error) { + return nil, fmt.Errorf("open method unsupported") +} diff --git a/datastore/sql.go b/datastore/sql.go index 45adcaef..7899921d 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -1,6 +1,8 @@ package datastore import ( + "context" + "database/sql" "embed" "errors" "fmt" @@ -11,6 +13,7 @@ import ( // import postgres package for migrations _ "github.com/golang-migrate/migrate/v4/database/postgres" "github.com/golang-migrate/migrate/v4/source/iofs" + // import pgx so it can be used with sqlx _ "github.com/jackc/pgx/stdlib" "github.com/jmoiron/sqlx" @@ -58,12 +61,22 @@ func NewSQLDB(isTesting bool) (*SQLDB, error) { envKey = sqlURLEnvKey } + var rdsConnector *rdsConnector + if os.Getenv(rdsHostKey) != "" { + rdsConnector = newRDSConnector() + } + sqlURL := os.Getenv(envKey) - if sqlURL == "" { + if rdsConnector != nil { + sqlURL, err = rdsConnector.getConnectionString(context.Background()) + if err != nil { + return nil, err + } + } else if sqlURL == "" { if isTesting { sqlURL = defaultSQLTestURL } else { - return nil, fmt.Errorf("%s must be defined", envKey) + return nil, fmt.Errorf("%s or %s must be defined", envKey, rdsHostKey) } } iofsDriver, err := iofs.New(migrationFiles, "migrations") @@ -82,9 +95,16 @@ func NewSQLDB(isTesting bool) (*SQLDB, error) { if !errors.Is(err, migrate.ErrNoChange) { return nil, fmt.Errorf("Failed to run migrations: %w", err) } + err = nil } - db, err := sqlx.Connect("pgx", sqlURL) + var db *sqlx.DB + if rdsConnector != nil { + baseDB := sql.OpenDB(rdsConnector) + db = sqlx.NewDb(baseDB, 
"pgx") + } else { + db, err = sqlx.Connect("pgx", sqlURL) + } if err != nil { return nil, fmt.Errorf("Failed to connect to SQL DB: %w", err) } diff --git a/go.mod b/go.mod index 07d23ce6..4921e38a 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,20 @@ require ( require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/go.sum b/go.sum index 1ab98501..776fc785 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,34 @@ github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbV github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= 
+github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= +github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21 h1:wRH9E07mfYqZ1EPphNTUIkrZ/7wcbZAGcjhrBlkWy4c= +github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21/go.mod h1:6m/MDzT+aFxaIo46f2MYV4d+qG9J9keLlHL0qKnQFgA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae h1:CGUFAtMXAsGajLeobq6ep+5wREYS+lepZSdPckY+Ba0= From 74abc2cb6c923c0cf7b528d030d742b61f198c3a Mon Sep 17 00:00:00 2001 From: Darnell Andries Date: Mon, 21 Oct 2024 20:36:11 -0700 Subject: [PATCH 19/19] Use different method for refreshing IAM token --- datastore/rds.go | 30 +++++++++++++++--------------- datastore/sql.go | 10 +++++++--- go.mod | 11 ++++++++--- go.sum | 18 ++++++++++++++++++ 4 files changed, 48 insertions(+), 21 deletions(-) diff --git a/datastore/rds.go b/datastore/rds.go index be3c2eda..0642e416 100644 --- a/datastore/rds.go +++ b/datastore/rds.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "database/sql/driver" "fmt" "net/url" "os" @@ -11,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/feature/rds/auth" - "github.com/jackc/pgx/stdlib" + "github.com/jackc/pgx/v5" ) const defaultRegion = "us-west-2" @@ -53,7 +52,7 @@ func 
newRDSConnector() *rdsConnector { } } -func (c *rdsConnector) getConnectionString(ctx context.Context) (string, error) { +func (c *rdsConnector) getAuthToken(ctx context.Context) (string, error) { c.mu.Lock() defer c.mu.Unlock() @@ -68,26 +67,27 @@ func (c *rdsConnector) getConnectionString(ctx context.Context) (string, error) if err != nil { return "", fmt.Errorf("failed to create authentication token: %w", err) } - c.token = url.QueryEscape(token) + c.token = token c.tokenCacheTime = time.Now() } - - return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", c.user, c.token, c.hostAndPort, c.dbName), nil + return c.token, nil } -func (c *rdsConnector) Connect(ctx context.Context) (driver.Conn, error) { - connStr, err := c.getConnectionString(ctx) +func (c *rdsConnector) getConnectionString(ctx context.Context) (string, error) { + token, err := c.getAuthToken(ctx) if err != nil { - return nil, err + return "", err } - return stdlib.GetDefaultDriver().Open(connStr) + return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", c.user, url.QueryEscape(token), c.hostAndPort, c.dbName), nil } -func (c *rdsConnector) Driver() driver.Driver { - return c -} +func (c *rdsConnector) updateConnConfig(ctx context.Context, config *pgx.ConnConfig) error { + token, err := c.getAuthToken(ctx) + if err != nil { + return err + } + config.Password = token -func (c *rdsConnector) Open(_ string) (driver.Conn, error) { - return nil, fmt.Errorf("open method unsupported") + return nil } diff --git a/datastore/sql.go b/datastore/sql.go index 7899921d..b127b1a2 100644 --- a/datastore/sql.go +++ b/datastore/sql.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "database/sql" "embed" "errors" "fmt" @@ -15,7 +14,8 @@ import ( "github.com/golang-migrate/migrate/v4/source/iofs" // import pgx so it can be used with sqlx - _ "github.com/jackc/pgx/stdlib" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/stdlib" "github.com/jmoiron/sqlx" ) @@ -100,7 +100,11 @@ func 
NewSQLDB(isTesting bool) (*SQLDB, error) { var db *sqlx.DB if rdsConnector != nil { - baseDB := sql.OpenDB(rdsConnector) + config, err := pgx.ParseConfig(sqlURL) + if err != nil { + return nil, err + } + baseDB := stdlib.OpenDB(*config, stdlib.OptionBeforeConnect(rdsConnector.updateConnConfig)) db = sqlx.NewDb(baseDB, "pgx") } else { db, err = sqlx.Connect("pgx", sqlURL) diff --git a/go.mod b/go.mod index 4921e38a..f830bc6c 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,11 @@ require ( github.com/golang-migrate/migrate/v4 v4.18.1 // indirect github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/pgx v3.6.2+incompatible // indirect + github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/joho/godotenv v1.5.1 // indirect @@ -67,8 +71,9 @@ require ( github.com/shengdoushi/base58 v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/throttled/throttled v2.2.5+incompatible // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 776fc785..f5961c49 100644 --- a/go.sum +++ b/go.sum @@ -136,9 +136,16 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgio v1.0.0/go.mod 
h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -223,6 +230,8 @@ github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
github.com/throttled/throttled v2.2.5+incompatible h1:65UB52X0qNTYiT0Sohp8qLYVFwZQPDw85uSa65OljjQ= @@ -245,8 +254,12 @@ golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -258,10 +271,14 @@ golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 
h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -279,6 +296,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=