diff --git a/.gitignore b/.gitignore index 4304ae0a..5ce67900 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ # Temp files /tmp/ + +.env diff --git a/Makefile b/Makefile index 3f40a34c..dbf56aa8 100644 --- a/Makefile +++ b/Makefile @@ -38,5 +38,6 @@ docker-test: COMMIT=$(GIT_COMMIT) VERSION=$(GIT_VERSION) BUILD_TIME=$(BUILD_TIME) docker compose -f docker-compose.yml run --rm dev make test instrumented: - gowrap gen -p github.com/brave/go-sync/datastore -i Datastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_datastore.go + gowrap gen -p github.com/brave/go-sync/datastore -i DynamoDatastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_dynamo_datastore.go + gowrap gen -p github.com/brave/go-sync/datastore -i SQLDatastore -t ./.prom-gowrap.tmpl -o ./datastore/instrumented_sql_datastore.go gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ./.prom-gowrap.tmpl -o ./cache/instrumented_redis.go diff --git a/cache/instrumented_redis.go b/cache/instrumented_redis.go index 9c7b83e9..37fd9338 100755 --- a/cache/instrumented_redis.go +++ b/cache/instrumented_redis.go @@ -1,10 +1,10 @@ -package cache +// Code generated by gowrap. DO NOT EDIT. +// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap -// DO NOT EDIT! 
-// This code is generated with http://github.com/hexdigest/gowrap tool -// using ../.prom-gowrap.tmpl template +package cache -//go:generate gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ../.prom-gowrap.tmpl -o instrumented_redis.go +//go:generate gowrap gen -p github.com/brave/go-sync/cache -i RedisClient -t ../.prom-gowrap.tmpl -o instrumented_redis.go -l "" import ( "context" @@ -80,6 +80,20 @@ func (_d RedisClientWithPrometheus) Get(ctx context.Context, key string, delete return _d.base.Get(ctx, key, delete) } +// Incr implements RedisClient +func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtract bool) (i1 int, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Incr", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.Incr(ctx, key, subtract) +} + // Set implements RedisClient func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val string, ttl time.Duration) (err error) { _since := time.Now() @@ -94,8 +108,8 @@ func (_d RedisClientWithPrometheus) Set(ctx context.Context, key string, val str return _d.base.Set(ctx, key, val, ttl) } -// Incr implements RedisClient -func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtract bool) (val int, err error) { +// SubscribeAndWait implements RedisClient +func (_d RedisClientWithPrometheus) SubscribeAndWait(ctx context.Context, channel string) (err error) { _since := time.Now() defer func() { result := "ok" @@ -103,7 +117,7 @@ func (_d RedisClientWithPrometheus) Incr(ctx context.Context, key string, subtra result = "error" } - redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "Incr", result).Observe(time.Since(_since).Seconds()) + redisclientDurationSummaryVec.WithLabelValues(_d.instanceName, "SubscribeAndWait", result).Observe(time.Since(_since).Seconds()) }() - return 
_d.base.Incr(ctx, key, subtract) + return _d.base.SubscribeAndWait(ctx, channel) } diff --git a/cache/redis.go b/cache/redis.go index 5aa9fbf2..a41f4896 100644 --- a/cache/redis.go +++ b/cache/redis.go @@ -2,6 +2,7 @@ package cache import ( "context" + "fmt" "os" "strconv" "strings" @@ -18,14 +19,11 @@ type RedisClient interface { Get(ctx context.Context, key string, delete bool) (string, error) Del(ctx context.Context, keys ...string) error FlushAll(ctx context.Context) error + SubscribeAndWait(ctx context.Context, channel string) error } -type redisSimpleClient struct { - client *redis.Client -} - -type redisClusterClient struct { - client *redis.ClusterClient +type redisClientImpl struct { + client redis.UniversalClient } // NewRedisClient create a client for standalone redis or redis cluster. @@ -50,24 +48,24 @@ func NewRedisClient() RedisClient { client := redis.NewClient(&redis.Options{ Addr: addrs[0], }) - r = &redisSimpleClient{client} + r = &redisClientImpl{client} } else { client := redis.NewClusterClient(&redis.ClusterOptions{ Addrs: addrs, PoolSize: poolSize, ReadOnly: true, }) - r = &redisClusterClient{client} + r = &redisClientImpl{client} } return r } -func (r *redisSimpleClient) Set(ctx context.Context, key string, val string, ttl time.Duration) error { +func (r *redisClientImpl) Set(ctx context.Context, key string, val string, ttl time.Duration) error { return r.client.Set(ctx, key, val, ttl).Err() } -func (r *redisSimpleClient) Incr(ctx context.Context, key string, subtract bool) (int, error) { +func (r *redisClientImpl) Incr(ctx context.Context, key string, subtract bool) (int, error) { var res *redis.IntCmd if subtract { res = r.client.Decr(ctx, key) @@ -78,7 +76,7 @@ func (r *redisSimpleClient) Incr(ctx context.Context, key string, subtract bool) return int(val), err } -func (r *redisSimpleClient) Get(ctx context.Context, key string, delete bool) (string, error) { +func (r *redisClientImpl) Get(ctx context.Context, key string, delete bool) 
(string, error) { var res *redis.StringCmd if delete { res = r.client.GetDel(ctx, key) @@ -92,47 +90,31 @@ func (r *redisSimpleClient) Get(ctx context.Context, key string, delete bool) (s return val, err } -func (r *redisSimpleClient) Del(ctx context.Context, keys ...string) error { +func (r *redisClientImpl) Del(ctx context.Context, keys ...string) error { return r.client.Del(ctx, keys...).Err() } -func (r *redisSimpleClient) FlushAll(ctx context.Context) error { +func (r *redisClientImpl) FlushAll(ctx context.Context) error { return r.client.FlushAll(ctx).Err() } -func (r *redisClusterClient) Set(ctx context.Context, key string, val string, ttl time.Duration) error { - return r.client.Set(ctx, key, val, ttl).Err() -} - -func (r *redisClusterClient) Incr(ctx context.Context, key string, subtract bool) (int, error) { - var res *redis.IntCmd - if subtract { - res = r.client.Decr(ctx, key) - } else { - res = r.client.Incr(ctx, key) +func (r *redisClientImpl) SubscribeAndWait(ctx context.Context, channel string) error { + pubsub := r.client.Subscribe(ctx, channel) + defer pubsub.Close() + + ch := pubsub.Channel() + + for { + select { + case msg, ok := <-ch: + if !ok { + return fmt.Errorf("redis channel unexpectedly closed") + } + if msg != nil { + return nil + } + case <-ctx.Done(): + return ctx.Err() + } } - val, err := res.Result() - return int(val), err -} - -func (r *redisClusterClient) Get(ctx context.Context, key string, delete bool) (string, error) { - var res *redis.StringCmd - if delete { - res = r.client.GetDel(ctx, key) - } else { - res = r.client.Get(ctx, key) - } - val, err := res.Result() - if err == redis.Nil { - return "", nil - } - return val, err -} - -func (r *redisClusterClient) Del(ctx context.Context, keys ...string) error { - return r.client.Del(ctx, keys...).Err() -} - -func (r *redisClusterClient) FlushAll(ctx context.Context) error { - return r.client.FlushAll(ctx).Err() } diff --git a/command/command.go b/command/command.go index 
4faa4d20..bdbc6ab8 100644 --- a/command/command.go +++ b/command/command.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "fmt" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/brave/go-sync/cache" @@ -33,15 +34,22 @@ const ( // handleGetUpdatesRequest handles GetUpdatesMessage and fills // GetUpdatesResponse. Target sync entities in the database will be updated or // deleted based on the client's requests. -func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, db datastore.Datastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessage, guRsp *sync_pb.GetUpdatesResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS // default value, might be changed later isNewClient := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_NEW_CLIENT isPoll := guMsg.GetUpdatesOrigin != nil && *guMsg.GetUpdatesOrigin == sync_pb.SyncEnums_PERIODIC + + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, nil, false) + if err != nil { + return nil, err + } + defer dbHelpers.Trx.Rollback() + if isNewClient { // Reject the request if client has >= 50 devices in the chain. activeDevices := 0 for { - hasChangesRemaining, syncEntities, err := db.GetUpdatesForType(deviceInfoTypeID, 0, false, clientID, int64(maxGUBatchSize)) + hasChangesRemaining, syncEntities, err := dbHelpers.getUpdatesFromDBs(deviceInfoTypeID, 0, false, maxGUBatchSize) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", deviceInfoTypeID) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -68,7 +76,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag } // Insert initial records if needed. 
- err := InsertServerDefinedUniqueEntities(db, clientID) + err := dbHelpers.InsertServerDefinedUniqueEntities() if err != nil { log.Error().Err(err).Msg("Create server defined unique entities failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -93,10 +101,15 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // Process from_progress_marker guRsp.NewProgressMarker = make([]*sync_pb.DataTypeProgressMarker, len(guMsg.FromProgressMarker)) guRsp.Entries = make([]*sync_pb.SyncEntity, 0, maxSize) + + var dataTypes []int + for i, fromProgressMarker := range guMsg.FromProgressMarker { guRsp.NewProgressMarker[i] = &sync_pb.DataTypeProgressMarker{} guRsp.NewProgressMarker[i].DataTypeId = fromProgressMarker.DataTypeId + dataTypes = append(dataTypes, int(*fromProgressMarker.DataTypeId)) + // Default token value is client's token, otherwise 0. // This token will be updated when we return the updated entities. if len(fromProgressMarker.Token) > 0 { @@ -134,8 +147,8 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag continue } - curMaxSize := int64(maxSize) - int64(len(guRsp.Entries)) - hasChangesRemaining, entities, err := db.GetUpdatesForType(int(*fromProgressMarker.DataTypeId), token, fetchFolders, clientID, curMaxSize) + curMaxSize := maxSize - len(guRsp.Entries) + hasChangesRemaining, syncEntities, err := dbHelpers.getUpdatesFromDBs(int(*fromProgressMarker.DataTypeId), token, fetchFolders, curMaxSize) if err != nil { log.Error().Err(err).Msgf("db.GetUpdatesForType failed for type %v", *fromProgressMarker.DataTypeId) errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -148,7 +161,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // which is essential for clients when initializing sync engine with nigori // type. Return a transient error for clients to re-request in this case. 
if isNewClient && *fromProgressMarker.DataTypeId == nigoriTypeID && - token == 0 && len(entities) == 0 { + token == 0 && len(syncEntities) == 0 { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, fmt.Errorf("nigori root folder entity is not ready yet") } @@ -159,8 +172,8 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // Fill the PB entry from above DB entries until maxSize is reached. j := 0 - for ; j < len(entities) && len(guRsp.Entries) < cap(guRsp.Entries); j++ { - entity, err := datastore.CreatePBSyncEntity(&entities[j]) + for ; j < len(syncEntities) && len(guRsp.Entries) < cap(guRsp.Entries); j++ { + entity, err := datastore.CreatePBSyncEntity(&syncEntities[j]) if err != nil { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, fmt.Errorf("error creating protobuf sync entity from DB entity: %w", err) @@ -170,7 +183,7 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag // If entities are appended, use the lastest mtime as returned token. if j != 0 { guRsp.NewProgressMarker[i].Token = make([]byte, binary.MaxVarintLen64) - binary.PutVarint(guRsp.NewProgressMarker[i].Token, *entities[j-1].Mtime) + binary.PutVarint(guRsp.NewProgressMarker[i].Token, *syncEntities[j-1].Mtime) } // Save (clientID#dataType, mtime) into cache after querying from DB. 
@@ -185,44 +198,35 @@ func handleGetUpdatesRequest(cache *cache.Cache, guMsg *sync_pb.GetUpdatesMessag if j == 0 { mtime = token } else { - mtime = *entities[j-1].Mtime + mtime = *syncEntities[j-1].Mtime } cache.SetTypeMtime(context.Background(), clientID, int(*fromProgressMarker.DataTypeId), mtime) } } - return &errCode, nil -} - -func getItemCounts(cache *cache.Cache, db datastore.Datastore, clientID string) (*datastore.ClientItemCounts, int, int, error) { - itemCounts, err := db.GetClientItemCount(clientID) - if err != nil { - return nil, 0, 0, err - } - newNormalCount, newHistoryCount, err := getInterimItemCounts(cache, clientID, false) + migratedEntities, err := dbHelpers.maybeMigrateToSQL(dataTypes) if err != nil { - return nil, 0, 0, err + return nil, fmt.Errorf("failed to perform migration: %w", err) } - return itemCounts, newNormalCount, newHistoryCount, nil -} -func getInterimItemCounts(cache *cache.Cache, clientID string, clear bool) (int, int, error) { - newNormalCount, err := cache.GetInterimCount(context.Background(), clientID, normalCountTypeStr, clear) - if err != nil { - return 0, 0, err + if len(migratedEntities) > 0 { + if err = dynamoDB.DeleteEntities(migratedEntities); err != nil { + log.Error().Err(err).Msgf("Failed to delete migrated items") + } } - newHistoryCount, err := cache.GetInterimCount(context.Background(), clientID, historyCountTypeStr, clear) - if err != nil { - return 0, 0, err + + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err } - return newNormalCount, newHistoryCount, nil + + return &errCode, nil } // handleCommitRequest handles the commit message and fills the commit response. // For each commit entry: // - new sync entity is created and inserted into the database if version is 0. // - existed sync entity will be updated if version is greater than 0. 
-func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, db datastore.Datastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, commitRsp *sync_pb.CommitResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { if commitMsg == nil { return nil, fmt.Errorf("nil commitMsg is received") } @@ -232,24 +236,16 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c return &errCode, nil } - itemCounts, newNormalCount, newHistoryCount, err := getItemCounts(cache, db, clientID) - if err != nil { - log.Error().Err(err).Msg("Get client's item count failed") + if !sqlDB.Variations().Ready { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR - return &errCode, fmt.Errorf("error getting client's item count: %w", err) + return &errCode, fmt.Errorf("SQL rollout not ready") } - currentNormalItemCount := itemCounts.ItemCount - currentHistoryItemCount := itemCounts.SumHistoryCounts() - - boostedQuotaAddition := 0 - if currentHistoryItemCount > maxClientHistoryObjectQuota { - // Sync chains with history entities stored before the history count fix - // may have history counts greater than the new history item quota. - // "Boost" the quota with the difference between the history quota and count, - // so users can start syncing other entities immediately, instead of waiting for the - // history TTL to get rid of the excess items. 
- boostedQuotaAddition = min(maxClientObjectQuota-maxClientHistoryObjectQuota, currentHistoryItemCount-maxClientHistoryObjectQuota) + + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, cache, true) + if err != nil { + return nil, err } + defer dbHelpers.Trx.Rollback() commitRsp.Entryresponse = make([]*sync_pb.CommitResponse_EntryResponse, len(commitMsg.Entries)) @@ -257,11 +253,13 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c idMap := make(map[string]string) // Map to save commit data type ID & mtime typeMtimeMap := make(map[int]int64) + + var migratedEntities []*datastore.SyncEntity for i, v := range commitMsg.Entries { entryRsp := &sync_pb.CommitResponse_EntryResponse{} commitRsp.Entryresponse[i] = entryRsp - entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID) + entityToCommit, err := datastore.CreateDBSyncEntity(v, commitMsg.CacheGuid, clientID, dbHelpers.ChainID) if err != nil { // Can't unmarshal & marshal the message from PB into DB format rspType := sync_pb.CommitResponse_INVALID_MESSAGE entryRsp.ResponseType = &rspType @@ -269,6 +267,8 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c continue } + createTime := time.Now() + // Check if ParentID is a client-generated ID which appears in previous // commit entries, if so, replace with corresponding server-generated ID. 
if entityToCommit.ParentID != nil { @@ -279,11 +279,12 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c oldVersion := *entityToCommit.Version isUpdateOp := oldVersion != 0 - isHistoryRelatedItem := *entityToCommit.DataType == datastore.HistoryTypeID || *entityToCommit.DataType == datastore.HistoryDeleteDirectiveTypeID + isHistoryItem := *entityToCommit.DataType == datastore.HistoryTypeID + isHistoryRelatedItem := isHistoryItem || *entityToCommit.DataType == datastore.HistoryDeleteDirectiveTypeID *entityToCommit.Version = *entityToCommit.Mtime - if *entityToCommit.DataType == datastore.HistoryTypeID { - // Check if item exists using client_unique_tag - isUpdateOp, err = db.HasItem(clientID, *entityToCommit.ClientDefinedUniqueTag) + + if isHistoryItem { + isUpdateOp, err = dbHelpers.hasItemInEitherDB(entityToCommit) if err != nil { log.Error().Err(err).Msg("Insert history sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -294,26 +295,30 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c } if !isUpdateOp { // Create - if currentNormalItemCount+currentHistoryItemCount+newNormalCount+newHistoryCount >= maxClientObjectQuota+boostedQuotaAddition { + totalItemCount := dbHelpers.ItemCounts.SumCounts(false) + if totalItemCount >= maxClientObjectQuota { rspType := sync_pb.CommitResponse_OVER_QUOTA entryRsp.ResponseType = &rspType - entryRsp.ErrorMessage = aws.String(fmt.Sprintf("There are already %v non-deleted objects in store", currentNormalItemCount+currentHistoryItemCount)) + entryRsp.ErrorMessage = aws.String(fmt.Sprintf("There are already %v non-deleted objects in store", totalItemCount)) continue } - if !isHistoryRelatedItem || currentHistoryItemCount+newHistoryCount < maxClientHistoryObjectQuota { + if !isHistoryRelatedItem || dbHelpers.ItemCounts.SumCounts(true) < maxClientHistoryObjectQuota { // Insert all non-history items. 
For history items, ignore any items above history quoto // and lie to the client about the objects being synced instead of returning OVER_QUOTA // so the client can continue to sync other entities. - conflict, err := db.InsertSyncEntity(entityToCommit) - if err != nil { - log.Error().Err(err).Msg("Insert sync entity failed") + var conflict bool + conflict, err = dbHelpers.insertSyncEntity(entityToCommit) + if err != nil || conflict { + if err != nil { + log.Error().Err(err).Msg("Insert sync entity failed") + } rspType := sync_pb.CommitResponse_TRANSIENT_ERROR if conflict { rspType = sync_pb.CommitResponse_CONFLICT } entryRsp.ResponseType = &rspType - entryRsp.ErrorMessage = aws.String(fmt.Sprintf("Insert sync entity failed: %v", err.Error())) + entryRsp.ErrorMessage = aws.String("Insert sync entity failed") continue } @@ -322,15 +327,9 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c if entityToCommit.OriginatorClientItemID != nil { idMap[*entityToCommit.OriginatorClientItemID] = entityToCommit.ID } - - if isHistoryRelatedItem { - newHistoryCount, err = cache.IncrementInterimCount(context.Background(), clientID, historyCountTypeStr, false) - } else { - newNormalCount, err = cache.IncrementInterimCount(context.Background(), clientID, normalCountTypeStr, false) - } } } else { // Update - conflict, deleted, err := db.UpdateSyncEntity(entityToCommit, oldVersion) + conflict, migratedEntity, err := dbHelpers.updateSyncEntity(entityToCommit, oldVersion) if err != nil { log.Error().Err(err).Msg("Update sync entity failed") rspType := sync_pb.CommitResponse_TRANSIENT_ERROR @@ -343,12 +342,8 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.ResponseType = &rspType continue } - if deleted { - if isHistoryRelatedItem { - newHistoryCount, err = cache.IncrementInterimCount(context.Background(), clientID, historyCountTypeStr, true) - } else { - newNormalCount, err = 
cache.IncrementInterimCount(context.Background(), clientID, normalCountTypeStr, true) - } + if migratedEntity != nil { + migratedEntities = append(migratedEntities, migratedEntity) } } if err != nil { @@ -364,9 +359,18 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c entryRsp.IdString = aws.String(entityToCommit.ID) entryRsp.Version = entityToCommit.Version entryRsp.Mtime = entityToCommit.Mtime + + if time.Since(createTime) < time.Millisecond { + // To ensure that all entities are in perfect order (sorted by mtime), + // we should ensure that the mtime for each entity is unique. + // CreateDBSyncEntity sets the mtime to the current time. + // If processing the entity took less than a millisecond, + // wait a little longer. + time.Sleep(time.Millisecond - time.Since(createTime)) + } } - newNormalCount, newHistoryCount, err = getInterimItemCounts(cache, clientID, true) + err = dbHelpers.ItemCounts.Save() if err != nil { log.Error().Err(err).Msg("Get interim item counts failed") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR @@ -378,35 +382,39 @@ func handleCommitRequest(cache *cache.Cache, commitMsg *sync_pb.CommitMessage, c cache.SetTypeMtime(context.Background(), clientID, dataType, mtime) } - err = db.UpdateClientItemCount(itemCounts, newNormalCount, newHistoryCount) - if err != nil { - // We only impose a soft quota limit on the item count for each client, so - // we only log the error without further actions here. The reason of this - // is we do not want to pay the cost to ensure strong consistency on this - // value and we do not want to give up previous DB operations if we cannot - // update the count this time. In addition, we do not retry this operation - // either because it is acceptable to miss one time of this update and - // chances of failing to update the item count multiple times in a row for - // a single client is quite low. 
- log.Error().Err(err).Msg("Update client item count failed") + if len(migratedEntities) > 0 { + if err = dynamoDB.DeleteEntities(migratedEntities); err != nil { + log.Error().Err(err).Msgf("Failed to delete migrated items") + } + } + + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err } + return &errCode, nil } // handleClearServerDataRequest handles clearing user data from the datastore and cache // and fills the response -func handleClearServerDataRequest(cache *cache.Cache, db datastore.Datastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { +func handleClearServerDataRequest(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, _ *sync_pb.ClearServerDataMessage, clientID string) (*sync_pb.SyncEnums_ErrorType, error) { errCode := sync_pb.SyncEnums_SUCCESS var err error - err = db.DisableSyncChain(clientID) + dbHelpers, err := NewDBHelpers(dynamoDB, sqlDB, clientID, nil, false) + if err != nil { + return nil, err + } + defer dbHelpers.Trx.Rollback() + + err = dynamoDB.DisableSyncChain(clientID) if err != nil { log.Error().Err(err).Msg("Failed to disable sync chain") errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, err } - syncEntities, err := db.ClearServerData(clientID) + syncEntities, err := dynamoDB.ClearServerData(clientID) if err != nil { errCode = sync_pb.SyncEnums_TRANSIENT_ERROR return &errCode, err } @@ -428,12 +436,22 @@ func handleClearServerDataRequest(cache *cache.Cache, db datastore.Datastore, _ } } + if err = dbHelpers.SQLDB.DeleteChain(dbHelpers.Trx, dbHelpers.ChainID); err != nil { + log.Error().Err(err).Msg("Failed to delete sync chain") + errCode = sync_pb.SyncEnums_TRANSIENT_ERROR + return &errCode, err + } + + if err = dbHelpers.Trx.Commit(); err != nil { + return nil, err + } + return &errCode, nil } // HandleClientToServerMessage handles the protobuf ClientToServerMessage and // fills the protobuf ClientToServerResponse.
-func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, db datastore.Datastore, clientID string) error { +func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerMessage, pbRsp *sync_pb.ClientToServerResponse, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string) error { // Create ClientToServerResponse and fill general fields for both GU and // Commit. pbRsp.StoreBirthday = aws.String(storeBirthday) @@ -447,7 +465,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_GET_UPDATES { guRsp := &sync_pb.GetUpdatesResponse{} pbRsp.GetUpdates = guRsp - pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, db, clientID) + pbRsp.ErrorCode, err = handleGetUpdatesRequest(cache, pb.GetUpdates, guRsp, dynamoDB, sqlDB, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) @@ -461,7 +479,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_COMMIT { commitRsp := &sync_pb.CommitResponse{} pbRsp.Commit = commitRsp - pbRsp.ErrorCode, err = handleCommitRequest(cache, pb.Commit, commitRsp, db, clientID) + pbRsp.ErrorCode, err = handleCommitRequest(cache, pb.Commit, commitRsp, dynamoDB, sqlDB, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) @@ -475,7 +493,7 @@ func HandleClientToServerMessage(cache *cache.Cache, pb *sync_pb.ClientToServerM } else if *pb.MessageContents == sync_pb.ClientToServerMessage_CLEAR_SERVER_DATA { csdRsp := &sync_pb.ClearServerDataResponse{} pbRsp.ClearServerData = csdRsp - pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, db, pb.ClearServerData, clientID) + pbRsp.ErrorCode, err = handleClearServerDataRequest(cache, dynamoDB, 
sqlDB, pb.ClearServerData, clientID) if err != nil { if pbRsp.ErrorCode != nil { pbRsp.ErrorMessage = aws.String(err.Error()) diff --git a/command/command_test.go b/command/command_test.go index 4c9066e4..5b54db9c 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -4,31 +4,54 @@ import ( "context" "encoding/binary" "encoding/json" + "fmt" "sort" "strconv" "strings" "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/expression" "github.com/brave/go-sync/cache" "github.com/brave/go-sync/command" "github.com/brave/go-sync/datastore" "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/schema/protobuf/sync_pb" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/lib/pq" "github.com/stretchr/testify/suite" ) const ( - clientID string = "client" - bookmarkType int32 = 32904 - nigoriType int32 = 47745 - cacheGUID string = "cache_guid" + testClientID string = "client" + bookmarkType int32 = 32904 + nigoriType int32 = 47745 + cacheGUID string = "cache_guid" + testDynamoTable = "client-entity-test-command" ) +func buildRolloutConfigString(dataTypes []int32) string { + var configParts []string + for _, dataType := range dataTypes { + configParts = append(configParts, fmt.Sprintf("%d=1.0", dataType)) + } + return strings.Join(configParts, ",") +} + type CommandTestSuite struct { suite.Suite - dynamo *datastore.Dynamo - cache *cache.Cache + storeInSQL bool + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func NewCommandTestSuite(storeInSQL bool) *CommandTestSuite { + return &CommandTestSuite{ + storeInSQL: storeInSQL, + } } type PBSyncAttrs struct { @@ -58,22 +81,35 @@ func NewPBSyncAttrs(name *string, version *int64, deleted *bool, folder *bool, s } func (suite *CommandTestSuite) SetupSuite() { - datastore.Table = "client-entity-test-command" + var rollouts string + if suite.storeInSQL { + rollouts = 
buildRolloutConfigString([]int32{bookmarkType, nigoriType}) + } + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + + datastore.Table = testDynamoTable var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") suite.cache = cache.NewCache(cache.NewRedisClient()) } func (suite *CommandTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *CommandTestSuite) TearDownTest() { + isEmpty, err := verifyNoDataInOtherDB(suite.storeInSQL, suite.dynamoDB, suite.sqlDB) + suite.Require().NoError(err, "Empty table verification should succeed") + suite.Require().True(isEmpty, "Other datastore should be empty") suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") suite.Require().NoError( suite.cache.FlushAll(context.Background()), "Failed to clear cache") } @@ -126,16 +162,28 @@ func getClientToServerCommitMsg(entries []*sync_pb.SyncEntity) *sync_pb.ClientTo } } -func getMarker(suite *CommandTestSuite, tokens []int64) []*sync_pb.DataTypeProgressMarker { - types := []int32{nigoriType, bookmarkType} // hard-coded types used in tests. 
- suite.Assert().Equal(len(types), len(tokens)) +type MarkerTokens struct { + Nigori *int64 + Bookmark *int64 +} + +func getMarker(tokens MarkerTokens) []*sync_pb.DataTypeProgressMarker { marker := []*sync_pb.DataTypeProgressMarker{} - for i, token := range tokens { - tokenBytes := make([]byte, binary.MaxVarintLen64) - binary.PutVarint(tokenBytes, token) - marker = append(marker, &sync_pb.DataTypeProgressMarker{ - DataTypeId: aws.Int32(types[i]), Token: tokenBytes}) + + createMarker := func(tokenPtr *int64, dataTypeID int32) { + if tokenPtr != nil { + tokenBytes := make([]byte, binary.MaxVarintLen64) + binary.PutVarint(tokenBytes, *tokenPtr) + marker = append(marker, &sync_pb.DataTypeProgressMarker{ + DataTypeId: aws.Int32(dataTypeID), + Token: tokenBytes, + }) + } } + + createMarker(tokens.Nigori, nigoriType) + createMarker(tokens.Bookmark, bookmarkType) + return marker } @@ -154,12 +202,15 @@ func getClientToServerGUMsg(marker []*sync_pb.DataTypeProgressMarker, } } -func getTokensFromNewMarker(suite *CommandTestSuite, newMarker []*sync_pb.DataTypeProgressMarker) (int64, int64) { +func getTokensFromNewMarker(suite *CommandTestSuite, newMarker []*sync_pb.DataTypeProgressMarker) MarkerTokens { nigoriToken, n := binary.Varint(newMarker[0].Token) suite.Assert().Greater(n, 0) bookmarkToken, n := binary.Varint(newMarker[1].Token) suite.Assert().Greater(n, 0) - return nigoriToken, bookmarkToken + return MarkerTokens{ + Nigori: &nigoriToken, + Bookmark: &bookmarkToken, + } } func assertCommonResponse(suite *CommandTestSuite, rsp *sync_pb.ClientToServerResponse, isCommit bool) { @@ -221,7 +272,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { // Commit and check response. 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -236,12 +287,16 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { } // GetUpdates with token 0 should get all of them. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) + msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -265,7 +320,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) @@ -281,13 +336,12 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { // GetUpdates again with previous returned mtimes and check the result, it // should include update items and newly commit items. 
- nigoriToken, bookmarkToken := getTokensFromNewMarker(suite, newMarker) - marker = getMarker(suite, []int64{nigoriToken, bookmarkToken}) + marker = getMarker(getTokensFromNewMarker(suite, newMarker)) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -312,7 +366,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { msg = getClientToServerCommitMsg(entries) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -322,13 +376,12 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { } // GetUpdates again with previous returned tokens should return 0 updates. 
- nigoriToken, bookmarkToken = getTokensFromNewMarker(suite, newMarker) - marker = getMarker(suite, []int64{nigoriToken, bookmarkToken}) + marker = getMarker(getTokensFromNewMarker(suite, newMarker)) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -339,13 +392,17 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_Basic() { func (suite *CommandTestSuite) TestHandleClientToServerMessage_NewClient() { // Prepare input message for NEW_CLIENT get updates request. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) + msg := getClientToServerGUMsg( marker, sync_pb.SyncEnums_NEW_CLIENT, true, nil) rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) @@ -389,7 +446,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_GUBatchSize() { // Commit and check response. 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -413,7 +470,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -438,7 +495,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -459,7 +516,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -476,7 +533,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} 
suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ -496,7 +553,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_QuotaLimit() { rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(4, len(rsp.Commit.Entryresponse)) @@ -518,7 +575,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo msg := getClientToServerCommitMsg([]*sync_pb.SyncEntity{child0}) rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -547,7 +604,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_ReplaceParentIDTo rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(6, len(rsp.Commit.Entryresponse)) @@ -557,12 +614,15 @@ func (suite *CommandTestSuite) 
TestHandleClientToServerMessage_ReplaceParentIDTo // Get updates to check if child's parent ID is replaced with the server // generated ID of its parent. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(6, len(rsp.GetUpdates.Entries)) @@ -589,11 +649,35 @@ func assertTypeMtimeCacheValue(suite *CommandTestSuite, key string, mtime int64, func insertSyncEntitiesWithoutUpdateCache( suite *CommandTestSuite, entries []*sync_pb.SyncEntity, clientID string) (ret []*datastore.SyncEntity) { + var chainID *int64 + var tx *sqlx.Tx + if suite.storeInSQL { + var err error + tx, err = suite.sqlDB.DB.Beginx() + suite.Require().NoError(err, "should be able to begin transaction") + defer tx.Rollback() + + chainID, err = suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "should be able to get chain ID") + } for _, entry := range entries { - dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID) + dbEntry, err := datastore.CreateDBSyncEntity(entry, nil, clientID, 1) suite.Require().NoError(err, "Create db entity from pb entity should succeed") - _, err = suite.dynamo.InsertSyncEntity(dbEntry) - suite.Require().NoError(err, "Insert sync entity should succeed") + + if suite.storeInSQL { + id, _ := uuid.NewV7() + dbEntry.ID = id.String() + dbEntry.ChainID = chainID + + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{dbEntry}) + suite.Require().NoError(err, "Insert sync entity should succeed") + 
suite.Require().False(conflict, "Insert should not conflict") + + } else { + _, err = suite.dynamoDB.InsertSyncEntity(dbEntry) + suite.Require().NoError(err, "Insert sync entity should succeed") + } + val, err := suite.cache.Get(context.Background(), clientID+"#"+strconv.Itoa(*dbEntry.DataType), false) suite.Require().NoError(err, "Get from cache should succeed") @@ -601,6 +685,10 @@ func insertSyncEntitiesWithoutUpdateCache( "Cache should not be updated") ret = append(ret, dbEntry) } + if tx != nil { + err := tx.Commit() + suite.Require().NoError(err, "Commit transaction should succeed") + } return } @@ -616,7 +704,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(3, len(rsp.Commit.Entryresponse)) @@ -634,10 +722,10 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba } // Latest mtime of each type in the commit should be stored in the cache. 
- assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Successful commit should write the latest mtime into cache") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "Successful commit should write the latest mtime into cache") @@ -648,29 +736,32 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba getCommitEntity("id4_bookmark", 0, false, getBookmarkSpecifics()), getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), }, - clientID) + testClientID) // GU request with the same or newer token should be short circuited, so // should return no updates. - marker := getMarker(suite, []int64{latestNigoriMtime, latestBookmarkMtime + 1}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(latestNigoriMtime), + Bookmark: aws.Int64(latestBookmarkMtime + 1), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(0, len(rsp.GetUpdates.Entries)) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache is not updated when short circuited") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "cache is not updated when short circuited") 
// Manually update cache for our DB insert. latestBookmarkMtime = *dbEntries[0].Mtime latestNigoriMtime = *dbEntries[1].Mtime - suite.cache.SetTypeMtime(context.Background(), clientID, int(bookmarkType), latestBookmarkMtime) - suite.cache.SetTypeMtime(context.Background(), clientID, int(nigoriType), latestNigoriMtime) + suite.cache.SetTypeMtime(context.Background(), testClientID, int(bookmarkType), latestBookmarkMtime) + suite.cache.SetTypeMtime(context.Background(), testClientID, int(nigoriType), latestNigoriMtime) // Commit another entry and check if cache is updated. entries = []*sync_pb.SyncEntity{ @@ -680,7 +771,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) @@ -688,25 +779,28 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ba suite.Assert().Equal(commitSuccess, *entryRsp.ResponseType) latestBookmarkMtime = *entryRsp.Mtime - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Successful commit should update the cache") // Send GU with an old token will get updates immediately. // Check the cache value again, should be the same as the latest mtime in rsp. 
- marker = getMarker(suite, []int64{latestNigoriMtime - 1, latestBookmarkMtime - 1}) + marker = getMarker(MarkerTokens{ + Nigori: aws.Int64(latestNigoriMtime - 1), + Bookmark: aws.Int64(latestBookmarkMtime - 1), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, false, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Assert().Equal(2, len(rsp.GetUpdates.Entries)) suite.Assert().Equal(latestNigoriMtime, *rsp.GetUpdates.Entries[0].Mtime) suite.Assert().Equal(latestBookmarkMtime, *rsp.GetUpdates.Entries[1].Mtime) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Cached token should be equal to latest mtime") - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(nigoriType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(nigoriType)), latestNigoriMtime, "Cached token should be equal to latest mtime") } @@ -719,14 +813,14 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(1, len(rsp.Commit.Entryresponse)) commitSuccess := sync_pb.CommitResponse_SUCCESS suite.Assert().Equal(commitSuccess, *rsp.Commit.Entryresponse[0].ResponseType) latestBookmarkMtime := *rsp.Commit.Entryresponse[0].Mtime - 
assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Commit should write the latest mtime into cache") @@ -738,20 +832,23 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Sk []*sync_pb.SyncEntity{ getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), }, - clientID) + testClientID) // Check that we will receive the manually inserted item from DB immediately. - marker := getMarker(suite, []int64{0, latestBookmarkMtime}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(latestBookmarkMtime), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_GU_TRIGGER, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) suite.Require().Equal(dbEntries[0].Mtime, rsp.GetUpdates.Entries[0].Mtime) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), *dbEntries[0].Mtime, "Successful commit should update the cache") } @@ -765,7 +862,7 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch rsp := &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, true) suite.Assert().Equal(2, len(rsp.Commit.Entryresponse)) @@ 
-776,45 +873,98 @@ func (suite *CommandTestSuite) TestHandleClientToServerMessage_TypeMtimeCache_Ch suite.Assert().NotEqual(latestBookmarkMtime, *entryRsp.Mtime) latestBookmarkMtime = *entryRsp.Mtime } - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "Commit should write the latest mtime into cache") // Send a GU with batch size set to 1, changesRemaining in rsp should be 1 // and cache should not be updated. - marker := getMarker(suite, []int64{0, 0}) + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(0), + }) clientBatch := int32(2) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, true, &clientBatch) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(2, len(rsp.GetUpdates.Entries)) suite.Require().Equal(int64(0), *rsp.GetUpdates.ChangesRemaining) mtime := *rsp.GetUpdates.Entries[0].Mtime - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache should not be updated when changes remaining = 1") // Send a second GU with changesRemaining in rsp = 0 and check cache is now // updated. 
- marker = getMarker(suite, []int64{0, mtime}) + marker = getMarker(MarkerTokens{ + Nigori: aws.Int64(0), + Bookmark: aws.Int64(mtime), + }) msg = getClientToServerGUMsg( marker, sync_pb.SyncEnums_PERIODIC, true, nil) rsp = &sync_pb.ClientToServerResponse{} suite.Require().NoError( - command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamo, clientID), + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), "HandleClientToServerMessage should succeed") assertCommonResponse(suite, rsp, false) suite.Require().Equal(1, len(rsp.GetUpdates.Entries)) suite.Require().Equal(int64(0), *rsp.GetUpdates.ChangesRemaining) - assertTypeMtimeCacheValue(suite, clientID+"#"+strconv.Itoa(int(bookmarkType)), + assertTypeMtimeCacheValue(suite, testClientID+"#"+strconv.Itoa(int(bookmarkType)), latestBookmarkMtime, "cache should be updated when changes remaining = 0") } +func getDatastoreCount(checkSQL bool, dynamoDB *datastore.Dynamo, sqlDB *datastore.SQLDB, dataTypes []int32) (int64, error) { + var count int64 + if !checkSQL { + filt := expression.Name("DataType").Equal(expression.Value(dataTypes[0])) + for _, dataType := range dataTypes[1:] { + filt = filt.Or(expression.Name("DataType").Equal(expression.Value(dataType))) + } + + expr, err := expression.NewBuilder().WithFilter(filt).Build() + if err != nil { + return 0, err + } + + input := &dynamodb.ScanInput{ + TableName: aws.String(datastore.Table), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + FilterExpression: expr.Filter(), + } + result, err := dynamoDB.Scan(input) + if err != nil { + return 0, err + } + count = *result.Count + } else { + query := "SELECT COUNT(*) FROM entities WHERE data_type = ANY($1)" + err := sqlDB.QueryRow(query, pq.Array(dataTypes)).Scan(&count) + if err != nil { + return 0, err + } + } + return count, nil +} + +func verifyNoDataInOtherDB(storeInSQL bool, dynamoDB *datastore.Dynamo, sqlDB 
*datastore.SQLDB) (bool, error) { + count, err := getDatastoreCount(!storeInSQL, dynamoDB, sqlDB, []int32{nigoriType, bookmarkType}) + if err != nil { + return false, err + } + return count == 0, nil +} + func TestCommandTestSuite(t *testing.T) { - suite.Run(t, new(CommandTestSuite)) + t.Run("Dynamo", func(t *testing.T) { + suite.Run(t, NewCommandTestSuite(false)) + }) + t.Run("SQL", func(t *testing.T) { + suite.Run(t, NewCommandTestSuite(true)) + }) } diff --git a/command/helpers.go b/command/helpers.go new file mode 100644 index 00000000..52fb62a8 --- /dev/null +++ b/command/helpers.go @@ -0,0 +1,361 @@ +package command + +import ( + "fmt" + "math/rand/v2" + "time" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" +) + +type DBHelpers struct { + dynamoDB datastore.DynamoDatastore + SQLDB datastore.SQLDatastore + Trx *sqlx.Tx + clientID string + ChainID int64 + variationHashDecimal float32 + ItemCounts *ItemCounts +} + +func NewDBHelpers(dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, clientID string, cache *cache.Cache, initItemCounts bool) (*DBHelpers, error) { + trx, err := sqlDB.Beginx() + if err != nil { + return nil, fmt.Errorf("error starting transaction: %w", err) + } + + chainID, err := sqlDB.GetAndLockChainID(trx, clientID) + if err != nil { + trx.Rollback() + return nil, err + } + // Get this value to determine if the client should be included in SQL rollouts + variationHashDecimal := datastore.VariationHashDecimal(clientID) + + var itemCounts *ItemCounts + if initItemCounts { + itemCounts, err = GetItemCounts(cache, dynamoDB, sqlDB, trx, clientID, *chainID) + if err != nil { + trx.Rollback() + return nil, err + } + } + + return &DBHelpers{ + dynamoDB: dynamoDB, + SQLDB: sqlDB, + Trx: trx, + clientID: clientID, + ChainID: *chainID, + variationHashDecimal: variationHashDecimal, + ItemCounts: itemCounts, + }, nil +} + +func (h *DBHelpers) 
hasItemInEitherDB(entity *datastore.SyncEntity) (exists bool, err error) { + // Check if item exists using client_unique_tag + if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + exists, err := h.SQLDB.HasItem(h.Trx, h.ChainID, *entity.ClientDefinedUniqueTag) + if err != nil { + return false, err + } + if !exists { + return h.dynamoDB.HasItem(h.clientID, *entity.ClientDefinedUniqueTag) + } + return exists, err + } + return h.dynamoDB.HasItem(h.clientID, *entity.ClientDefinedUniqueTag) +} + +func (h *DBHelpers) getUpdatesFromDBs(dataType int, token int64, fetchFolders bool, curMaxSize int) (hasChangesRemaining bool, syncEntities []datastore.SyncEntity, err error) { + if curMaxSize == 0 { + return false, nil, nil + } + if h.SQLDB.Variations().ShouldSaveToSQL(dataType, h.variationHashDecimal) { + // Get the earliest mtime for entities migrated from Dynamo to SQL, if available. + dynamoMigrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, []int{dataType}) + if err != nil { + return false, nil, err + } + + // First, get all entities from Dynamo that are past the given token. + // Only query up until the earliest mtime within SQL. + if migrationStatus := dynamoMigrationStatuses[dataType]; migrationStatus == nil || (migrationStatus.EarliestMtime != nil && *migrationStatus.EarliestMtime > token) { + var earliestMtime *int64 + if migrationStatus != nil { + earliestMtime = migrationStatus.EarliestMtime + } + hasChangesRemaining, syncEntities, err = h.dynamoDB.GetUpdatesForType(dataType, &token, earliestMtime, fetchFolders, h.clientID, curMaxSize, true) + if err != nil { + return false, nil, err + } + curMaxSize -= len(syncEntities) + } + + // Then get all entities from SQL. We can append the items to syncEntities because + // all Dynamo entities are guaranteed to be older (by mtime) than SQL entities. 
+ if curMaxSize > 0 { + sqlHasChangesRemaining, sqlSyncEntities, err := h.SQLDB.GetUpdatesForType(h.Trx, dataType, token, fetchFolders, h.ChainID, curMaxSize) + if err != nil { + return false, nil, err + } + if sqlHasChangesRemaining { + hasChangesRemaining = true + } + syncEntities = append(syncEntities, sqlSyncEntities...) + } + + return hasChangesRemaining, syncEntities, nil + } + return h.dynamoDB.GetUpdatesForType(dataType, &token, nil, fetchFolders, h.clientID, curMaxSize, true) +} + +func (h *DBHelpers) insertSyncEntity(entity *datastore.SyncEntity) (conflict bool, err error) { + savedInSQL := h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + if savedInSQL { + conflict, err = h.SQLDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + } else { + conflict, err = h.dynamoDB.InsertSyncEntity(entity) + } + if err == nil && !conflict && (entity.Deleted == nil || !*entity.Deleted) { + if err = h.ItemCounts.RecordChange(*entity.DataType, false, savedInSQL); err != nil { + return false, err + } + } + return conflict, nil +} + +func getMigratedEntityID(entity *datastore.SyncEntity) (string, error) { + id := entity.ID + if *entity.DataType == datastore.HistoryTypeID { + // In Dynamo, History entities are stored with the client tag as the ID. + // Since the SQL table uses a UUID for the id, generate a new ID here. + newID, err := uuid.NewV7() + if err != nil { + return "", err + } + id = newID.String() + } + return id, nil +} + +func (h *DBHelpers) updateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, migratedEntity *datastore.SyncEntity, err error) { + var deleted bool + shouldSaveInSQL := h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) + if shouldSaveInSQL { + conflict, deleted, err = h.SQLDB.UpdateSyncEntity(h.Trx, entity, oldVersion) + if err != nil { + return false, nil, err + } + // Conflict might mean that the entity does not exist in SQL but exists in Dynamo. 
+ // Check for a Dynamo entity and migrate it accordingly. + if conflict { + oldEntity, err := h.dynamoDB.GetEntity(datastore.ItemQuery{ + ID: entity.ID, + ClientID: entity.ClientID, + }) + if err != nil { + return false, nil, err + } + if oldEntity == nil { + // The conflict is unrelated to a pending Dynamo to SQL migration. + // Return conflict error to client. + return true, nil, nil + } + if oldEntity.Deleted == nil || !*oldEntity.Deleted { + // If the stored entity was not already deleted, decrement the + // Dynamo item count since we'll be migrating the entity to SQL. + if err = h.ItemCounts.RecordChange(*entity.DataType, true, false); err != nil { + return false, nil, err + } + } + migratedEntityID, err := getMigratedEntityID(entity) + if err != nil { + return false, nil, err + } + entity.ID = migratedEntityID + conflict, err = h.SQLDB.InsertSyncEntities(h.Trx, []*datastore.SyncEntity{entity}) + if err != nil { + return false, nil, err + } + if !conflict && (entity.Deleted == nil || !*entity.Deleted) { + // If the new entity is not considered deleted, increment the + // SQL interim count. + if err = h.ItemCounts.RecordChange(*entity.DataType, false, true); err != nil { + return false, nil, err + } + } + return conflict, oldEntity, err + } + } else { + conflict, deleted, err = h.dynamoDB.UpdateSyncEntity(entity, oldVersion) + if err != nil { + return false, nil, err + } + } + if !conflict && deleted { + if err = h.ItemCounts.RecordChange(*entity.DataType, true, shouldSaveInSQL); err != nil { + return false, nil, err + } + } + return conflict, nil, err +} + +func (h *DBHelpers) maybeMigrateToSQL(dataTypes []int) (migratedEntities []*datastore.SyncEntity, err error) { + if !h.SQLDB.Variations().Ready { + return nil, nil + } + if rand.Float32() > h.SQLDB.MigrateIntervalPercent() { + return nil, nil + } + var applicableDataTypes []int + // Get all applicable data types for migration for a given chain. 
+ for _, dataType := range dataTypes { + if !h.SQLDB.Variations().ShouldMigrateToSQL(dataType, h.variationHashDecimal) { + continue + } + applicableDataTypes = append(applicableDataTypes, dataType) + } + if len(applicableDataTypes) == 0 { + return nil, nil + } + + // Get the earliest mtime for entities that were already migrated. + // We use this so we can apply a max mtime filter to our Dynamo query. + migrationStatuses, err := h.SQLDB.GetDynamoMigrationStatuses(h.Trx, h.ChainID, applicableDataTypes) + if err != nil { + return nil, err + } + + currLimit := h.SQLDB.MigrateChunkSize() + var updatedMigrationStatuses []*datastore.MigrationStatus + + for _, dataType := range applicableDataTypes { + if currLimit <= 0 { + break + } + migrationStatus := migrationStatuses[dataType] + if migrationStatus != nil && migrationStatus.EarliestMtime == nil { + // earliest_mtime = null in migration status means that all entities + // for the data type have already been migrated. skip this data type + continue + } + + var earliestMtime *int64 + if migrationStatus != nil { + earliestMtime = migrationStatus.EarliestMtime + } else { + now := time.Now().UnixMilli() + migrationStatus = &datastore.MigrationStatus{ + ChainID: h.ChainID, + DataType: dataType, + EarliestMtime: &now, + } + } + + // Query the entities in descending order, so we insert the latest items first. + // If the total entity count exceeds the chunk size, then we only want to insert a subset of the latest + // entities from Dynamo for this particular update. + hasChangesRemaining, syncEntities, err := h.dynamoDB.GetUpdatesForType(dataType, nil, earliestMtime, true, h.clientID, currLimit, false) + if err != nil { + return nil, err + } + + currLimit -= len(syncEntities) + + if !hasChangesRemaining { + // No entities from Dynamo remaining. mark earliest_mtime as null to + // indicate that all entities have been moved over. 
+ migrationStatus.EarliestMtime = nil
+ } else if len(syncEntities) > 0 {
+ // Since the dynamo query was sorted in descending order, the last item
+ // will contain the earliest_mtime in the slice.
+ if lastItem := &syncEntities[len(syncEntities)-1]; lastItem.Mtime != nil {
+ migrationStatus.EarliestMtime = lastItem.Mtime
+ }
+ }
+ updatedMigrationStatuses = append(updatedMigrationStatuses, migrationStatus)
+
+ var syncEntitiesPtr []*datastore.SyncEntity
+ for _, syncEntity := range syncEntities {
+ syncEntity.ChainID = &h.ChainID
+ newEntity := &syncEntity
+ migratedEntityID, err := getMigratedEntityID(&syncEntity)
+ if err != nil {
+ return nil, err
+ }
+ if migratedEntityID != syncEntity.ID {
+ // Only apply new entity ID to the entity that will be inserted,
+ // and NOT the original entity which will be deleted later on.
+ entityClone := syncEntity
+ entityClone.ID = migratedEntityID
+ newEntity = &entityClone
+ }
+ syncEntitiesPtr = append(syncEntitiesPtr, newEntity)
+ migratedEntities = append(migratedEntities, &syncEntity)
+ }
+
+ if len(syncEntitiesPtr) > 0 {
+ if _, err = h.SQLDB.InsertSyncEntities(h.Trx, syncEntitiesPtr); err != nil {
+ return nil, err
+ }
+ }
+ }
+ if len(updatedMigrationStatuses) > 0 {
+ if err = h.SQLDB.UpdateDynamoMigrationStatuses(h.Trx, updatedMigrationStatuses); err != nil {
+ return nil, err
+ }
+ }
+ return migratedEntities, nil
+}
+
+// InsertServerDefinedUniqueEntities inserts the server defined unique tag
+// entities if they are not in the DB yet for a specific client.
+func (h *DBHelpers) InsertServerDefinedUniqueEntities() error {
+ if !h.SQLDB.Variations().Ready {
+ return fmt.Errorf("SQL rollout not ready")
+ }
+ // Check if they already exist for this client.
+ // If yes, just return directly. 
+ ready, err := h.dynamoDB.HasServerDefinedUniqueTag(h.clientID, nigoriTag) + if err != nil { + return fmt.Errorf("error checking if entity with a server tag existed: %w", err) + } + if ready { + return nil + } + + entities, err := CreateServerDefinedUniqueEntities(h.clientID, h.ChainID) + if err != nil { + return err + } + + var dynamoEntities []*datastore.SyncEntity + var sqlEntities []*datastore.SyncEntity + for _, entity := range entities { + if h.SQLDB.Variations().ShouldSaveToSQL(*entity.DataType, h.variationHashDecimal) { + sqlEntities = append(sqlEntities, entity) + } else { + dynamoEntities = append(dynamoEntities, entity) + } + } + + if len(dynamoEntities) > 0 { + err = h.dynamoDB.InsertSyncEntitiesWithServerTags(dynamoEntities) + if err != nil { + return fmt.Errorf("error inserting entities with server tags to DynamoDB: %w", err) + } + } + + if len(sqlEntities) > 0 { + _, err = h.SQLDB.InsertSyncEntities(h.Trx, sqlEntities) + if err != nil { + return fmt.Errorf("error inserting entities with server tags to SQL: %w", err) + } + } + + return nil +} diff --git a/command/item_count.go b/command/item_count.go new file mode 100644 index 00000000..87485e1a --- /dev/null +++ b/command/item_count.go @@ -0,0 +1,128 @@ +package command + +import ( + "context" + "fmt" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/jmoiron/sqlx" + "github.com/rs/zerolog/log" +) + +type ItemCounts struct { + cache *cache.Cache + dynamoDB datastore.DynamoDatastore + dynamoItemCounts *datastore.DynamoItemCounts + sqlItemCounts *datastore.SQLItemCounts + clientID string + cacheNewNormalCount int + cacheNewHistoryCount int + sqlTxNewNormalCount int + sqlTxNewHistoryCount int +} + +// GetItemCounts returns consolidated item counts from Dynamo and SQL +func GetItemCounts(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore, tx *sqlx.Tx, clientID string, chainID int64) (*ItemCounts, error) { + dynamoItemCounts, err := 
dynamoDB.GetClientItemCount(clientID)
+ if err != nil {
+ return nil, err
+ }
+
+ sqlItemCounts, err := sqlDB.GetItemCounts(tx, chainID)
+ if err != nil {
+ return nil, err
+ }
+
+ itemCounts := ItemCounts{
+ cache: cache,
+ dynamoDB: dynamoDB,
+ dynamoItemCounts: dynamoItemCounts,
+ sqlItemCounts: sqlItemCounts,
+ clientID: clientID,
+ cacheNewNormalCount: 0,
+ cacheNewHistoryCount: 0,
+ sqlTxNewNormalCount: 0,
+ sqlTxNewHistoryCount: 0,
+ }
+ err = itemCounts.updateInterimItemCounts(false)
+ if err != nil {
+ return nil, err
+ }
+
+ return &itemCounts, nil
+}
+
+func (itemCounts *ItemCounts) updateInterimItemCounts(clear bool) error {
+ newNormalCount, err := itemCounts.cache.GetInterimCount(context.Background(), itemCounts.clientID, normalCountTypeStr, clear)
+ if err != nil {
+ return err
+ }
+ newHistoryCount, err := itemCounts.cache.GetInterimCount(context.Background(), itemCounts.clientID, historyCountTypeStr, clear)
+ if err != nil {
+ return err
+ }
+ itemCounts.cacheNewNormalCount = newNormalCount
+ itemCounts.cacheNewHistoryCount = newHistoryCount
+ return nil
+}
+
+// RecordChange updates the interim count according to the addition or deletion of an entity
+func (itemCounts *ItemCounts) RecordChange(dataType int, subtract bool, isStoredInSQL bool) error {
+ isHistory := dataType == datastore.HistoryTypeID || dataType == datastore.HistoryDeleteDirectiveTypeID
+ if isStoredInSQL {
+ delta := 1
+ if subtract {
+ delta = -1
+ }
+ if isHistory {
+ itemCounts.sqlTxNewHistoryCount += delta
+ } else {
+ itemCounts.sqlTxNewNormalCount += delta
+ }
+ } else {
+ countType := normalCountTypeStr
+ if isHistory {
+ countType = historyCountTypeStr
+ }
+ newCount, err := itemCounts.cache.IncrementInterimCount(context.Background(), itemCounts.clientID, countType, subtract)
+ if err != nil {
+ return fmt.Errorf("failed to increment history cache count")
+ }
+ if isHistory {
+ itemCounts.cacheNewHistoryCount = newCount
+ } else {
+ itemCounts.cacheNewNormalCount = 
newCount
+ }
+ }
+ return nil
+}
+
+// SumCounts returns the count of entities for a chain
+func (itemCounts *ItemCounts) SumCounts(historyOnly bool) int {
+ sum := itemCounts.dynamoItemCounts.SumHistoryCounts() + itemCounts.sqlItemCounts.HistoryItemCount + itemCounts.sqlTxNewHistoryCount + itemCounts.cacheNewHistoryCount
+ if !historyOnly {
+ sum += itemCounts.dynamoItemCounts.ItemCount + itemCounts.sqlItemCounts.NormalItemCount + itemCounts.sqlTxNewNormalCount + itemCounts.cacheNewNormalCount
+ }
+ return sum
+}
+
+// Save persists the interim counts to Dynamo
+func (itemCounts *ItemCounts) Save() error {
+ err := itemCounts.updateInterimItemCounts(true)
+ if err != nil {
+ return fmt.Errorf("error getting interim item count: %w", err)
+ }
+ if err = itemCounts.dynamoDB.UpdateClientItemCount(itemCounts.dynamoItemCounts, itemCounts.cacheNewNormalCount, itemCounts.cacheNewHistoryCount); err != nil {
+ // We only impose a soft quota limit on the item count for each client, so
+ // we only log the error without further actions here. The reason for this
+ // is we do not want to pay the cost to ensure strong consistency on this
+ // value and we do not want to give up previous DB operations if we cannot
+ // update the count this time. In addition, we do not retry this operation
+ // either because it is acceptable to miss one time of this update and
+ // chances of failing to update the item count multiple times in a row for
+ // a single client are quite low. 
+ log.Error().Err(err).Msg("Update client item count failed") + } + return nil +} diff --git a/command/item_count_test.go b/command/item_count_test.go new file mode 100644 index 00000000..2ca6472a --- /dev/null +++ b/command/item_count_test.go @@ -0,0 +1,273 @@ +package command_test + +import ( + "context" + "testing" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/command" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/suite" +) + +type ItemCountTestSuite struct { + suite.Suite + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func (suite *ItemCountTestSuite) SetupSuite() { + var rollouts string + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) + + datastore.Table = "client-entity-test-command" + var err error + suite.dynamoDB, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") + + suite.cache = cache.NewCache(cache.NewRedisClient()) +} + +func (suite *ItemCountTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") +} + +func (suite *ItemCountTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") + suite.Require().NoError( + suite.cache.FlushAll(context.Background()), "Failed to clear cache") +} + +func (suite *ItemCountTestSuite) insertSyncEntity(tx *sqlx.Tx, itemCounts *command.ItemCounts, insertInSQL bool, dataType int, clientID string, chainID int64) *datastore.SyncEntity { + id, err := uuid.NewV7() + 
suite.Require().NoError(err, "Failed to generate UUID") + + entity := &datastore.SyncEntity{ + ChainID: &chainID, + ClientID: clientID, + ID: id.String(), + DataType: &dataType, + Version: &[]int64{1}[0], + Mtime: &[]int64{123}[0], + Ctime: &[]int64{123}[0], + Specifics: []byte{1, 2}, + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + ClientDefinedUniqueTag: &[]string{id.String()}[0], + DataTypeMtime: &[]string{"123#12345678"}[0], + } + + if insertInSQL { + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{entity}) + suite.Require().NoError(err, "Failed to insert sync entity in SQL") + suite.Require().False(conflict, "Unexpected conflict when inserting sync entity in SQL") + } else { + conflict, err := suite.dynamoDB.InsertSyncEntity(entity) + suite.Require().NoError(err, "Failed to insert sync entity in DynamoDB") + suite.Require().False(conflict, "Unexpected conflict when inserting sync entity in DynamoDB") + } + suite.Require().NoError(itemCounts.RecordChange(dataType, false, insertInSQL), "Should be able record change") + return entity +} + +func (suite *ItemCountTestSuite) deleteSyncEntity(tx *sqlx.Tx, itemCounts *command.ItemCounts, deleteInSQL bool, entity *datastore.SyncEntity) { + *entity.Version = 2 + *entity.Deleted = true + if deleteInSQL { + conflict, deleted, err := suite.sqlDB.UpdateSyncEntity(tx, entity, 1) + suite.Require().NoError(err, "Failed to delete sync entity in SQL") + suite.Require().False(conflict, "Unexpected conflict when deleting sync entity in SQL") + suite.Require().True(deleted, "Expected entity to be marked as deleted in SQL") + } else { + conflict, deleted, err := suite.dynamoDB.UpdateSyncEntity(entity, 1) + suite.Require().NoError(err, "Failed to delete sync entity in DynamoDB") + suite.Require().False(conflict, "Unexpected conflict when deleting sync entity in DynamoDB") + suite.Require().True(deleted, "Expected entity to be marked as deleted in DynamoDB") + } + 
suite.Require().NoError(itemCounts.RecordChange(*entity.DataType, true, deleteInSQL), "Should be able to record change") +} + +func (suite *ItemCountTestSuite) TestPreloaded() { + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, testClientID, *chainID) + suite.Require().NoError(err) + + suite.Equal(0, itemCounts.SumCounts(false), "Expected initial sum of item counts to be zero") + suite.Equal(0, itemCounts.SumCounts(true), "Expected initial sum of item counts to be zero") +} + +func (suite *ItemCountTestSuite) TestInsertAndCountItems() { + clientID := "client1" + + // Start a new transaction for insertions + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for insertions") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts") + + // Insert items + suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 123, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID) + + suite.Equal(4, 
itemCounts.SumCounts(true), "Expected history total count of 4") + suite.Equal(7, itemCounts.SumCounts(false), "Expected total count of 7") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + // Start a new transaction for counting + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for counting") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts") + + suite.Equal(4, itemCounts.SumCounts(true), "Expected history total count of 4") + suite.Equal(7, itemCounts.SumCounts(false), "Expected total count of 7") + + clientID = "client2" + chainID, err = suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID for other client") + + otherItemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get item counts for other client") + + suite.Equal(0, otherItemCounts.SumCounts(true), "Expected history total count of 0 for other client") + suite.Equal(0, otherItemCounts.SumCounts(false), "Expected total count of 0 for other client") +} + +func (suite *ItemCountTestSuite) TestDeleteAfterInsertCommit() { + clientID := "client1" + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get initial item counts") + + var sqlEntitiesToDelete []*datastore.SyncEntity + var dynamoEntitiesToDelete []*datastore.SyncEntity + + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, 123, 
clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID)) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + for _, entity := range sqlEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, true, entity) + } + for _, entity := range dynamoEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, false, entity) + } + + suite.Equal(2, itemCounts.SumCounts(true), "Expected history count of 2 after deletions") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 after deletions") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for final count") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get final item counts") + + suite.Equal(2, itemCounts.SumCounts(true), "Expected history count of 2 after deletions") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 after deletions") +} + +func (suite *ItemCountTestSuite) 
TestDeleteBeforeInsertCommit() { + clientID := "client1" + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, clientID) + suite.Require().NoError(err, "Failed to get chain ID") + + itemCounts, err := command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get initial item counts") + + var sqlEntitiesToDelete []*datastore.SyncEntity + var dynamoEntitiesToDelete []*datastore.SyncEntity + + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, 123, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, 124, clientID, *chainID) + sqlEntitiesToDelete = append(sqlEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, true, datastore.HistoryTypeID, clientID, *chainID) + suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, 125, clientID, *chainID)) + dynamoEntitiesToDelete = append(dynamoEntitiesToDelete, suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryDeleteDirectiveTypeID, clientID, *chainID)) + suite.insertSyncEntity(tx, itemCounts, false, datastore.HistoryTypeID, clientID, *chainID) + + for _, entity := range sqlEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, true, entity) + } + for _, entity := range dynamoEntitiesToDelete { + suite.deleteSyncEntity(tx, itemCounts, false, entity) + } + + // Check counts before commit + suite.Equal(2, itemCounts.SumCounts(true), "Expected SQL count of 2 before commit") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 before commit") + + suite.Require().NoError(tx.Commit(), "Failed to commit transaction") + + // Start a new 
transaction for final count + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Failed to start transaction for final count") + defer tx.Rollback() + + itemCounts, err = command.GetItemCounts(suite.cache, suite.dynamoDB, suite.sqlDB, tx, clientID, *chainID) + suite.Require().NoError(err, "Failed to get final item counts") + + // Check counts after commit + suite.Equal(2, itemCounts.SumCounts(true), "Expected SQL count of 2 after commit") + suite.Equal(4, itemCounts.SumCounts(false), "Expected total count of 4 after commit") +} + +func TestItemCountTestSuite(t *testing.T) { + suite.Run(t, new(ItemCountTestSuite)) +} diff --git a/command/migrate_test.go b/command/migrate_test.go new file mode 100644 index 00000000..38dc92e0 --- /dev/null +++ b/command/migrate_test.go @@ -0,0 +1,314 @@ +package command_test + +import ( + "context" + "math" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/command" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/brave/go-sync/schema/protobuf/sync_pb" + "github.com/stretchr/testify/suite" +) + +type CommandMigrateTestSuite struct { + suite.Suite + dynamoDB *datastore.Dynamo + cache *cache.Cache + sqlDB *datastore.SQLDB +} + +func (suite *CommandMigrateTestSuite) SetupSuite() { + datastore.Table = testDynamoTable + var err error + suite.dynamoDB, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") + + suite.cache = cache.NewCache(cache.NewRedisClient()) +} + +type ExpectedCounts struct { + SQLNigori int64 + SQLBookmark int64 + DynamoNigori int64 + DynamoBookmark int64 +} + +func (suite *CommandMigrateTestSuite) assertDatastoreCounts(expected ExpectedCounts) { + sqlNigoriCount, err := getDatastoreCount(true, suite.dynamoDB, suite.sqlDB, []int32{nigoriType}) + suite.Require().NoError(err, "Failed to get SQL nigori count") + + sqlBookmarkCount, err := 
getDatastoreCount(true, suite.dynamoDB, suite.sqlDB, []int32{bookmarkType}) + suite.Require().NoError(err, "Failed to get SQL bookmark count") + + dynamoNigoriCount, err := getDatastoreCount(false, suite.dynamoDB, suite.sqlDB, []int32{nigoriType}) + suite.Require().NoError(err, "Failed to get DynamoDB nigori count") + + dynamoBookmarkCount, err := getDatastoreCount(false, suite.dynamoDB, suite.sqlDB, []int32{bookmarkType}) + suite.Require().NoError(err, "Failed to get DynamoDB bookmark count") + + suite.Assert().Equal(expected.SQLNigori, sqlNigoriCount, "SQL nigori count mismatch") + suite.Assert().Equal(expected.SQLBookmark, sqlBookmarkCount, "SQL bookmark count mismatch") + suite.Assert().Equal(expected.DynamoNigori, dynamoNigoriCount, "DynamoDB nigori count mismatch") + suite.Assert().Equal(expected.DynamoBookmark, dynamoBookmarkCount, "DynamoDB bookmark count mismatch") +} + +func (suite *CommandMigrateTestSuite) assertSQLMigrationStatus(dataType int32, checkForFullMigration bool, shouldExist bool) { + var count int + query := ` + SELECT COUNT(*) + FROM dynamo_migration_statuses + WHERE data_type = $1` + + if checkForFullMigration { + query += ` AND earliest_mtime IS NULL` + } + + err := suite.sqlDB.QueryRow(query, dataType).Scan(&count) + + var expectedCount int + if shouldExist { + expectedCount = 1 + } + + suite.Require().NoError(err, "Failed to query dynamo_migration_statuses") + suite.Assert().Equal(expectedCount, count, "Migration status row count should match") +} + +func (suite *CommandMigrateTestSuite) createSQLDB(migrateDataTypes []int32) { + rollouts := buildRolloutConfigString(migrateDataTypes) + suite.T().Setenv(datastore.SQLSaveRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, rollouts) + suite.T().Setenv(datastore.SQLMigrateChunkSizeEnvKey, "2") + suite.T().Setenv(datastore.SQLMigrateUpdateIntervalEnvKey, "1") + + isFirstRun := suite.sqlDB == nil + + var err error + suite.sqlDB, err = datastore.NewSQLDB(true) + 
suite.Require().NoError(err, "Failed to get SQL DB session") + + if isFirstRun { + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") + } +} + +func (suite *CommandMigrateTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") +} + +func (suite *CommandMigrateTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") + suite.Require().NoError( + suite.cache.FlushAll(context.Background()), "Failed to clear cache") + suite.sqlDB = nil +} + +func (suite *CommandMigrateTestSuite) sendMessageAndAssertEmptyResponse(msg *sync_pb.ClientToServerMessage) { + rsp := &sync_pb.ClientToServerResponse{} + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + suite.Assert().Equal(sync_pb.SyncEnums_SUCCESS, *rsp.ErrorCode, "errorCode should match") + suite.Assert().NotNil(rsp.GetUpdates) + suite.Assert().Empty(rsp.GetUpdates.Entries) +} + +func (suite *CommandMigrateTestSuite) TestBasicMigrate() { + suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id6_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id7_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit and check response. 
+ suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // GetUpdates should return nothing. + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + + msg = getClientToServerGUMsg( + marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) + suite.sendMessageAndAssertEmptyResponse(msg) + + isSQLEmpty, err := verifyNoDataInOtherDB(false, suite.dynamoDB, suite.sqlDB) + suite.Require().NoError(err, "Empty database verification should succeed") + suite.Assert().True(isSQLEmpty, "SQL database should be empty") + + suite.createSQLDB([]int32{nigoriType, bookmarkType}) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 2, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 3, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, false) + suite.assertSQLMigrationStatus(nigoriType, false, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 0, + DynamoNigori: 0, + DynamoBookmark: 3, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, false) + suite.assertSQLMigrationStatus(nigoriType, false, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 2, + DynamoNigori: 0, + DynamoBookmark: 1, + }) + suite.assertSQLMigrationStatus(bookmarkType, false, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 3, + DynamoNigori: 0, + DynamoBookmark: 0, + }) + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) + + suite.sendMessageAndAssertEmptyResponse(msg) + 
suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 4, + SQLBookmark: 3, + DynamoNigori: 0, + DynamoBookmark: 0, + }) + // fully migrated + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, true, true) +} + +func (suite *CommandMigrateTestSuite) TestBookmarkOnlyMigration() { + suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id5_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit initial entities + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // Enable migration for bookmarks only + suite.createSQLDB([]int32{bookmarkType}) + + // GetUpdates message + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + msg = getClientToServerGUMsg(marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) + + // Initial counts + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 3, + }) + + // Migrate bookmarks + for i := 0; i < 4; i++ { + suite.sendMessageAndAssertEmptyResponse(msg) + if i == 0 { + suite.assertSQLMigrationStatus(bookmarkType, false, true) + } + } + + // Final counts + suite.assertDatastoreCounts(ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 3, + DynamoNigori: 2, + DynamoBookmark: 0, + }) + + suite.assertSQLMigrationStatus(bookmarkType, true, true) + suite.assertSQLMigrationStatus(nigoriType, false, false) +} + +func (suite *CommandMigrateTestSuite) 
TestMigrateDisabled() { + suite.createSQLDB([]int32{}) + entries := []*sync_pb.SyncEntity{ + getCommitEntity("id1_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id2_bookmark", 0, false, getBookmarkSpecifics()), + getCommitEntity("id3_nigori", 0, false, getNigoriSpecifics()), + getCommitEntity("id4_nigori", 0, false, getNigoriSpecifics()), + } + msg := getClientToServerCommitMsg(entries) + rsp := &sync_pb.ClientToServerResponse{} + + // Commit initial entities + suite.Require().NoError( + command.HandleClientToServerMessage(suite.cache, msg, rsp, suite.dynamoDB, suite.sqlDB, testClientID), + "HandleClientToServerMessage should succeed") + + // GetUpdates message + marker := getMarker(MarkerTokens{ + Nigori: aws.Int64(math.MaxInt64 - 1000), + Bookmark: aws.Int64(math.MaxInt64 - 1000), + }) + msg = getClientToServerGUMsg(marker, sync_pb.SyncEnums_GU_TRIGGER, false, nil) + + // Initial counts + initialCounts := ExpectedCounts{ + SQLNigori: 0, + SQLBookmark: 0, + DynamoNigori: 2, + DynamoBookmark: 2, + } + suite.assertDatastoreCounts(initialCounts) + + // Send multiple GetUpdates messages + for i := 0; i < 5; i++ { + suite.sendMessageAndAssertEmptyResponse(msg) + + // Assert that counts haven't changed + suite.assertDatastoreCounts(initialCounts) + } + + suite.assertSQLMigrationStatus(bookmarkType, false, false) + suite.assertSQLMigrationStatus(nigoriType, false, false) +} + +// test migration of only one type + +func TestCommandMigrateTestSuite(t *testing.T) { + suite.Run(t, new(CommandMigrateTestSuite)) +} diff --git a/command/server_defined_unique_entity.go b/command/server_defined_unique_entity.go index 394171ad..e91c98b3 100644 --- a/command/server_defined_unique_entity.go +++ b/command/server_defined_unique_entity.go @@ -8,7 +8,7 @@ import ( "github.com/brave/go-sync/datastore" "github.com/brave/go-sync/schema/protobuf/sync_pb" "github.com/brave/go-sync/utils" - "github.com/satori/go.uuid" + "github.com/google/uuid" ) const ( @@ -24,12 +24,16 @@ 
const ( bookmarkBarTag string = "bookmark_bar" ) -func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clientID string, parentID string, specifics *sync_pb.EntitySpecifics) (*datastore.SyncEntity, error) { +func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clientID string, chainID int64, parentID string, specifics *sync_pb.EntitySpecifics) (*datastore.SyncEntity, error) { now := utils.UnixMilli(time.Now()) deleted := false folder := true version := int64(1) - idString := uuid.NewV4().String() + idUUID, err := uuid.NewV7() + if err != nil { + return nil, err + } + idString := idUUID.String() pbEntity := &sync_pb.SyncEntity{ Ctime: &now, Mtime: &now, Deleted: &deleted, Folder: &folder, @@ -37,30 +41,17 @@ func createServerDefinedUniqueEntity(name string, serverDefinedTag string, clien Version: &version, ParentIdString: &parentID, IdString: &idString, Specifics: specifics} - return datastore.CreateDBSyncEntity(pbEntity, nil, clientID) + return datastore.CreateDBSyncEntity(pbEntity, nil, clientID, chainID) } -// InsertServerDefinedUniqueEntities inserts the server defined unique tag -// entities if it is not in the DB yet for a specific client. -func InsertServerDefinedUniqueEntities(db datastore.Datastore, clientID string) error { - var entities []*datastore.SyncEntity - // Check if they're existed already for this client. - // If yes, just return directly. 
- ready, err := db.HasServerDefinedUniqueTag(clientID, nigoriTag) - if err != nil { - return fmt.Errorf("error checking if entity with a server tag existed: %w", err) - } - if ready { - return nil - } - +func CreateServerDefinedUniqueEntities(clientID string, chainID int64) (entities []*datastore.SyncEntity, err error) { // Create nigori top-level folder nigoriSpecific := &sync_pb.NigoriSpecifics{} nigoriEntitySpecific := &sync_pb.EntitySpecifics_Nigori{Nigori: nigoriSpecific} specifics := &sync_pb.EntitySpecifics{SpecificsVariant: nigoriEntitySpecific} - entity, err := createServerDefinedUniqueEntity(nigoriName, nigoriTag, clientID, "0", specifics) + entity, err := createServerDefinedUniqueEntity(nigoriName, nigoriTag, clientID, chainID, "0", specifics) if err != nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) @@ -68,9 +59,9 @@ func InsertServerDefinedUniqueEntities(db datastore.Datastore, clientID string) bookmarkSpecific := &sync_pb.BookmarkSpecifics{} bookmarkEntitySpecific := &sync_pb.EntitySpecifics_Bookmark{Bookmark: bookmarkSpecific} specifics = &sync_pb.EntitySpecifics{SpecificsVariant: bookmarkEntitySpecific} - entity, err = createServerDefinedUniqueEntity(bookmarksName, bookmarksTag, clientID, "0", specifics) + entity, err = createServerDefinedUniqueEntity(bookmarksName, bookmarksTag, clientID, chainID, "0", specifics) if err != nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) @@ -82,17 +73,11 @@ func InsertServerDefinedUniqueEntities(db datastore.Datastore, clientID string) bookmarkBarName: bookmarkBarTag} for name, tag := range bookmarkSecondLevelFolders { entity, err := createServerDefinedUniqueEntity( - name, tag, clientID, bookmarkRootID, specifics) + name, tag, 
clientID, chainID, bookmarkRootID, specifics) if err != nil { - return fmt.Errorf("error creating entity with a server tag: %w", err) + return nil, fmt.Errorf("error creating entity with a server tag: %w", err) } entities = append(entities, entity) } - - // Start a transaction to insert all server defined unique entities - err = db.InsertSyncEntitiesWithServerTags(entities) - if err != nil { - return fmt.Errorf("error inserting entities with server tags: %w", err) - } - return nil + return entities, nil } diff --git a/command/server_defined_unique_entity_test.go b/command/server_defined_unique_entity_test.go index 94832250..35165c8b 100644 --- a/command/server_defined_unique_entity_test.go +++ b/command/server_defined_unique_entity_test.go @@ -13,7 +13,8 @@ import ( type ServerDefinedUniqueEntityTestSuite struct { suite.Suite - dynamo *datastore.Dynamo + sqlDB *datastore.SQLDB + dynamoDB *datastore.Dynamo } type SyncAttrs struct { @@ -29,26 +30,34 @@ type SyncAttrs struct { func (suite *ServerDefinedUniqueEntityTestSuite) SetupSuite() { datastore.Table = "client-entity-test-command" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") } func (suite *ServerDefinedUniqueEntityTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *ServerDefinedUniqueEntityTestSuite) TearDownTest() { suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") } func (suite 
*ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEntities() { + dbHelpers, err := command.NewDBHelpers(suite.dynamoDB, suite.sqlDB, "client1", nil, false) + suite.Require().NoError(err, "NewDBHelpers should succeed") + defer dbHelpers.Trx.Rollback() + suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client1"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities should succeed") suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client1"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities again for a same client should succeed") expectedSyncAttrsMap := map[string]*SyncAttrs{ @@ -102,7 +111,7 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn expectedTagItems = append(expectedTagItems, datastore.ServerClientUniqueTagItem{ClientID: "client1", ID: "Server#" + key}) } - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + tagItems, err := datastoretest.ScanTagItems(suite.dynamoDB) suite.Require().NoError(err, "ScanTagItems should succeed") // Check that Ctime and Mtime have been set, reset to zero value for subsequent @@ -119,7 +128,7 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn sort.Sort(datastore.TagItemByClientIDID(expectedTagItems)) suite.Assert().Equal(tagItems, expectedTagItems) - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamoDB) suite.Require().NoError(err, "ScanSyncEntities should succeed") // Find bookmark root folder to update parentID of its subfolders. 
@@ -154,8 +163,14 @@ func (suite *ServerDefinedUniqueEntityTestSuite) TestInsertServerDefinedUniqueEn } suite.Assert().Equal(0, len(expectedSyncAttrsMap)) + suite.Require().NoError(dbHelpers.Trx.Commit(), "Transaction commit should succeed") + + dbHelpers, err = command.NewDBHelpers(suite.dynamoDB, suite.sqlDB, "client2", nil, false) + suite.Require().NoError(err, "NewDBHelpers should succeed") + defer dbHelpers.Trx.Rollback() + suite.Require().NoError( - command.InsertServerDefinedUniqueEntities(suite.dynamo, "client2"), + dbHelpers.InsertServerDefinedUniqueEntities(), "InsertServerDefinedUniqueEntities should succeed for another client") } diff --git a/controller/controller.go b/controller/controller.go index 7566072c..6b4fefa1 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -24,16 +24,16 @@ const ( ) // SyncRouter add routers for command and auth endpoint requests. -func SyncRouter(cache *cache.Cache, datastore datastore.Datastore) chi.Router { +func SyncRouter(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore) chi.Router { r := chi.NewRouter() r.Use(syncMiddleware.Auth) r.Use(syncMiddleware.DisabledChain) - r.Method("POST", "/command/", middleware.InstrumentHandler("Command", Command(cache, datastore))) + r.Method("POST", "/command/", middleware.InstrumentHandler("Command", Command(cache, dynamoDB, sqlDB))) return r } // Command handles GetUpdates and Commit requests from sync clients. 
-func Command(cache *cache.Cache, db datastore.Datastore) http.HandlerFunc { +func Command(cache *cache.Cache, dynamoDB datastore.DynamoDatastore, sqlDB datastore.SQLDatastore) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() clientID, ok := ctx.Value(syncContext.ContextKeyClientID).(string) @@ -72,7 +72,7 @@ func Command(cache *cache.Cache, db datastore.Datastore) http.HandlerFunc { } pbRsp := &sync_pb.ClientToServerResponse{} - err = command.HandleClientToServerMessage(cache, pb, pbRsp, db, clientID) + err = command.HandleClientToServerMessage(cache, pb, pbRsp, dynamoDB, sqlDB, clientID) if err != nil { log.Error().Err(err).Msg("Handle command message failed") http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/controller/controller_test.go b/controller/controller_test.go index e30826d1..fef95392 100644 --- a/controller/controller_test.go +++ b/controller/controller_test.go @@ -24,27 +24,32 @@ import ( type ControllerTestSuite struct { suite.Suite - dynamo *datastore.Dynamo - cache *cache.Cache + sqlDB *datastore.SQLDB + dynamoDB *datastore.Dynamo + cache *cache.Cache } func (suite *ControllerTestSuite) SetupSuite() { datastore.Table = "client-entity-test-controllor" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamoDB, err = datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to get SQL DB session") suite.cache = cache.NewCache(cache.NewRedisClient()) } func (suite *ControllerTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamoDB), "Failed to reset Dynamo table") + suite.Require().NoError( + datastoretest.ResetSQLTables(suite.sqlDB), "Failed to reset SQL tables") } func (suite *ControllerTestSuite) TearDownTest() { 
suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") + datastoretest.DeleteTable(suite.dynamoDB), "Failed to delete table") suite.Require().NoError( suite.cache.FlushAll(context.Background()), "Failed to clear cache") } @@ -83,7 +88,7 @@ func (suite *ControllerTestSuite) TestCommand() { suite.Require().NoError(err, "NewRequest should succeed") req.Header.Set("Authorization", "Bearer token") - handler := controller.Command(suite.cache, suite.dynamo) + handler := controller.Command(suite.cache, suite.dynamoDB, suite.sqlDB) // Test unauthorized response. rr := httptest.NewRecorder() diff --git a/datastore/datastore.go b/datastore/datastore.go deleted file mode 100644 index 04796120..00000000 --- a/datastore/datastore.go +++ /dev/null @@ -1,30 +0,0 @@ -package datastore - -// Datastore abstracts over the underlying datastore. -type Datastore interface { - // Insert a new sync entity. - InsertSyncEntity(entity *SyncEntity) (bool, error) - // Insert a series of sync entities in a write transaction. - InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error - // Update an existing sync entity. - UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) - // Get updates for a specific type which are modified after the time of - // client token for a given client. Besides the array of sync entities, a - // boolean value indicating whether there are more updates to query in the - // next batch is returned. - GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) - // Check if a server-defined unique tag is in the datastore. - HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) - // Get the count of sync items for a client. - GetClientItemCount(clientID string) (*ClientItemCounts, error) - // Update the count of sync items for a client. 
- UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error - // ClearServerData deletes all items for a given clientID - ClearServerData(clientID string) ([]SyncEntity, error) - // DisableSyncChain marks a chain as disabled so no further updates or commits can happen - DisableSyncChain(clientID string) error - // IsSyncChainDisabled checks whether a given sync chain is deleted - IsSyncChainDisabled(clientID string) (bool, error) - // Checks if sync item exists for a client - HasItem(clientID string, ID string) (bool, error) -} diff --git a/datastore/datastoretest/dynamo.go b/datastore/datastoretest/dynamo.go index 2c831ab0..9f935e33 100644 --- a/datastore/datastoretest/dynamo.go +++ b/datastore/datastoretest/dynamo.go @@ -25,9 +25,9 @@ func DeleteTable(dynamo *datastore.Dynamo) error { if aerr.Code() == dynamodb.ErrCodeResourceNotFoundException { return nil } - } else { - return fmt.Errorf("error deleting table: %w", err) + return err } + return fmt.Errorf("error deleting table: %w", err) } return dynamo.WaitUntilTableNotExists( @@ -59,8 +59,8 @@ func CreateTable(dynamo *datastore.Dynamo) error { &dynamodb.DescribeTableInput{TableName: aws.String(datastore.Table)}) } -// ResetTable deletes and creates datastore.Table in dynamoDB. -func ResetTable(dynamo *datastore.Dynamo) error { +// ResetDynamoTable deletes and creates datastore.Table in dynamoDB. +func ResetDynamoTable(dynamo *datastore.Dynamo) error { if err := DeleteTable(dynamo); err != nil { return fmt.Errorf("error deleting table to reset table: %w", err) } @@ -125,7 +125,7 @@ func ScanTagItems(dynamo *datastore.Dynamo) ([]datastore.ServerClientUniqueTagIt // ScanClientItemCounts scans the dynamoDB table and returns all client item // counts. 
-func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.ClientItemCounts, error) { +func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.DynamoItemCounts, error) { filter := expression.AttributeExists(expression.Name("ItemCount")) expr, err := expression.NewBuilder().WithFilter(filter).Build() if err != nil { @@ -142,7 +142,7 @@ func ScanClientItemCounts(dynamo *datastore.Dynamo) ([]datastore.ClientItemCount if err != nil { return nil, fmt.Errorf("error doing scan for item counts: %w", err) } - clientItemCounts := []datastore.ClientItemCounts{} + clientItemCounts := []datastore.DynamoItemCounts{} err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &clientItemCounts) if err != nil { return nil, fmt.Errorf("error unmarshalling item counts: %w", err) diff --git a/datastore/datastoretest/mock_datastore.go b/datastore/datastoretest/mock_datastore.go index 60e8e854..c64cf418 100644 --- a/datastore/datastoretest/mock_datastore.go +++ b/datastore/datastoretest/mock_datastore.go @@ -23,14 +23,14 @@ func (m *MockDatastore) InsertSyncEntitiesWithServerTags(entities []*datastore.S } // UpdateSyncEntity mocks calls to UpdateSyncEntity -func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { +func (m *MockDatastore) UpdateSyncEntity(entity *datastore.SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { args := m.Called(entity, oldVersion) return args.Bool(0), args.Bool(1), args.Error(2) } // GetUpdatesForType mocks calls to GetUpdatesForType -func (m *MockDatastore) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []datastore.SyncEntity, error) { - args := m.Called(dataType, clientToken, fetchFolders, clientID, maxSize) +func (m *MockDatastore) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []datastore.SyncEntity, 
error) { + args := m.Called(dataType, minMtime, maxMtime, fetchFolders, clientID, maxSize, ascOrder) return args.Bool(0), args.Get(1).([]datastore.SyncEntity), args.Error(2) } @@ -46,13 +46,13 @@ func (m *MockDatastore) HasItem(clientID string, ID string) (bool, error) { } // GetClientItemCount mocks calls to GetClientItemCount -func (m *MockDatastore) GetClientItemCount(clientID string) (*datastore.ClientItemCounts, error) { +func (m *MockDatastore) GetClientItemCount(clientID string) (*datastore.DynamoItemCounts, error) { args := m.Called(clientID) - return &datastore.ClientItemCounts{ClientID: clientID, ID: clientID}, args.Error(1) + return &datastore.DynamoItemCounts{ClientID: clientID, ID: clientID}, args.Error(1) } // UpdateClientItemCount mocks calls to UpdateClientItemCount -func (m *MockDatastore) UpdateClientItemCount(counts *datastore.ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error { +func (m *MockDatastore) UpdateClientItemCount(counts *datastore.DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error { args := m.Called(counts, newNormalItemCount, newHistoryItemCount) return args.Error(0) } @@ -74,3 +74,18 @@ func (m *MockDatastore) IsSyncChainDisabled(clientID string) (bool, error) { args := m.Called(clientID) return args.Bool(0), args.Error(1) } + +// DeleteEntities mocks the deletion of sync entities +func (m *MockDatastore) DeleteEntities(entities []*datastore.SyncEntity) error { + args := m.Called(entities) + return args.Error(0) +} + +// GetEntity mocks the retrieval of a sync entity +func (m *MockDatastore) GetEntity(query datastore.ItemQuery) (*datastore.SyncEntity, error) { + args := m.Called(query) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*datastore.SyncEntity), args.Error(1) +} diff --git a/datastore/datastoretest/sql.go b/datastore/datastoretest/sql.go new file mode 100644 index 00000000..71f72967 --- /dev/null +++ b/datastore/datastoretest/sql.go @@ -0,0 +1,9 
@@ +package datastoretest + +import "github.com/brave/go-sync/datastore" + +// ResetSQLTables clears SQL tables. +func ResetSQLTables(sqlDB *datastore.SQLDB) error { + _, err := sqlDB.Exec("DELETE FROM chains") + return err +} diff --git a/datastore/dynamo.go b/datastore/dynamo.go index 27ad05a1..f2416290 100644 --- a/datastore/dynamo.go +++ b/datastore/dynamo.go @@ -7,6 +7,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" ) @@ -25,7 +26,9 @@ const ( var ( // Table is the name of the table in dynamoDB, could be modified in tests. - Table = os.Getenv("TABLE_NAME") + Table = os.Getenv("TABLE_NAME") + defaultTestEndpoint = "http://localhost:8000" + defaultTestRegion = "us-west-2" ) // PrimaryKey struct is used to represent the primary key of our table. @@ -40,7 +43,7 @@ type Dynamo struct { } // NewDynamo returns a dynamoDB client to be used. -func NewDynamo() (*Dynamo, error) { +func NewDynamo(isTesting bool) (*Dynamo, error) { httpClient := &http.Client{ Timeout: 30 * time.Second, Transport: &http.Transport{ @@ -49,7 +52,19 @@ func NewDynamo() (*Dynamo, error) { }, } - awsConfig := aws.NewConfig().WithRegion(os.Getenv("AWS_REGION")).WithEndpoint(os.Getenv("AWS_ENDPOINT")).WithHTTPClient(httpClient) + endpoint := os.Getenv("AWS_ENDPOINT") + region := os.Getenv("AWS_REGION") + if endpoint == "" && region == "" && isTesting { + endpoint = defaultTestEndpoint + region = defaultTestRegion + } + + awsConfig := aws.NewConfig().WithRegion(region).WithEndpoint(endpoint).WithHTTPClient(httpClient) + + if isTesting { + awsConfig = awsConfig.WithCredentials(credentials.NewStaticCredentials("GOSYNC", "GOSYNC", "GOSYNC")) + } + sess, err := session.NewSession(awsConfig) if err != nil { diff --git a/datastore/dynamo_migration_status.go b/datastore/dynamo_migration_status.go new file mode 100644 index 00000000..cc217862 --- /dev/null +++ 
b/datastore/dynamo_migration_status.go @@ -0,0 +1,52 @@ +package datastore + +import ( + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" +) + +type MigrationStatus struct { + ChainID int64 `db:"chain_id"` + DataType int `db:"data_type"` + EarliestMtime *int64 `db:"earliest_mtime"` +} + +// GetDynamoMigrationStatuses retrieves migration statuses for specified data types +func (sqlDB *SQLDB) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (dataTypeToStatusMap map[int]*MigrationStatus, err error) { + dataTypeToStatusMap = make(map[int]*MigrationStatus) + + var statuses []MigrationStatus + err = tx.Select(&statuses, ` + SELECT chain_id, data_type, earliest_mtime + FROM dynamo_migration_statuses + WHERE chain_id = $1 AND data_type = ANY($2) + `, chainID, pq.Array(dataTypes)) + + if err != nil { + return nil, fmt.Errorf("failed to get dynamo migration status: %w", err) + } + + for i := range statuses { + dataTypeToStatusMap[statuses[i].DataType] = &statuses[i] + } + + return dataTypeToStatusMap, nil +} + +// UpdateDynamoMigrationStatuses updates migration statuses in the database +func (sqlDB *SQLDB) UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) error { + _, err := tx.NamedExec(` + INSERT INTO dynamo_migration_statuses (chain_id, data_type, earliest_mtime) + VALUES (:chain_id, :data_type, :earliest_mtime) + ON CONFLICT (chain_id, data_type) DO UPDATE + SET earliest_mtime = EXCLUDED.earliest_mtime + WHERE dynamo_migration_statuses.earliest_mtime IS NOT NULL AND (dynamo_migration_statuses.earliest_mtime > EXCLUDED.earliest_mtime OR EXCLUDED.earliest_mtime IS NULL) + `, statuses) + if err != nil { + return fmt.Errorf("failed to update dynamo migration statuses: %w", err) + } + + return nil +} diff --git a/datastore/instrumented_datastore.go b/datastore/instrumented_datastore.go deleted file mode 100644 index 04796120..00000000 --- a/datastore/instrumented_datastore.go +++ /dev/null @@ -1,192 +0,0 @@ -package datastore - -// DO NOT EDIT! 
-// This code is generated with http://github.com/hexdigest/gowrap tool -// using ../.prom-gowrap.tmpl template - -//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i Datastore -t ../.prom-gowrap.tmpl -o instrumented_datastore.go - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -// DatastoreWithPrometheus implements Datastore interface with all methods wrapped -// with Prometheus metrics -type DatastoreWithPrometheus struct { - base Datastore - instanceName string -} - -var datastoreDurationSummaryVec = promauto.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "datastore_duration_seconds", - Help: "datastore runtime duration and result", - MaxAge: time.Minute, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"instance_name", "method", "result"}) - -// NewDatastoreWithPrometheus returns an instance of the Datastore decorated with prometheus summary metric -func NewDatastoreWithPrometheus(base Datastore, instanceName string) DatastoreWithPrometheus { - return DatastoreWithPrometheus{ - base: base, - instanceName: instanceName, - } -} - -// ClearServerData implements Datastore -func (_d DatastoreWithPrometheus) ClearServerData(clientID string) (sa1 []SyncEntity, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "ClearServerData", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.ClearServerData(clientID) -} - -// DisableSyncChain implements Datastore -func (_d DatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) - }() - return 
_d.base.DisableSyncChain(clientID) -} - -// GetClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) GetClientItemCount(clientID string) (counts *ClientItemCounts, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.GetClientItemCount(clientID) -} - -// GetUpdatesForType implements Datastore -func (_d DatastoreWithPrometheus) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (b1 bool, sa1 []SyncEntity, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.GetUpdatesForType(dataType, clientToken, fetchFolders, clientID, maxSize) -} - -// HasItem implements Datastore -func (_d DatastoreWithPrometheus) HasItem(clientID string, ID string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.HasItem(clientID, ID) -} - -// HasServerDefinedUniqueTag implements Datastore -func (_d DatastoreWithPrometheus) HasServerDefinedUniqueTag(clientID string, tag string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasServerDefinedUniqueTag", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.HasServerDefinedUniqueTag(clientID, tag) -} - -// InsertSyncEntitiesWithServerTags implements Datastore -func (_d DatastoreWithPrometheus) 
InsertSyncEntitiesWithServerTags(entities []*SyncEntity) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntitiesWithServerTags", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.InsertSyncEntitiesWithServerTags(entities) -} - -// InsertSyncEntity implements Datastore -func (_d DatastoreWithPrometheus) InsertSyncEntity(entity *SyncEntity) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntity", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.InsertSyncEntity(entity) -} - -// IsSyncChainDisabled implements Datastore -func (_d DatastoreWithPrometheus) IsSyncChainDisabled(clientID string) (b1 bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "IsSyncChainDisabled", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.IsSyncChainDisabled(clientID) -} - -// UpdateClientItemCount implements Datastore -func (_d DatastoreWithPrometheus) UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { - _since := time.Now() - defer func() { - result := "ok" - if err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateClientItemCount", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.UpdateClientItemCount(counts, newNormalItemCount, newHistoryItemCount) -} - -// UpdateSyncEntity implements Datastore -func (_d DatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, delete bool, err error) { - _since := time.Now() - defer func() { - result := "ok" - if 
err != nil { - result = "error" - } - - datastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) - }() - return _d.base.UpdateSyncEntity(entity, oldVersion) -} diff --git a/datastore/instrumented_dynamo_datastore.go b/datastore/instrumented_dynamo_datastore.go new file mode 100644 index 00000000..014137cb --- /dev/null +++ b/datastore/instrumented_dynamo_datastore.go @@ -0,0 +1,220 @@ +// Code generated by gowrap. DO NOT EDIT. +// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap + +package datastore + +//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i DynamoDatastore -t ../.prom-gowrap.tmpl -o instrumented_dynamo_datastore.go -l "" + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// DynamoDatastoreWithPrometheus implements DynamoDatastore interface with all methods wrapped +// with Prometheus metrics +type DynamoDatastoreWithPrometheus struct { + base DynamoDatastore + instanceName string +} + +var dynamodatastoreDurationSummaryVec = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "dynamodatastore_duration_seconds", + Help: "dynamodatastore runtime duration and result", + MaxAge: time.Minute, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"instance_name", "method", "result"}) + +// NewDynamoDatastoreWithPrometheus returns an instance of the DynamoDatastore decorated with prometheus summary metric +func NewDynamoDatastoreWithPrometheus(base DynamoDatastore, instanceName string) DynamoDatastoreWithPrometheus { + return DynamoDatastoreWithPrometheus{ + base: base, + instanceName: instanceName, + } +} + +// ClearServerData implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) ClearServerData(clientID string) (sa1 []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err 
!= nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "ClearServerData", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.ClearServerData(clientID) +} + +// DeleteEntities implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DeleteEntities(entities []*SyncEntity) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteEntities", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteEntities(entities) +} + +// DisableSyncChain implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) DisableSyncChain(clientID string) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DisableSyncChain", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DisableSyncChain(clientID) +} + +// GetClientItemCount implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetClientItemCount(clientID string) (dp1 *DynamoItemCounts, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetClientItemCount", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetClientItemCount(clientID) +} + +// GetEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) GetEntity(query ItemQuery) (sp1 *SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetEntity(query) +} + +// GetUpdatesForType implements DynamoDatastore 
+func (_d DynamoDatastoreWithPrometheus) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (hasChangesRemaining bool, entities []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetUpdatesForType(dataType, minMtime, maxMtime, fetchFolders, clientID, maxSize, ascOrder) +} + +// HasItem implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) HasItem(clientID string, ID string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasItem(clientID, ID) +} + +// HasServerDefinedUniqueTag implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) HasServerDefinedUniqueTag(clientID string, tag string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasServerDefinedUniqueTag", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasServerDefinedUniqueTag(clientID, tag) +} + +// InsertSyncEntitiesWithServerTags implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntitiesWithServerTags", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntitiesWithServerTags(entities) +} + +// 
InsertSyncEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) InsertSyncEntity(entity *SyncEntity) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntity(entity) +} + +// IsSyncChainDisabled implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) IsSyncChainDisabled(clientID string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "IsSyncChainDisabled", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.IsSyncChainDisabled(clientID) +} + +// UpdateClientItemCount implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateClientItemCount", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateClientItemCount(counts, newNormalItemCount, newHistoryItemCount) +} + +// UpdateSyncEntity implements DynamoDatastore +func (_d DynamoDatastoreWithPrometheus) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + dynamodatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateSyncEntity(entity, oldVersion) +} diff --git a/datastore/instrumented_sql_datastore.go 
b/datastore/instrumented_sql_datastore.go new file mode 100644 index 00000000..6be6b70c --- /dev/null +++ b/datastore/instrumented_sql_datastore.go @@ -0,0 +1,209 @@ +// Code generated by gowrap. DO NOT EDIT. +// template: ../.prom-gowrap.tmpl +// gowrap: http://github.com/hexdigest/gowrap + +package datastore + +//go:generate gowrap gen -p github.com/brave/go-sync/datastore -i SQLDatastore -t ../.prom-gowrap.tmpl -o instrumented_sql_datastore.go -l "" + +import ( + "time" + + "github.com/jmoiron/sqlx" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// SQLDatastoreWithPrometheus implements SQLDatastore interface with all methods wrapped +// with Prometheus metrics +type SQLDatastoreWithPrometheus struct { + base SQLDatastore + instanceName string +} + +var sqldatastoreDurationSummaryVec = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "sqldatastore_duration_seconds", + Help: "sqldatastore runtime duration and result", + MaxAge: time.Minute, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"instance_name", "method", "result"}) + +// NewSQLDatastoreWithPrometheus returns an instance of the SQLDatastore decorated with prometheus summary metric +func NewSQLDatastoreWithPrometheus(base SQLDatastore, instanceName string) SQLDatastoreWithPrometheus { + return SQLDatastoreWithPrometheus{ + base: base, + instanceName: instanceName, + } +} + +// Beginx implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) Beginx() (tp1 *sqlx.Tx, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "Beginx", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.Beginx() +} + +// DeleteChain implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) DeleteChain(tx *sqlx.Tx, chainID int64) (err error) { + _since := time.Now() + 
defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "DeleteChain", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.DeleteChain(tx, chainID) +} + +// GetAndLockChainID implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetAndLockChainID(tx *sqlx.Tx, clientID string) (ip1 *int64, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetAndLockChainID", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetAndLockChainID(tx, clientID) +} + +// GetDynamoMigrationStatuses implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (m1 map[int]*MigrationStatus, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetDynamoMigrationStatuses", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetDynamoMigrationStatuses(tx, chainID, dataTypes) +} + +// GetItemCounts implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetItemCounts(tx *sqlx.Tx, chainID int64) (sp1 *SQLItemCounts, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetItemCounts", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetItemCounts(tx, chainID) +} + +// GetUpdatesForType implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err 
!= nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "GetUpdatesForType", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.GetUpdatesForType(tx, dataType, clientToken, fetchFolders, chainID, maxSize) +} + +// HasItem implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "HasItem", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.HasItem(tx, chainID, clientTag) +} + +// InsertSyncEntities implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (b1 bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "InsertSyncEntities", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.InsertSyncEntities(tx, entities) +} + +// MigrateChunkSize implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) MigrateChunkSize() (i1 int) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "MigrateChunkSize", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.MigrateChunkSize() +} + +// MigrateIntervalPercent implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) MigrateIntervalPercent() (f1 float32) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "MigrateIntervalPercent", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.MigrateIntervalPercent() +} + +// UpdateDynamoMigrationStatuses implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) 
UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) (err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateDynamoMigrationStatuses", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateDynamoMigrationStatuses(tx, statuses) +} + +// UpdateSyncEntity implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { + _since := time.Now() + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "UpdateSyncEntity", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.UpdateSyncEntity(tx, entity, oldVersion) +} + +// Variations implements SQLDatastore +func (_d SQLDatastoreWithPrometheus) Variations() (sp1 *SQLVariations) { + _since := time.Now() + defer func() { + result := "ok" + sqldatastoreDurationSummaryVec.WithLabelValues(_d.instanceName, "Variations", result).Observe(time.Since(_since).Seconds()) + }() + return _d.base.Variations() +} diff --git a/datastore/interfaces.go b/datastore/interfaces.go new file mode 100644 index 00000000..787f7251 --- /dev/null +++ b/datastore/interfaces.go @@ -0,0 +1,67 @@ +package datastore + +import "github.com/jmoiron/sqlx" + +// DynamoDatastore abstracts over the underlying datastore. +type DynamoDatastore interface { + // Insert a new sync entity. + InsertSyncEntity(entity *SyncEntity) (bool, error) + // Insert a series of sync entities in a write transaction. + InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error + // Update an existing sync entity. 
+ UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) + // Get updates for a specific type which are modified after the time of + // client token for a given client. Besides the array of sync entities, a + // boolean value indicating whether there are more updates to query in the + // next batch is returned. + GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (hasChangesRemaining bool, entities []SyncEntity, err error) + // Check if a server-defined unique tag is in the datastore. + HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) + // Get the count of sync items for a client. + GetClientItemCount(clientID string) (*DynamoItemCounts, error) + // Update the count of sync items for a client. + UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error + // ClearServerData deletes all items for a given clientID + ClearServerData(clientID string) ([]SyncEntity, error) + // DisableSyncChain marks a chain as disabled so no further updates or commits can happen + DisableSyncChain(clientID string) error + // IsSyncChainDisabled checks whether a given sync chain is deleted + IsSyncChainDisabled(clientID string) (bool, error) + // HasItem checks if sync item exists for a client + HasItem(clientID string, ID string) (bool, error) + // GetEntity gets an existing entity + GetEntity(query ItemQuery) (*SyncEntity, error) + // DeleteEntities deletes multiple existing items + DeleteEntities(entities []*SyncEntity) error +} + +// SQLDatastore abstracts over the underlying datastore. 
+type SQLDatastore interface { + // InsertSyncEntities inserts multiple sync entities into the database + InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (bool, error) + // HasItem checks if an item exists in the database + HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (bool, error) + // UpdateSyncEntity updates a sync entity in the database + UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) + // GetAndLockChainID retrieves and locks a chain ID for a given client ID + GetAndLockChainID(tx *sqlx.Tx, clientID string) (*int64, error) + // GetUpdatesForType retrieves updates for a specific data type + GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) + // GetDynamoMigrationStatuses retrieves migration statuses for specified data types + GetDynamoMigrationStatuses(tx *sqlx.Tx, chainID int64, dataTypes []int) (map[int]*MigrationStatus, error) + // UpdateDynamoMigrationStatuses updates migration statuses in the database + UpdateDynamoMigrationStatuses(tx *sqlx.Tx, statuses []*MigrationStatus) error + // GetItemCounts provides the counts of items associated with a chain + GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, error) + // Beginx initializes a database transaction + Beginx() (*sqlx.Tx, error) + // Variations returns the SQLVariations utility + Variations() *SQLVariations + // MigrateIntervalPercent returns the percentage of update requests that will perform + // a chunked migration + MigrateIntervalPercent() float32 + // MigrateChunkSize returns the max chunk size of migration attempts + MigrateChunkSize() int + // DeleteChain removes a chain and its associated data from the database + DeleteChain(tx *sqlx.Tx, chainID int64) error +} diff --git a/datastore/item_count.go b/datastore/item_count_dynamo.go similarity index 93% rename from datastore/item_count.go 
rename to datastore/item_count_dynamo.go index 1cda9903..daad3f99 100644 --- a/datastore/item_count.go +++ b/datastore/item_count_dynamo.go @@ -18,9 +18,9 @@ const ( CurrentCountVersion int = 2 ) -// ClientItemCounts is used to marshal and unmarshal ClientItemCounts items in +// DynamoItemCounts is used to marshal and unmarshal DynamoItemCounts items in // dynamoDB. -type ClientItemCounts struct { +type DynamoItemCounts struct { ClientID string ID string ItemCount int @@ -34,7 +34,7 @@ type ClientItemCounts struct { // ClientItemCountByClientID implements sort.Interface for []ClientItemCount // based on ClientID. -type ClientItemCountByClientID []ClientItemCounts +type ClientItemCountByClientID []DynamoItemCounts func (a ClientItemCountByClientID) Len() int { return len(a) } func (a ClientItemCountByClientID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } @@ -42,14 +42,14 @@ func (a ClientItemCountByClientID) Less(i, j int) bool { return a[i].ClientID < a[j].ClientID } -func (counts *ClientItemCounts) SumHistoryCounts() int { +func (counts *DynamoItemCounts) SumHistoryCounts() int { return counts.HistoryItemCountPeriod1 + counts.HistoryItemCountPeriod2 + counts.HistoryItemCountPeriod3 + counts.HistoryItemCountPeriod4 } -func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *ClientItemCounts) error { +func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *DynamoItemCounts) error { now := time.Now().Unix() if counts.Version < CurrentCountVersion { if counts.ItemCount > 0 { @@ -128,7 +128,7 @@ func (dynamo *Dynamo) initRealCountsAndUpdateHistoryCounts(counts *ClientItemCou // GetClientItemCount returns the count of non-deleted sync items stored for // a given client. 
-func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, error) { +func (dynamo *Dynamo) GetClientItemCount(clientID string) (*DynamoItemCounts, error) { primaryKey := PrimaryKey{ClientID: clientID, ID: clientID} key, err := dynamodbattribute.MarshalMap(primaryKey) if err != nil { @@ -145,7 +145,7 @@ func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, er return nil, fmt.Errorf("error getting an item-count item: %w", err) } - clientItemCounts := &ClientItemCounts{} + clientItemCounts := &DynamoItemCounts{} err = dynamodbattribute.UnmarshalMap(out.Item, clientItemCounts) if err != nil { return nil, fmt.Errorf("error unmarshalling item-count item: %w", err) @@ -165,7 +165,7 @@ func (dynamo *Dynamo) GetClientItemCount(clientID string) (*ClientItemCounts, er // UpdateClientItemCount updates the count of non-deleted sync items for a // given client stored in the dynamoDB. -func (dynamo *Dynamo) UpdateClientItemCount(counts *ClientItemCounts, newNormalItemCount int, newHistoryItemCount int) error { +func (dynamo *Dynamo) UpdateClientItemCount(counts *DynamoItemCounts, newNormalItemCount int, newHistoryItemCount int) error { counts.HistoryItemCountPeriod4 += newHistoryItemCount counts.ItemCount += newNormalItemCount diff --git a/datastore/item_count_test.go b/datastore/item_count_dynamo_test.go similarity index 76% rename from datastore/item_count_test.go rename to datastore/item_count_dynamo_test.go index ccb78ac2..f7a62cba 100644 --- a/datastore/item_count_test.go +++ b/datastore/item_count_dynamo_test.go @@ -9,36 +9,36 @@ import ( "github.com/stretchr/testify/suite" ) -type ItemCountTestSuite struct { +type ItemCountDynamoTestSuite struct { suite.Suite dynamo *datastore.Dynamo } -func (suite *ItemCountTestSuite) SetupSuite() { +func (suite *ItemCountDynamoTestSuite) SetupSuite() { datastore.Table = "client-entity-test-datastore" var err error - suite.dynamo, err = datastore.NewDynamo() + suite.dynamo, err = 
datastore.NewDynamo(true) suite.Require().NoError(err, "Failed to get dynamoDB session") } -func (suite *ItemCountTestSuite) SetupTest() { +func (suite *ItemCountDynamoTestSuite) SetupTest() { suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") + datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") } -func (suite *ItemCountTestSuite) TearDownTest() { +func (suite *ItemCountDynamoTestSuite) TearDownTest() { suite.Require().NoError( datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") } -func (suite *ItemCountTestSuite) TestGetClientItemCount() { +func (suite *ItemCountDynamoTestSuite) TestGetClientItemCount() { // Insert two items for test. - items := []datastore.ClientItemCounts{ + items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 5}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } for _, item := range items { - existing := datastore.ClientItemCounts{ClientID: item.ClientID, ID: item.ID, Version: datastore.CurrentCountVersion} + existing := datastore.DynamoItemCounts{ClientID: item.ClientID, ID: item.ID, Version: datastore.CurrentCountVersion} suite.Require().NoError( suite.dynamo.UpdateClientItemCount(&existing, item.ItemCount, 0)) } @@ -55,13 +55,13 @@ func (suite *ItemCountTestSuite) TestGetClientItemCount() { suite.Assert().Equal(count.ItemCount, 0) } -func (suite *ItemCountTestSuite) TestUpdateClientItemCount() { - items := []datastore.ClientItemCounts{ +func (suite *ItemCountDynamoTestSuite) TestUpdateClientItemCount() { + items := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 1}, {ClientID: "client1", ID: "client1", ItemCount: 5}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } - expectedItems := []datastore.ClientItemCounts{ + expectedItems := []datastore.DynamoItemCounts{ {ClientID: "client1", ID: "client1", ItemCount: 6}, {ClientID: "client2", ID: "client2", ItemCount: 10}, } @@ -85,5 +85,5 @@ func 
(suite *ItemCountTestSuite) TestUpdateClientItemCount() { } func TestItemCountTestSuite(t *testing.T) { - suite.Run(t, new(ItemCountTestSuite)) + suite.Run(t, new(ItemCountDynamoTestSuite)) } diff --git a/datastore/item_count_sql.go b/datastore/item_count_sql.go new file mode 100644 index 00000000..53ddae51 --- /dev/null +++ b/datastore/item_count_sql.go @@ -0,0 +1,28 @@ +package datastore + +import ( + "fmt" + + "github.com/jmoiron/sqlx" +) + +type SQLItemCounts struct { + NormalItemCount int `db:"normal_item_count"` + HistoryItemCount int `db:"history_item_count"` +} + +// GetItemCounts returns the counts of items in the SQL database for a given chain ID +func (sqlDB *SQLDB) GetItemCounts(tx *sqlx.Tx, chainID int64) (*SQLItemCounts, error) { + counts := SQLItemCounts{} + err := tx.Get(&counts, ` + SELECT + COUNT(*) FILTER (WHERE data_type NOT IN ($1, $2)) AS normal_item_count, + COUNT(*) FILTER (WHERE data_type IN ($1, $2)) AS history_item_count + FROM entities + WHERE chain_id = $3 AND deleted = false + `, HistoryTypeID, HistoryDeleteDirectiveTypeID, chainID) + if err != nil { + return nil, fmt.Errorf("failed to get item counts: %w", err) + } + return &counts, nil +} diff --git a/datastore/migrations/20240904202925_init.down.sql b/datastore/migrations/20240904202925_init.down.sql new file mode 100644 index 00000000..072e325f --- /dev/null +++ b/datastore/migrations/20240904202925_init.down.sql @@ -0,0 +1,7 @@ +DROP TABLE entities; +DROP TABLE dynamo_migration_statuses; +DROP TABLE chains; + +DROP EXTENSION pg_partman; +DROP EXTENSION pg_cron; +DROP SCHEMA partman CASCADE; diff --git a/datastore/migrations/20240904202925_init.up.sql b/datastore/migrations/20240904202925_init.up.sql new file mode 100644 index 00000000..001ecb9d --- /dev/null +++ b/datastore/migrations/20240904202925_init.up.sql @@ -0,0 +1,75 @@ +CREATE SCHEMA IF NOT EXISTS partman; +CREATE EXTENSION IF NOT EXISTS pg_partman SCHEMA partman; + +CREATE TABLE chains ( + id BIGSERIAL PRIMARY KEY, + 
last_usage_time TIMESTAMP NOT NULL, + client_id BYTEA NOT NULL, + UNIQUE (client_id) +); + +CREATE TABLE dynamo_migration_statuses ( + chain_id BIGINT REFERENCES chains(id) ON DELETE CASCADE, + -- null earliest_mtime indicates that all entities have been migrated + earliest_mtime BIGINT, + data_type INTEGER, + PRIMARY KEY (chain_id, data_type) +); + +CREATE TABLE entities ( + id UUID, + chain_id BIGINT NOT NULL REFERENCES chains(id) ON DELETE CASCADE, + ctime BIGINT NOT NULL, + mtime BIGINT NOT NULL, + version BIGINT NOT NULL, + data_type INTEGER NOT NULL, + specifics BYTEA NOT NULL, + client_defined_unique_tag TEXT, + server_defined_unique_tag TEXT, + name TEXT, + originator_cache_guid TEXT, + originator_client_item_id TEXT, + parent_id TEXT, + non_unique_name TEXT, + unique_position BYTEA, + folder BOOLEAN, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id, chain_id), + UNIQUE (chain_id, client_defined_unique_tag) +) +PARTITION BY RANGE (chain_id); + +ALTER TABLE entities ALTER specifics SET STORAGE EXTERNAL; +ALTER TABLE entities ALTER client_defined_unique_tag SET STORAGE PLAIN; +ALTER TABLE entities ALTER server_defined_unique_tag SET STORAGE PLAIN; +ALTER TABLE entities ALTER name SET STORAGE PLAIN; +ALTER TABLE entities ALTER originator_cache_guid SET STORAGE PLAIN; +ALTER TABLE entities ALTER originator_client_item_id SET STORAGE PLAIN; +ALTER TABLE entities ALTER parent_id SET STORAGE PLAIN; +ALTER TABLE entities ALTER non_unique_name SET STORAGE PLAIN; +ALTER TABLE entities ALTER unique_position SET STORAGE PLAIN; + +CREATE INDEX entities_chain_id_data_type_mtime_idx ON entities (chain_id, data_type, mtime); + +DO $$ +BEGIN + -- for vanilla postgres + PERFORM partman.create_parent( + p_parent_table := 'public.entities', + p_control := 'chain_id', + p_interval := '3500', + p_type := 'range' + ); +EXCEPTION WHEN OTHERS THEN + -- for Aurora + PERFORM partman.create_parent( + p_parent_table := 'public.entities', + p_control := 'chain_id', + p_interval := '3500', 
+ p_type := 'native' + ); +END $$; + +CREATE EXTENSION IF NOT EXISTS pg_cron; + +SELECT cron.schedule('@hourly', $$CALL partman.run_maintenance_proc()$$); diff --git a/datastore/rds.go b/datastore/rds.go new file mode 100644 index 00000000..0642e416 --- /dev/null +++ b/datastore/rds.go @@ -0,0 +1,93 @@ +package datastore + +import ( + "context" + "fmt" + "net/url" + "os" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/rds/auth" + "github.com/jackc/pgx/v5" +) + +const defaultRegion = "us-west-2" + +const ( + rdsPortKey = "RDS_DATABASE_PORT" + rdsHostKey = "RDS_WRITER_ENDPOINT" + rdsUserKey = "RDS_USER" + rdsDbNameKey = "RDS_DATABASE_NAME" + regionKey = "AWS_REGION" +) + +type rdsConnector struct { + hostAndPort string + dbName string + user string + token string + region string + tokenCacheTime time.Time + mu sync.Mutex +} + +func newRDSConnector() *rdsConnector { + port := os.Getenv(rdsPortKey) + host := os.Getenv(rdsHostKey) + user := os.Getenv(rdsUserKey) + dbName := os.Getenv(rdsDbNameKey) + region := os.Getenv(regionKey) + + if region == "" { + region = defaultRegion + } + hostAndPort := fmt.Sprintf("%s:%s", host, port) + return &rdsConnector{ + hostAndPort: hostAndPort, + dbName: dbName, + user: user, + region: region, + } +} + +func (c *rdsConnector) getAuthToken(ctx context.Context) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if time.Since(c.tokenCacheTime) > 10*time.Minute { + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return "", fmt.Errorf("failed to load AWS config") + } + + token, err := auth.BuildAuthToken( + ctx, c.hostAndPort, c.region, c.user, cfg.Credentials) + if err != nil { + return "", fmt.Errorf("failed to create authentication token: %w", err) + } + c.token = token + c.tokenCacheTime = time.Now() + } + return c.token, nil +} + +func (c *rdsConnector) getConnectionString(ctx context.Context) (string, error) { + token, err := c.getAuthToken(ctx) + if err != nil 
{ + return "", err + } + + return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", c.user, url.QueryEscape(token), c.hostAndPort, c.dbName), nil +} + +func (c *rdsConnector) updateConnConfig(ctx context.Context, config *pgx.ConnConfig) error { + token, err := c.getAuthToken(ctx) + if err != nil { + return err + } + config.Password = token + + return nil +} diff --git a/datastore/sql.go b/datastore/sql.go new file mode 100644 index 00000000..b127b1a2 --- /dev/null +++ b/datastore/sql.go @@ -0,0 +1,144 @@ +package datastore + +import ( + "context" + "embed" + "errors" + "fmt" + "os" + "strconv" + + "github.com/golang-migrate/migrate/v4" + // import postgres package for migrations + _ "github.com/golang-migrate/migrate/v4/database/postgres" + "github.com/golang-migrate/migrate/v4/source/iofs" + + // import pgx so it can be used with sqlx + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/stdlib" + "github.com/jmoiron/sqlx" +) + +const ( + sqlURLEnvKey = "SQL_DATABASE_URL" + sqlTestURLEnvKey = "SQL_TEST_DATABASE_URL" + // Default value is defined here, since the .env file will not be loaded + // because tests are run in the subdirectories where the tests live + defaultSQLTestURL = "postgres://sync:password@localhost:5434/testing?sslmode=disable" + // SQLMigrateUpdateIntervalEnvKey is the env var name used to define the frequency + // of chunked migration within "get update" requests + SQLMigrateUpdateIntervalEnvKey = "SQL_MIGRATE_UPDATE_INTERVAL" + // SQLMigrateChunkSizeEnvKey is the env var name used to define the max migration + // chunk size + SQLMigrateChunkSizeEnvKey = "SQL_MIGRATE_CHUNK_SIZE" + defaultMigrateUpdateInterval = 4 + defaultMigrateChunkSize = 100 +) + +//go:embed migrations/* +var migrationFiles embed.FS + +// SQLDB is a Datastore wrapper around a SQL-based database. 
+type SQLDB struct { + *sqlx.DB + insertQuery string + variations *SQLVariations + migrateIntervalPercent float32 + migrateChunkSize int +} + +// NewSQLDB returns a SQLDB client to be used. +func NewSQLDB(isTesting bool) (*SQLDB, error) { + variations, err := LoadSQLVariations() + if err != nil { + return nil, err + } + + var envKey string + if isTesting { + envKey = sqlTestURLEnvKey + } else { + envKey = sqlURLEnvKey + } + + var rdsConnector *rdsConnector + if os.Getenv(rdsHostKey) != "" { + rdsConnector = newRDSConnector() + } + + sqlURL := os.Getenv(envKey) + if rdsConnector != nil { + sqlURL, err = rdsConnector.getConnectionString(context.Background()) + if err != nil { + return nil, err + } + } else if sqlURL == "" { + if isTesting { + sqlURL = defaultSQLTestURL + } else { + return nil, fmt.Errorf("%s or %s must be defined", envKey, rdsHostKey) + } + } + iofsDriver, err := iofs.New(migrationFiles, "migrations") + if err != nil { + return nil, fmt.Errorf("failed to load iofs driver for migrations: %w", err) + } + migration, err := migrate.NewWithSourceInstance( + "iofs", + iofsDriver, + sqlURL, + ) + if err != nil { + return nil, fmt.Errorf("Failed to init migrations: %w", err) + } + if err = migration.Up(); err != nil { + if !errors.Is(err, migrate.ErrNoChange) { + return nil, fmt.Errorf("Failed to run migrations: %w", err) + } + err = nil + } + + var db *sqlx.DB + if rdsConnector != nil { + config, err := pgx.ParseConfig(sqlURL) + if err != nil { + return nil, err + } + baseDB := stdlib.OpenDB(*config, stdlib.OptionBeforeConnect(rdsConnector.updateConnConfig)) + db = sqlx.NewDb(baseDB, "pgx") + } else { + db, err = sqlx.Connect("pgx", sqlURL) + } + if err != nil { + return nil, fmt.Errorf("Failed to connect to SQL DB: %w", err) + } + + if isTesting { + variations.Ready = true + } + + migrateInterval, _ := strconv.Atoi(os.Getenv(SQLMigrateUpdateIntervalEnvKey)) + migrateChunkSize, _ := strconv.Atoi(os.Getenv(SQLMigrateChunkSizeEnvKey)) + + if migrateInterval 
<= 0 { + migrateInterval = defaultMigrateUpdateInterval + } + migrateIntervalPercent := 1 / float32(migrateInterval) + if migrateChunkSize <= 0 { + migrateChunkSize = defaultMigrateChunkSize + } + + wrappedDB := SQLDB{db, buildInsertQuery(), variations, migrateIntervalPercent, migrateChunkSize} + return &wrappedDB, nil +} + +// MigrateIntervalPercent returns the percentage of update requests that will perform +// a chunked migration +func (db *SQLDB) MigrateIntervalPercent() float32 { + return db.migrateIntervalPercent +} + +// MigrateChunkSize returns the max chunk size of migration attempts +func (db *SQLDB) MigrateChunkSize() int { + return db.migrateChunkSize +} diff --git a/datastore/sql_variations.go b/datastore/sql_variations.go new file mode 100644 index 00000000..c648e0ff --- /dev/null +++ b/datastore/sql_variations.go @@ -0,0 +1,107 @@ +package datastore + +import ( + "fmt" + "hash/fnv" + "math" + "os" + "strconv" + "strings" +) + +// SQLSaveRolloutsEnvKey defines the data types and rollout percentages for saving +// new items into the SQL database, instead of Dynamo. +const SQLSaveRolloutsEnvKey = "SQL_SAVE_ROLLOUTS" + +// SQLSaveRolloutsEnvKey defines the data types and rollout percentages for periodic +// chunked migration from Dynamo to SQL. +const SQLMigrateRolloutsEnvKey = "SQL_MIGRATE_ROLLOUTS" + +// VariationHashDecimal returns a decimal from 0.0 to 1.0 for a given client ID. +// The decimal is typically checked against a rollout percentage to determine if a user +// should be included in a rollout. 
+func VariationHashDecimal(input string) float32 { + h := fnv.New32a() + h.Write([]byte(input)) + hashValue := h.Sum32() + + // Convert hash to a decimal between 0 and 1 + return float32(hashValue) / math.MaxUint32 +} + +// SQLVariations handles SQL variation rollout functions +type SQLVariations struct { + sqlSaveRollouts map[int]float32 + sqlMigrateRollouts map[int]float32 + Ready bool +} + +func parseRollouts(envKey string) (map[int]float32, error) { + rollouts := make(map[int]float32) + envVal := os.Getenv(envKey) + + if len(envVal) > 0 { + pairs := strings.Split(envVal, ",") + + for _, pair := range pairs { + parts := strings.Split(strings.TrimSpace(pair), "=") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid format in %s: %s", envKey, pair) + } + + key, err := strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return nil, fmt.Errorf("Invalid integer in %s: %s", envKey, parts[0]) + } + + value, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 32) + if err != nil { + return nil, fmt.Errorf("Invalid float in %s: %s", envKey, parts[1]) + } + + rollouts[key] = float32(value) + } + } + + return rollouts, nil +} + +// LoadSQLVariations creates a SQLVariations struct, configured by env vars +func LoadSQLVariations() (*SQLVariations, error) { + sqlSaveRollouts, err := parseRollouts(SQLSaveRolloutsEnvKey) + if err != nil { + return nil, err + } + sqlMigrateRollouts, err := parseRollouts(SQLMigrateRolloutsEnvKey) + if err != nil { + return nil, err + } + + return &SQLVariations{ + sqlSaveRollouts: sqlSaveRollouts, + sqlMigrateRollouts: sqlMigrateRollouts, + Ready: false, + }, nil +} + +// ShouldSaveToSQL returns true if a client should save the entity to the SQL database for a given data type +func (sqlVariations *SQLVariations) ShouldSaveToSQL(dataType int, variationHashDecimal float32) bool { + rolloutPercent, exists := sqlVariations.sqlSaveRollouts[dataType] + return exists && variationHashDecimal <= rolloutPercent +} + +// ShouldMigrateToSQL 
returns true if chunked migration from Dynamo to SQL should occur for a given data type +func (sqlVariations *SQLVariations) ShouldMigrateToSQL(dataType int, variationHashDecimal float32) bool { + rolloutPercent, exists := sqlVariations.sqlMigrateRollouts[dataType] + return exists && variationHashDecimal <= rolloutPercent +} + +// GetStateDigest returns a string that combines the env vars related to variations +func (sqlVariations *SQLVariations) GetStateDigest() string { + return SQLSaveRolloutsEnvKey + ":" + os.Getenv(SQLSaveRolloutsEnvKey) + ";" + + SQLMigrateRolloutsEnvKey + ":" + os.Getenv(SQLMigrateRolloutsEnvKey) +} + +func (sqlDB *SQLDB) Variations() *SQLVariations { + return sqlDB.variations +} diff --git a/datastore/sql_variations_test.go b/datastore/sql_variations_test.go new file mode 100644 index 00000000..3f629fed --- /dev/null +++ b/datastore/sql_variations_test.go @@ -0,0 +1,57 @@ +package datastore_test + +import ( + "os" + "testing" + + "github.com/brave/go-sync/datastore" + "github.com/stretchr/testify/suite" +) + +type SQLVariationsSuite struct { + suite.Suite + variations *datastore.SQLVariations +} + +func (s *SQLVariationsSuite) SetupTest() { + s.T().Setenv(datastore.SQLSaveRolloutsEnvKey, "1=0.5,2=0.75") + s.T().Setenv(datastore.SQLMigrateRolloutsEnvKey, "1=0.25,3=1.0") + var err error + s.variations, err = datastore.LoadSQLVariations() + s.Require().NoError(err) +} + +func (s *SQLVariationsSuite) TestShouldSaveToSQL() { + s.True(s.variations.ShouldSaveToSQL(1, 0.4)) + s.False(s.variations.ShouldSaveToSQL(1, 0.6)) + s.True(s.variations.ShouldSaveToSQL(2, 0.7)) + s.False(s.variations.ShouldSaveToSQL(2, 0.8)) + s.False(s.variations.ShouldSaveToSQL(3, 0.5)) // Non-existent key +} + +func (s *SQLVariationsSuite) TestShouldMigrateToSQL() { + s.True(s.variations.ShouldMigrateToSQL(1, 0.2)) + s.False(s.variations.ShouldMigrateToSQL(1, 0.3)) + s.True(s.variations.ShouldMigrateToSQL(3, 0.9)) + s.False(s.variations.ShouldMigrateToSQL(2, 0.5)) // 
Non-existent key +} + +func (s *SQLVariationsSuite) TestVariationHashDecimal() { + hash1 := datastore.VariationHashDecimal("test1") + hash2 := datastore.VariationHashDecimal("test2") + s.NotEqual(hash1, hash2) + s.Less(hash1, float32(1.0)) + s.Less(hash2, float32(1.0)) + s.GreaterOrEqual(hash1, float32(0.0)) + s.GreaterOrEqual(hash2, float32(0.0)) +} + +func (s *SQLVariationsSuite) TestParseRolloutsError() { + os.Setenv(datastore.SQLSaveRolloutsEnvKey, "invalid=format") + _, err := datastore.LoadSQLVariations() + s.Error(err) +} + +func TestSQLVariationsSuite(t *testing.T) { + suite.Run(t, new(SQLVariationsSuite)) +} diff --git a/datastore/sync_entity.go b/datastore/sync_entity.go index 3d032619..9c78e368 100644 --- a/datastore/sync_entity.go +++ b/datastore/sync_entity.go @@ -3,31 +3,18 @@ package datastore import ( "fmt" "reflect" - "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/expression" "github.com/brave/go-sync/schema/protobuf/sync_pb" - "github.com/brave/go-sync/utils" + "github.com/google/uuid" "github.com/rs/zerolog/log" - uuid "github.com/satori/go.uuid" "google.golang.org/protobuf/proto" ) const ( - maxBatchGetItemSize = 100 // Limited by AWS. - maxTransactDeleteItemSize = 10 // Limited by AWS. - clientTagItemPrefix = "Client#" - serverTagItemPrefix = "Server#" - conditionalCheckFailed = "ConditionalCheckFailed" - disabledChainID = "disabled_chain" - reasonDeleted = "deleted" HistoryTypeID int = 963985 HistoryDeleteDirectiveTypeID int = 150251 // Expiration time for history and history delete directive @@ -37,698 +24,28 @@ const ( // SyncEntity is used to marshal and unmarshal sync items in dynamoDB. 
type SyncEntity struct { - ClientID string + ClientID string + // ChainID is a synthetic key that is connected to the client id in the SQL db. + ChainID *int64 `dynamodbav:"-" db:"chain_id"` ID string - ParentID *string `dynamodbav:",omitempty"` + ParentID *string `dynamodbav:",omitempty" db:"parent_id"` Version *int64 Mtime *int64 Ctime *int64 Name *string `dynamodbav:",omitempty"` - NonUniqueName *string `dynamodbav:",omitempty"` - ServerDefinedUniqueTag *string `dynamodbav:",omitempty"` + NonUniqueName *string `dynamodbav:",omitempty" db:"non_unique_name"` + ServerDefinedUniqueTag *string `dynamodbav:",omitempty" db:"server_defined_unique_tag"` Deleted *bool - OriginatorCacheGUID *string `dynamodbav:",omitempty"` - OriginatorClientItemID *string `dynamodbav:",omitempty"` + OriginatorCacheGUID *string `dynamodbav:",omitempty" db:"originator_cache_guid"` + OriginatorClientItemID *string `dynamodbav:",omitempty" db:"originator_client_item_id"` Specifics []byte - DataType *int + DataType *int `db:"data_type"` Folder *bool - ClientDefinedUniqueTag *string `dynamodbav:",omitempty"` - UniquePosition []byte `dynamodbav:",omitempty"` + ClientDefinedUniqueTag *string `dynamodbav:",omitempty" db:"client_defined_unique_tag"` + UniquePosition []byte `dynamodbav:",omitempty" db:"unique_position"` DataTypeMtime *string ExpirationTime *int64 -} - -// SyncEntityByClientIDID implements sort.Interface for []SyncEntity based on -// the string concatenation of ClientID and ID fields. -type SyncEntityByClientIDID []SyncEntity - -func (a SyncEntityByClientIDID) Len() int { return len(a) } -func (a SyncEntityByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a SyncEntityByClientIDID) Less(i, j int) bool { - return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID -} - -// SyncEntityByMtime implements sort.Interface for []SyncEntity based on Mtime. 
-type SyncEntityByMtime []SyncEntity - -func (a SyncEntityByMtime) Len() int { return len(a) } -func (a SyncEntityByMtime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a SyncEntityByMtime) Less(i, j int) bool { - return *a[i].Mtime < *a[j].Mtime -} - -// DisabledMarkerItem is used to mark sync chain as deleted in Dynamodb -type DisabledMarkerItem struct { - ClientID string - ID string - Reason string - Mtime *int64 - Ctime *int64 -} - -// DisabledMarkerItemQuery is used to query for disabled marker item in -// DynamoDB -type DisabledMarkerItemQuery struct { - ClientID string - ID string -} - -// ServerClientUniqueTagItem is used to marshal and unmarshal tag items in -// dynamoDB. -type ServerClientUniqueTagItem struct { - ClientID string // Hash key - ID string // Range key - Mtime *int64 - Ctime *int64 -} - -// ServerClientUniqueTagItemQuery is used to query for unique tag items in -// dynamoDB. -type ServerClientUniqueTagItemQuery struct { - ClientID string // Hash key - ID string // Range key -} - -// TagItemByClientIDID implements sort.Interface for []ServerClientUniqueTagItem -// based on the string concatenation of ClientID and ID fields. -type TagItemByClientIDID []ServerClientUniqueTagItem - -func (a TagItemByClientIDID) Len() int { return len(a) } -func (a TagItemByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a TagItemByClientIDID) Less(i, j int) bool { - return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID -} - -// getTagPrefix is a helper method to give the proper prefix for unique tag -func getTagPrefix(isServer bool) string { - if isServer { - return serverTagItemPrefix - } - return clientTagItemPrefix -} - -// NewServerClientUniqueTagItem creates a tag item which is used to ensure the -// uniqueness of server-defined or client-defined unique tags for a client. 
-func NewServerClientUniqueTagItem(clientID string, tag string, isServer bool) *ServerClientUniqueTagItem { - prefix := getTagPrefix(isServer) - now := aws.Int64(utils.UnixMilli(time.Now())) - - return &ServerClientUniqueTagItem{ - ClientID: clientID, - ID: prefix + tag, - Mtime: now, - Ctime: now, - } -} - -// NewServerClientUniqueTagItemQuery creates a tag item query which is used to -// determine whether a sync entity has a unique tag item or not -func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ServerClientUniqueTagItemQuery { - prefix := getTagPrefix(isServer) - - return &ServerClientUniqueTagItemQuery{ - ClientID: clientID, - ID: prefix + tag, - } -} - -// InsertSyncEntity inserts a new sync entity into dynamoDB. -// If ClientDefinedUniqueTag is not null, we will use a write transaction to -// write a sync item along with a tag item to ensure the uniqueness of the -// client tag. Otherwise, only a sync item is written into DB without using -// transactions. -func (dynamo *Dynamo) InsertSyncEntity(entity *SyncEntity) (bool, error) { - // Create a condition for inserting new items only. - cond := expression.AttributeNotExists(expression.Name(pk)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return false, fmt.Errorf("error building expression to insert sync entity: %w", err) - } - - // Write tag item for all data types, except for - // the history type, which does not use tag items. - if entity.ClientDefinedUniqueTag != nil && *entity.DataType != HistoryTypeID { - items := []*dynamodb.TransactWriteItem{} - // Additional item for ensuring tag's uniqueness for a specific client. 
- item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ClientDefinedUniqueTag, false) - av, err := dynamodbattribute.MarshalMap(*item) - if err != nil { - return false, fmt.Errorf("error marshalling unique tag item to insert sync entity: %w", err) - } - tagItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - // Normal sync item - av, err = dynamodbattribute.MarshalMap(*entity) - if err != nil { - return false, fmt.Errorf("error marshlling sync item to insert sync entity: %w", err) - } - syncItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - items = append(items, tagItem) - items = append(items, syncItem) - - _, err = dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - // Return conflict if insert condition failed. 
- if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { - for _, reason := range canceledException.CancellationReasons { - if reason.Code != nil && *reason.Code == conditionalCheckFailed { - return true, fmt.Errorf("error inserting sync item with client tag: %w", err) - } - } - } - return false, fmt.Errorf("error writing tag item and sync item in a transaction to insert sync entity: %w", err) - } - - return false, nil - } - - // Normal sync item - av, err := dynamodbattribute.MarshalMap(*entity) - if err != nil { - return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err) - } - input := &dynamodb.PutItemInput{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - } - _, err = dynamo.PutItem(input) - if err != nil { - return false, fmt.Errorf("error calling PutItem to insert sync item: %w", err) - } - return false, nil -} - -// HasServerDefinedUniqueTag check the tag item to see if there is already a -// tag item exists with the tag value for a specific client. 
-func (dynamo *Dynamo) HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) { - tagItem := NewServerClientUniqueTagItemQuery(clientID, tag, true) - key, err := dynamodbattribute.MarshalMap(tagItem) - if err != nil { - return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if server tag existed: %w", err) - } - - return out.Item != nil, nil -} - -func (dynamo *Dynamo) HasItem(clientID string, ID string) (bool, error) { - primaryKey := PrimaryKey{ClientID: clientID, ID: ID} - key, err := dynamodbattribute.MarshalMap(primaryKey) - - if err != nil { - return false, fmt.Errorf("error marshalling key to check if item existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if item existed: %w", err) - } - - return out.Item != nil, nil -} - -// InsertSyncEntitiesWithServerTags is used to insert sync entities with -// server-defined unique tags. To ensure the uniqueness, for each sync entity, -// we will write a tag item and a sync item. Items for all the entities in the -// array would be written into DB in one transaction. -func (dynamo *Dynamo) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error { - items := []*dynamodb.TransactWriteItem{} - for _, entity := range entities { - // Create a condition for inserting new items only. 
- cond := expression.AttributeNotExists(expression.Name(pk)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return fmt.Errorf("error building expression to insert sync entity with server tag: %w", err) - } - - // Additional item for ensuring tag's uniqueness for a specific client. - item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ServerDefinedUniqueTag, true) - av, err := dynamodbattribute.MarshalMap(*item) - if err != nil { - return fmt.Errorf("error marshalling tag item to insert sync entity with server tag: %w", err) - } - tagItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - // Normal sync item - av, err = dynamodbattribute.MarshalMap(*entity) - if err != nil { - return fmt.Errorf("error marshalling sync item to insert sync entity with server tag: %w", err) - } - syncItem := &dynamodb.TransactWriteItem{ - Put: &dynamodb.Put{ - Item: av, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - TableName: aws.String(Table), - }, - } - - items = append(items, tagItem) - items = append(items, syncItem) - } - - _, err := dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - return fmt.Errorf("error writing sync entities with server tags in a transaction: %w", err) - } - return nil -} - -// DisableSyncChain marks a chain as disabled so no further updates or commits can happen -func (dynamo *Dynamo) DisableSyncChain(clientID string) error { - now := aws.Int64(utils.UnixMilli(time.Now())) - disabledMarker := DisabledMarkerItem{ - ClientID: clientID, - ID: disabledChainID, - Reason: reasonDeleted, - Mtime: now, - Ctime: now, - } - - av, err := dynamodbattribute.MarshalMap(disabledMarker) - if err != nil { - 
return fmt.Errorf("error marshalling disabled marker: %w", err) - } - - markerInput := &dynamodb.PutItemInput{ - Item: av, - TableName: aws.String(Table), - } - - _, err = dynamo.PutItem(markerInput) - if err != nil { - return fmt.Errorf("error calling PutItem to insert sync item: %w", err) - } - - return nil -} - -// ClearServerData deletes all items for a given clientID -func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) { - syncEntities := []SyncEntity{} - pkb := expression.Key(pk) - pkv := expression.Value(clientID) - keyCond := expression.KeyEqual(pkb, pkv) - exprs := expression.NewBuilder().WithKeyCondition(keyCond) - expr, err := exprs.Build() - if err != nil { - return syncEntities, fmt.Errorf("error building expression to get updates: %w", err) - } - - input := &dynamodb.QueryInput{ - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - KeyConditionExpression: expr.KeyCondition(), - FilterExpression: expr.Filter(), - TableName: aws.String(Table), - } - - out, err := dynamo.Query(input) - if err != nil { - return syncEntities, fmt.Errorf("error doing query to get updates: %w", err) - } - count := *out.Count - - err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &syncEntities) - if err != nil { - return syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) - } - - var i, j int64 - for i = 0; i < count; i += maxTransactDeleteItemSize { - j = i + maxTransactDeleteItemSize - if j > count { - j = count - } - - items := []*dynamodb.TransactWriteItem{} - for _, item := range syncEntities[i:j] { - if item.ID == disabledChainID { - continue - } - - // Fail delete if race condition detected (modified time has changed). 
- if item.Version != nil { - cond := expression.Name("Mtime").Equal(expression.Value(*item.Mtime)) - expr, err := expression.NewBuilder().WithCondition(cond).Build() - if err != nil { - return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) - } - - writeItem := dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - ConditionExpression: expr.Condition(), - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - TableName: aws.String(Table), - Key: map[string]*dynamodb.AttributeValue{ - pk: { - S: aws.String(item.ClientID), - }, - sk: { - S: aws.String(item.ID), - }, - }, - }, - } - - items = append(items, &writeItem) - } else { - // If row doesn't hold Mtime, delete as usual. - writeItem := dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - TableName: aws.String(Table), - Key: map[string]*dynamodb.AttributeValue{ - pk: { - S: aws.String(item.ClientID), - }, - sk: { - S: aws.String(item.ID), - }, - }, - }, - } - - items = append(items, &writeItem) - } - - } - - _, err = dynamo.TransactWriteItems(&dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err) - } - } - - return syncEntities, nil -} - -// IsSyncChainDisabled checks whether a given sync chain has been deleted -func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) { - key, err := dynamodbattribute.MarshalMap(DisabledMarkerItemQuery{ - ClientID: clientID, - ID: disabledChainID, - }) - if err != nil { - return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) - } - - input := &dynamodb.GetItemInput{ - Key: key, - TableName: aws.String(Table), - } - - out, err := dynamo.GetItem(input) - if err != nil { - return false, fmt.Errorf("error calling GetItem to check if sync chain disabled: %w", err) - } - - return len(out.Item) > 0, nil -} - -// UpdateSyncEntity updates a 
sync item in dynamoDB. -func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (bool, bool, error) { - primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: entity.ID} - key, err := dynamodbattribute.MarshalMap(primaryKey) - if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) - } - - // condition to ensure the request is update only... - cond := expression.AttributeExists(expression.Name(pk)) - // ...and the version matches, if applicable - if *entity.DataType != HistoryTypeID { - cond = expression.And(cond, expression.Name("Version").Equal(expression.Value(oldVersion))) - } - - update := expression.Set(expression.Name("Version"), expression.Value(entity.Version)) - update = update.Set(expression.Name("Mtime"), expression.Value(entity.Mtime)) - update = update.Set(expression.Name("Specifics"), expression.Value(entity.Specifics)) - update = update.Set(expression.Name("DataTypeMtime"), expression.Value(entity.DataTypeMtime)) - - // Update optional fields only if the value is not null. 
- if entity.UniquePosition != nil { - update = update.Set(expression.Name("UniquePosition"), expression.Value(entity.UniquePosition)) - } - if entity.ParentID != nil { - update = update.Set(expression.Name("ParentID"), expression.Value(entity.ParentID)) - } - if entity.Name != nil { - update = update.Set(expression.Name("Name"), expression.Value(entity.Name)) - } - if entity.NonUniqueName != nil { - update = update.Set(expression.Name("NonUniqueName"), expression.Value(entity.NonUniqueName)) - } - if entity.Deleted != nil { - update = update.Set(expression.Name("Deleted"), expression.Value(entity.Deleted)) - } - if entity.Folder != nil { - update = update.Set(expression.Name("Folder"), expression.Value(entity.Folder)) - } - - expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() - if err != nil { - return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) - } - - // Soft-delete a sync item with a client tag, use a transaction to delete its - // tag item too. 
- if entity.Deleted != nil && entity.ClientDefinedUniqueTag != nil && *entity.Deleted && *entity.DataType != HistoryTypeID { - pk := PrimaryKey{ - ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} - tagItemKey, err := dynamodbattribute.MarshalMap(pk) - if err != nil { - return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) - } - - items := []*dynamodb.TransactWriteItem{} - updateSyncItem := &dynamodb.TransactWriteItem{ - Update: &dynamodb.Update{ - Key: key, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - UpdateExpression: expr.Update(), - ReturnValuesOnConditionCheckFailure: aws.String(dynamodb.ReturnValueAllOld), - TableName: aws.String(Table), - }, - } - deleteTagItem := &dynamodb.TransactWriteItem{ - Delete: &dynamodb.Delete{ - Key: tagItemKey, - TableName: aws.String(Table), - }, - } - items = append(items, updateSyncItem) - items = append(items, deleteTagItem) - - _, err = dynamo.TransactWriteItems( - &dynamodb.TransactWriteItemsInput{TransactItems: items}) - if err != nil { - // Return conflict if the update condition fails. - if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { - for _, reason := range canceledException.CancellationReasons { - if reason.Code != nil && *reason.Code == conditionalCheckFailed { - return true, false, nil - } - } - } - - return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) - } - - // Successfully soft-delete the sync item and delete the tag item. - return false, true, nil - } - - // Not deleting a sync item with a client tag, do a normal update on sync - // item. 
- input := &dynamodb.UpdateItemInput{ - Key: key, - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - ConditionExpression: expr.Condition(), - UpdateExpression: expr.Update(), - ReturnValues: aws.String(dynamodb.ReturnValueAllOld), - TableName: aws.String(Table), - } - - out, err := dynamo.UpdateItem(input) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - // Return conflict if the write condition fails. - if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return true, false, nil - } - } - return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) - } - - // Unmarshal out.Attributes - oldEntity := &SyncEntity{} - err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) - if err != nil { - return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) - } - var deleted bool - if entity.Deleted == nil { // No updates on Deleted this time. - deleted = false - } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. - deleted = *entity.Deleted - } else { - deleted = !*oldEntity.Deleted && *entity.Deleted - } - return false, deleted, nil -} - -// GetUpdatesForType returns sync entities of a data type where it's mtime is -// later than the client token. -// To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a -// list of (ClientID, ID) primary keys with the given condition, then read the -// actual sync item using the list of primary keys. -func (dynamo *Dynamo) GetUpdatesForType(dataType int, clientToken int64, fetchFolders bool, clientID string, maxSize int64) (bool, []SyncEntity, error) { - syncEntities := []SyncEntity{} - - // Get (ClientID, ID) pairs which are updates after mtime for a data type, - // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. 
- dataTypeMtimeLowerBound := strconv.Itoa(dataType) + "#" + strconv.FormatInt(clientToken+1, 10) - dataTypeMtimeUpperBound := strconv.Itoa(dataType+1) + "#0" - pkCond := expression.Key(clientIDDataTypeMtimeIdxPk).Equal(expression.Value(clientID)) - skCond := expression.KeyBetween( - expression.Key(clientIDDataTypeMtimeIdxSk), - expression.Value(dataTypeMtimeLowerBound), - expression.Value(dataTypeMtimeUpperBound)) - keyCond := expression.KeyAnd(pkCond, skCond) - exprs := expression.NewBuilder().WithKeyCondition(keyCond) - - if !fetchFolders { // Filter folder entities out if fetchFolder is false. - exprs = exprs.WithFilter( - expression.Equal(expression.Name("Folder"), expression.Value(false))) - } - - expr, err := exprs.Build() - if err != nil { - return false, syncEntities, fmt.Errorf("error building expression to get updates: %w", err) - } - - input := &dynamodb.QueryInput{ - IndexName: aws.String(clientIDDataTypeMtimeIdx), - ExpressionAttributeNames: expr.Names(), - ExpressionAttributeValues: expr.Values(), - KeyConditionExpression: expr.KeyCondition(), - FilterExpression: expr.Filter(), - ProjectionExpression: aws.String(projPk), - TableName: aws.String(Table), - Limit: aws.Int64(maxSize), - } - - out, err := dynamo.Query(input) - if err != nil { - return false, syncEntities, fmt.Errorf("error doing query to get updates: %w", err) - } - - hasChangesRemaining := false - if out.LastEvaluatedKey != nil && len(out.LastEvaluatedKey) > 0 { - hasChangesRemaining = true - } - - count := *(out.Count) - if count == 0 { // No updates - return hasChangesRemaining, syncEntities, nil - } - - // Use return (ClientID, ID) primary keys to get the actual items. 
- var outAv []map[string]*dynamodb.AttributeValue - var i, j int64 - for i = 0; i < count; i += maxBatchGetItemSize { - j = i + maxBatchGetItemSize - if j > count { - j = count - } - - batchInput := &dynamodb.BatchGetItemInput{ - RequestItems: map[string]*dynamodb.KeysAndAttributes{ - Table: { - Keys: out.Items[i:j], - }, - }, - } - - err := dynamo.BatchGetItemPages(batchInput, - func(batchOut *dynamodb.BatchGetItemOutput, last bool) bool { - outAv = append(outAv, batchOut.Responses[Table]...) - return last - }) - if err != nil { - return false, syncEntities, fmt.Errorf("error getting update items in a batch: %w", err) - } - } - - err = dynamodbattribute.UnmarshalListOfMaps(outAv, &syncEntities) - if err != nil { - return false, syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) - } - - // filter out any expired items, i.e. history sync entities over 90 days old - nowUnix := time.Now().Unix() - var filteredSyncEntities []SyncEntity - for _, syncEntity := range syncEntities { - if syncEntity.ExpirationTime != nil && *syncEntity.ExpirationTime > 0 { - if *syncEntity.ExpirationTime < nowUnix { - continue - } - } - filteredSyncEntities = append(filteredSyncEntities, syncEntity) - } - - sort.Sort(SyncEntityByMtime(filteredSyncEntities)) - return hasChangesRemaining, filteredSyncEntities, nil + OldVersion *int64 `dynamodbav:"-" db:"old_version"` } func validatePBEntity(entity *sync_pb.SyncEntity) error { @@ -752,7 +69,7 @@ func validatePBEntity(entity *sync_pb.SyncEntity) error { } // CreateDBSyncEntity converts a protobuf sync entity into a DB sync item. 
-func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string) (*SyncEntity, error) { +func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID string, chainID int64) (*SyncEntity, error) { err := validatePBEntity(entity) if err != nil { log.Error().Err(err).Msg("Invalid sync_pb.SyncEntity received") @@ -786,18 +103,16 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID var originatorCacheGUID, originatorClientItemID *string if cacheGUID != nil { if *entity.Version == 0 { - id = uuid.NewV4().String() + idUUID, err := uuid.NewV7() + if err != nil { + return nil, err + } + id = idUUID.String() } originatorCacheGUID = cacheGUID originatorClientItemID = entity.IdString } - // The client tag hash must be used as the primary key - // for the history type. - if dataType == HistoryTypeID { - id = *entity.ClientTagHash - } - now := time.Now() var expirationTime *int64 @@ -831,6 +146,7 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID return &SyncEntity{ ClientID: clientID, + ChainID: &chainID, ID: id, ParentID: entity.ParentIdString, Version: entity.Version, @@ -854,8 +170,15 @@ func CreateDBSyncEntity(entity *sync_pb.SyncEntity, cacheGUID *string, clientID // CreatePBSyncEntity converts a DB sync item to a protobuf sync entity. func CreatePBSyncEntity(entity *SyncEntity) (*sync_pb.SyncEntity, error) { + id := &entity.ID + // The client tag hash must be used as the primary key + // for the history type. 
+ if *entity.DataType == HistoryTypeID { + id = entity.ClientDefinedUniqueTag + } + pbEntity := &sync_pb.SyncEntity{ - IdString: &entity.ID, + IdString: id, ParentIdString: entity.ParentID, Version: entity.Version, Mtime: entity.Mtime, diff --git a/datastore/sync_entity_dynamo.go b/datastore/sync_entity_dynamo.go new file mode 100644 index 00000000..d13200ed --- /dev/null +++ b/datastore/sync_entity_dynamo.go @@ -0,0 +1,795 @@ +package datastore + +import ( + "fmt" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/aws/aws-sdk-go/service/dynamodb/expression" + "github.com/brave/go-sync/utils" +) + +const ( + maxBatchGetItemSize = 100 // Limited by AWS. + maxTransactDeleteItemSize = 10 // Limited by AWS. + clientTagItemPrefix = "Client#" + serverTagItemPrefix = "Server#" + conditionalCheckFailed = "ConditionalCheckFailed" + disabledChainID = "disabled_chain" + reasonDeleted = "deleted" +) + +// SyncEntityByClientIDID implements sort.Interface for []SyncEntity based on +// the string concatenation of ClientID and ID fields. +type SyncEntityByClientIDID []SyncEntity + +func (a SyncEntityByClientIDID) Len() int { return len(a) } +func (a SyncEntityByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SyncEntityByClientIDID) Less(i, j int) bool { + return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID +} + +// SyncEntityByMtime implements sort.Interface for []SyncEntity based on Mtime. 
+type SyncEntityByMtime []SyncEntity + +func (a SyncEntityByMtime) Len() int { return len(a) } +func (a SyncEntityByMtime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SyncEntityByMtime) Less(i, j int) bool { + return *a[i].Mtime < *a[j].Mtime +} + +// DisabledMarkerItem is used to mark sync chain as deleted in Dynamodb +type DisabledMarkerItem struct { + ClientID string + ID string + Reason string + Mtime *int64 + Ctime *int64 +} + +// ServerClientUniqueTagItem is used to marshal and unmarshal tag items in +// dynamoDB. +type ServerClientUniqueTagItem struct { + ClientID string // Hash key + ID string // Range key + Mtime *int64 + Ctime *int64 +} + +// ItemQuery is used to query for items in dynamoDB. +type ItemQuery struct { + ClientID string // Hash key + ID string // Range key +} + +// TagItemByClientIDID implements sort.Interface for []ServerClientUniqueTagItem +// based on the string concatenation of ClientID and ID fields. +type TagItemByClientIDID []ServerClientUniqueTagItem + +func (a TagItemByClientIDID) Len() int { return len(a) } +func (a TagItemByClientIDID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a TagItemByClientIDID) Less(i, j int) bool { + return a[i].ClientID+a[i].ID < a[j].ClientID+a[j].ID +} + +// getTagPrefix is a helper method to give the proper prefix for unique tag +func getTagPrefix(isServer bool) string { + if isServer { + return serverTagItemPrefix + } + return clientTagItemPrefix +} + +// NewServerClientUniqueTagItem creates a tag item which is used to ensure the +// uniqueness of server-defined or client-defined unique tags for a client. 
+func NewServerClientUniqueTagItem(clientID string, tag string, isServer bool) *ServerClientUniqueTagItem { + prefix := getTagPrefix(isServer) + now := aws.Int64(utils.UnixMilli(time.Now())) + + return &ServerClientUniqueTagItem{ + ClientID: clientID, + ID: prefix + tag, + Mtime: now, + Ctime: now, + } +} + +// NewServerClientUniqueTagItemQuery creates a tag item query which is used to +// determine whether a sync entity has a unique tag item or not +func NewServerClientUniqueTagItemQuery(clientID string, tag string, isServer bool) *ItemQuery { + prefix := getTagPrefix(isServer) + + return &ItemQuery{ + ClientID: clientID, + ID: prefix + tag, + } +} + +// InsertSyncEntity inserts a new sync entity into dynamoDB. +// If ClientDefinedUniqueTag is not null, we will use a write transaction to +// write a sync item along with a tag item to ensure the uniqueness of the +// client tag. Otherwise, only a sync item is written into DB without using +// transactions. +func (dynamo *Dynamo) InsertSyncEntity(entity *SyncEntity) (bool, error) { + // Create a condition for inserting new items only. + cond := expression.AttributeNotExists(expression.Name(pk)) + expr, err := expression.NewBuilder().WithCondition(cond).Build() + if err != nil { + return false, fmt.Errorf("error building expression to insert sync entity: %w", err) + } + + // Write tag item for all data types, except for + // the history type, which does not use tag items. + if entity.ClientDefinedUniqueTag != nil && *entity.DataType != HistoryTypeID { + items := []*dynamodb.TransactWriteItem{} + // Additional item for ensuring tag's uniqueness for a specific client. 
+ item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ClientDefinedUniqueTag, false)
+ av, err := dynamodbattribute.MarshalMap(*item)
+ if err != nil {
+ return false, fmt.Errorf("error marshalling unique tag item to insert sync entity: %w", err)
+ }
+ tagItem := &dynamodb.TransactWriteItem{
+ Put: &dynamodb.Put{
+ Item: av,
+ ExpressionAttributeNames: expr.Names(),
+ ExpressionAttributeValues: expr.Values(),
+ ConditionExpression: expr.Condition(),
+ TableName: aws.String(Table),
+ },
+ }
+
+ // Normal sync item
+ av, err = dynamodbattribute.MarshalMap(*entity)
+ if err != nil {
+ return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err)
+ }
+ syncItem := &dynamodb.TransactWriteItem{
+ Put: &dynamodb.Put{
+ Item: av,
+ ExpressionAttributeNames: expr.Names(),
+ ExpressionAttributeValues: expr.Values(),
+ ConditionExpression: expr.Condition(),
+ TableName: aws.String(Table),
+ },
+ }
+ items = append(items, tagItem)
+ items = append(items, syncItem)
+
+ _, err = dynamo.TransactWriteItems(
+ &dynamodb.TransactWriteItemsInput{TransactItems: items})
+ if err != nil {
+ // Return conflict if insert condition failed. 
+ if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { + for _, reason := range canceledException.CancellationReasons { + if reason.Code != nil && *reason.Code == conditionalCheckFailed { + return true, fmt.Errorf("error inserting sync item with client tag: %w", err) + } + } + } + return false, fmt.Errorf("error writing tag item and sync item in a transaction to insert sync entity: %w", err) + } + + return false, nil + } + + actualID := entity.ID + if *entity.DataType == HistoryTypeID { + entity.ID = *entity.ClientDefinedUniqueTag + } + + // Normal sync item + av, err := dynamodbattribute.MarshalMap(*entity) + if *entity.DataType == HistoryTypeID { + entity.ID = actualID + } + + if err != nil { + return false, fmt.Errorf("error marshalling sync item to insert sync entity: %w", err) + } + input := &dynamodb.PutItemInput{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + } + _, err = dynamo.PutItem(input) + if err != nil { + return false, fmt.Errorf("error calling PutItem to insert sync item: %w", err) + } + return false, nil +} + +// HasServerDefinedUniqueTag check the tag item to see if there is already a +// tag item exists with the tag value for a specific client. 
+func (dynamo *Dynamo) HasServerDefinedUniqueTag(clientID string, tag string) (bool, error) { + tagItem := NewServerClientUniqueTagItemQuery(clientID, tag, true) + key, err := dynamodbattribute.MarshalMap(tagItem) + if err != nil { + return false, fmt.Errorf("error marshalling key to check if server tag existed: %w", err) + } + + input := &dynamodb.GetItemInput{ + Key: key, + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + } + + out, err := dynamo.GetItem(input) + if err != nil { + return false, fmt.Errorf("error calling GetItem to check if server tag existed: %w", err) + } + + return out.Item != nil, nil +} + +func (dynamo *Dynamo) HasItem(clientID string, ID string) (bool, error) { + primaryKey := PrimaryKey{ClientID: clientID, ID: ID} + key, err := dynamodbattribute.MarshalMap(primaryKey) + + if err != nil { + return false, fmt.Errorf("error marshalling key to check if item existed: %w", err) + } + + input := &dynamodb.GetItemInput{ + Key: key, + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + } + + out, err := dynamo.GetItem(input) + if err != nil { + return false, fmt.Errorf("error calling GetItem to check if item existed: %w", err) + } + + return out.Item != nil, nil +} + +// InsertSyncEntitiesWithServerTags is used to insert sync entities with +// server-defined unique tags. To ensure the uniqueness, for each sync entity, +// we will write a tag item and a sync item. Items for all the entities in the +// array would be written into DB in one transaction. +func (dynamo *Dynamo) InsertSyncEntitiesWithServerTags(entities []*SyncEntity) error { + items := []*dynamodb.TransactWriteItem{} + for _, entity := range entities { + // Create a condition for inserting new items only. 
+ cond := expression.AttributeNotExists(expression.Name(pk)) + expr, err := expression.NewBuilder().WithCondition(cond).Build() + if err != nil { + return fmt.Errorf("error building expression to insert sync entity with server tag: %w", err) + } + + // Additional item for ensuring tag's uniqueness for a specific client. + item := NewServerClientUniqueTagItem(entity.ClientID, *entity.ServerDefinedUniqueTag, true) + av, err := dynamodbattribute.MarshalMap(*item) + if err != nil { + return fmt.Errorf("error marshalling tag item to insert sync entity with server tag: %w", err) + } + tagItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + + // Normal sync item + av, err = dynamodbattribute.MarshalMap(*entity) + if err != nil { + return fmt.Errorf("error marshalling sync item to insert sync entity with server tag: %w", err) + } + syncItem := &dynamodb.TransactWriteItem{ + Put: &dynamodb.Put{ + Item: av, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + TableName: aws.String(Table), + }, + } + + items = append(items, tagItem) + items = append(items, syncItem) + } + + _, err := dynamo.TransactWriteItems( + &dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + return fmt.Errorf("error writing sync entities with server tags in a transaction: %w", err) + } + return nil +} + +// DisableSyncChain marks a chain as disabled so no further updates or commits can happen +func (dynamo *Dynamo) DisableSyncChain(clientID string) error { + now := aws.Int64(utils.UnixMilli(time.Now())) + disabledMarker := DisabledMarkerItem{ + ClientID: clientID, + ID: disabledChainID, + Reason: reasonDeleted, + Mtime: now, + Ctime: now, + } + + av, err := dynamodbattribute.MarshalMap(disabledMarker) + if err != nil { + 
return fmt.Errorf("error marshalling disabled marker: %w", err)
+ }
+
+ markerInput := &dynamodb.PutItemInput{
+ Item: av,
+ TableName: aws.String(Table),
+ }
+
+ _, err = dynamo.PutItem(markerInput)
+ if err != nil {
+ return fmt.Errorf("error calling PutItem to insert disabled marker: %w", err)
+ }
+
+ return nil
+}
+
+// ClearServerData deletes all items for a given clientID
+func (dynamo *Dynamo) ClearServerData(clientID string) ([]SyncEntity, error) {
+ syncEntities := []SyncEntity{}
+ pkb := expression.Key(pk)
+ pkv := expression.Value(clientID)
+ keyCond := expression.KeyEqual(pkb, pkv)
+ exprs := expression.NewBuilder().WithKeyCondition(keyCond)
+ expr, err := exprs.Build()
+ if err != nil {
+ return syncEntities, fmt.Errorf("error building expression to clear server data: %w", err)
+ }
+
+ input := &dynamodb.QueryInput{
+ ExpressionAttributeNames: expr.Names(),
+ ExpressionAttributeValues: expr.Values(),
+ KeyConditionExpression: expr.KeyCondition(),
+ FilterExpression: expr.Filter(),
+ TableName: aws.String(Table),
+ }
+
+ out, err := dynamo.Query(input)
+ if err != nil {
+ return syncEntities, fmt.Errorf("error doing query to clear server data: %w", err)
+ }
+ count := *out.Count
+
+ err = dynamodbattribute.UnmarshalListOfMaps(out.Items, &syncEntities)
+ if err != nil {
+ return syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err)
+ }
+
+ var i, j int64
+ for i = 0; i < count; i += maxTransactDeleteItemSize {
+ j = i + maxTransactDeleteItemSize
+ if j > count {
+ j = count
+ }
+
+ items := []*dynamodb.TransactWriteItem{}
+ for _, item := range syncEntities[i:j] {
+ if item.ID == disabledChainID {
+ continue
+ }
+
+ // Fail delete if race condition detected (modified time has changed). 
+ if item.Version != nil && item.Mtime != nil {
+ cond := expression.Name("Mtime").Equal(expression.Value(*item.Mtime))
+ expr, err := expression.NewBuilder().WithCondition(cond).Build()
+ if err != nil {
+ return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err)
+ }
+
+ writeItem := dynamodb.TransactWriteItem{
+ Delete: &dynamodb.Delete{
+ ConditionExpression: expr.Condition(),
+ ExpressionAttributeNames: expr.Names(),
+ ExpressionAttributeValues: expr.Values(),
+ TableName: aws.String(Table),
+ Key: map[string]*dynamodb.AttributeValue{
+ pk: {
+ S: aws.String(item.ClientID),
+ },
+ sk: {
+ S: aws.String(item.ID),
+ },
+ },
+ },
+ }
+
+ items = append(items, &writeItem)
+ } else {
+ // If row doesn't hold Mtime, delete as usual.
+ writeItem := dynamodb.TransactWriteItem{
+ Delete: &dynamodb.Delete{
+ TableName: aws.String(Table),
+ Key: map[string]*dynamodb.AttributeValue{
+ pk: {
+ S: aws.String(item.ClientID),
+ },
+ sk: {
+ S: aws.String(item.ID),
+ },
+ },
+ },
+ }
+
+ items = append(items, &writeItem)
+ }
+
+ }
+
+ _, err = dynamo.TransactWriteItems(&dynamodb.TransactWriteItemsInput{TransactItems: items})
+ if err != nil {
+ return syncEntities, fmt.Errorf("error deleting sync entities for client %s: %w", clientID, err)
+ }
+ }
+
+ return syncEntities, nil
+}
+
+// IsSyncChainDisabled checks whether a given sync chain has been deleted
+func (dynamo *Dynamo) IsSyncChainDisabled(clientID string) (bool, error) {
+ key, err := dynamodbattribute.MarshalMap(ItemQuery{
+ ClientID: clientID,
+ ID: disabledChainID,
+ })
+ if err != nil {
+ return false, fmt.Errorf("error marshalling key to check if sync chain disabled: %w", err)
+ }
+
+ input := &dynamodb.GetItemInput{
+ Key: key,
+ TableName: aws.String(Table),
+ }
+
+ out, err := dynamo.GetItem(input)
+ if err != nil {
+ return false, fmt.Errorf("error calling GetItem to check if sync chain disabled: %w", err)
+ }
+
+ return len(out.Item) > 0, nil
+}
+
+// UpdateSyncEntity 
updates a sync item in dynamoDB. +func (dynamo *Dynamo) UpdateSyncEntity(entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) { + id := entity.ID + if *entity.DataType == HistoryTypeID { + id = *entity.ClientDefinedUniqueTag + } + primaryKey := PrimaryKey{ClientID: entity.ClientID, ID: id} + key, err := dynamodbattribute.MarshalMap(primaryKey) + if err != nil { + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + } + + // condition to ensure the request is update only... + cond := expression.AttributeExists(expression.Name(pk)) + // ...and the version matches, if applicable + if *entity.DataType != HistoryTypeID { + cond = expression.And(cond, expression.Name("Version").Equal(expression.Value(oldVersion))) + } + + update := expression.Set(expression.Name("Version"), expression.Value(entity.Version)) + update = update.Set(expression.Name("Mtime"), expression.Value(entity.Mtime)) + update = update.Set(expression.Name("Specifics"), expression.Value(entity.Specifics)) + update = update.Set(expression.Name("DataTypeMtime"), expression.Value(entity.DataTypeMtime)) + + // Update optional fields only if the value is not null. 
+ if entity.UniquePosition != nil { + update = update.Set(expression.Name("UniquePosition"), expression.Value(entity.UniquePosition)) + } + if entity.ParentID != nil { + update = update.Set(expression.Name("ParentID"), expression.Value(entity.ParentID)) + } + if entity.Name != nil { + update = update.Set(expression.Name("Name"), expression.Value(entity.Name)) + } + if entity.NonUniqueName != nil { + update = update.Set(expression.Name("NonUniqueName"), expression.Value(entity.NonUniqueName)) + } + if entity.Deleted != nil { + update = update.Set(expression.Name("Deleted"), expression.Value(entity.Deleted)) + } + if entity.Folder != nil { + update = update.Set(expression.Name("Folder"), expression.Value(entity.Folder)) + } + + expr, err := expression.NewBuilder().WithCondition(cond).WithUpdate(update).Build() + if err != nil { + return false, false, fmt.Errorf("error building expression to update sync entity: %w", err) + } + + // Soft-delete a sync item with a client tag, use a transaction to delete its + // tag item too. 
+ if entity.Deleted != nil && entity.ClientDefinedUniqueTag != nil && *entity.Deleted && *entity.DataType != HistoryTypeID { + pk := PrimaryKey{ + ClientID: entity.ClientID, ID: clientTagItemPrefix + *entity.ClientDefinedUniqueTag} + tagItemKey, err := dynamodbattribute.MarshalMap(pk) + if err != nil { + return false, false, fmt.Errorf("error marshalling key to update sync entity: %w", err) + } + + items := []*dynamodb.TransactWriteItem{} + updateSyncItem := &dynamodb.TransactWriteItem{ + Update: &dynamodb.Update{ + Key: key, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + UpdateExpression: expr.Update(), + ReturnValuesOnConditionCheckFailure: aws.String(dynamodb.ReturnValueAllOld), + TableName: aws.String(Table), + }, + } + deleteTagItem := &dynamodb.TransactWriteItem{ + Delete: &dynamodb.Delete{ + Key: tagItemKey, + TableName: aws.String(Table), + }, + } + items = append(items, updateSyncItem) + items = append(items, deleteTagItem) + + _, err = dynamo.TransactWriteItems( + &dynamodb.TransactWriteItemsInput{TransactItems: items}) + if err != nil { + // Return conflict if the update condition fails. + if canceledException, ok := err.(*dynamodb.TransactionCanceledException); ok { + for _, reason := range canceledException.CancellationReasons { + if reason.Code != nil && *reason.Code == conditionalCheckFailed { + return true, false, nil + } + } + } + + return false, false, fmt.Errorf("error deleting sync item and tag item in a transaction: %w", err) + } + + // Successfully soft-delete the sync item and delete the tag item. + return false, true, nil + } + + // Not deleting a sync item with a client tag, do a normal update on sync + // item. 
+ input := &dynamodb.UpdateItemInput{ + Key: key, + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + ConditionExpression: expr.Condition(), + UpdateExpression: expr.Update(), + ReturnValues: aws.String(dynamodb.ReturnValueAllOld), + TableName: aws.String(Table), + } + + out, err := dynamo.UpdateItem(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + // Return conflict if the write condition fails. + if aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { + return true, false, nil + } + } + return false, false, fmt.Errorf("error calling UpdateItem to update sync entity: %w", err) + } + + // Unmarshal out.Attributes + oldEntity := &SyncEntity{} + err = dynamodbattribute.UnmarshalMap(out.Attributes, oldEntity) + if err != nil { + return false, false, fmt.Errorf("error unmarshalling old sync entity: %w", err) + } + if entity.Deleted == nil { // No updates on Deleted this time. + deleted = false + } else if oldEntity.Deleted == nil { // Consider it as Deleted = false. + deleted = *entity.Deleted + } else { + deleted = !*oldEntity.Deleted && *entity.Deleted + } + return false, deleted, nil +} + +// GetUpdatesForType returns sync entities of a data type where it's mtime is +// later than the client token. +// To do this in dynamoDB, we use (ClientID, DataType#Mtime) as GSI to get a +// list of (ClientID, ID) primary keys with the given condition, then read the +// actual sync item using the list of primary keys. +func (dynamo *Dynamo) GetUpdatesForType(dataType int, minMtime *int64, maxMtime *int64, fetchFolders bool, clientID string, maxSize int, ascOrder bool) (bool, []SyncEntity, error) { + syncEntities := []SyncEntity{} + + // Get (ClientID, ID) pairs which are updates after mtime for a data type, + // sorted by dataType#mTime. e.g. sorted by mtime since dataType is the same. 
+ var dataTypeMtimeUpperBound, dataTypeMtimeLowerBound string + if minMtime != nil { + dataTypeMtimeLowerBound = strconv.Itoa(dataType) + "#" + strconv.FormatInt(*minMtime+1, 10) + } else { + dataTypeMtimeLowerBound = strconv.Itoa(dataType) + "#0" + } + if maxMtime != nil { + dataTypeMtimeUpperBound = strconv.Itoa(dataType) + "#" + strconv.FormatInt(*maxMtime-1, 10) + } else { + dataTypeMtimeUpperBound = strconv.Itoa(dataType+1) + "#0" + } + pkCond := expression.Key(clientIDDataTypeMtimeIdxPk).Equal(expression.Value(clientID)) + skCond := expression.KeyBetween( + expression.Key(clientIDDataTypeMtimeIdxSk), + expression.Value(dataTypeMtimeLowerBound), + expression.Value(dataTypeMtimeUpperBound)) + keyCond := expression.KeyAnd(pkCond, skCond) + exprs := expression.NewBuilder().WithKeyCondition(keyCond) + + if !fetchFolders { // Filter folder entities out if fetchFolder is false. + exprs = exprs.WithFilter( + expression.Equal(expression.Name("Folder"), expression.Value(false))) + } + + expr, err := exprs.Build() + if err != nil { + return false, syncEntities, fmt.Errorf("error building expression to get updates: %w", err) + } + + input := &dynamodb.QueryInput{ + IndexName: aws.String(clientIDDataTypeMtimeIdx), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + KeyConditionExpression: expr.KeyCondition(), + FilterExpression: expr.Filter(), + ProjectionExpression: aws.String(projPk), + TableName: aws.String(Table), + Limit: aws.Int64(int64(maxSize)), + ScanIndexForward: &ascOrder, + } + + out, err := dynamo.Query(input) + if err != nil { + return false, syncEntities, fmt.Errorf("error doing query to get updates: %w", err) + } + + hasChangesRemaining := false + if out.LastEvaluatedKey != nil && len(out.LastEvaluatedKey) > 0 { + hasChangesRemaining = true + } + + count := *(out.Count) + if count == 0 { // No updates + return hasChangesRemaining, syncEntities, nil + } + + // Use return (ClientID, ID) primary keys to get the actual 
items. + var outAv []map[string]*dynamodb.AttributeValue + var i, j int64 + for i = 0; i < count; i += maxBatchGetItemSize { + j = i + maxBatchGetItemSize + if j > count { + j = count + } + + batchInput := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ + Table: { + Keys: out.Items[i:j], + }, + }, + } + + err := dynamo.BatchGetItemPages(batchInput, + func(batchOut *dynamodb.BatchGetItemOutput, last bool) bool { + outAv = append(outAv, batchOut.Responses[Table]...) + return last + }) + if err != nil { + return false, syncEntities, fmt.Errorf("error getting update items in a batch: %w", err) + } + } + + err = dynamodbattribute.UnmarshalListOfMaps(outAv, &syncEntities) + if err != nil { + return false, syncEntities, fmt.Errorf("error unmarshalling updated sync entities: %w", err) + } + + // filter out any expired items, i.e. history sync entities over 90 days old + nowUnix := time.Now().Unix() + var filteredSyncEntities []SyncEntity + for _, syncEntity := range syncEntities { + if syncEntity.ExpirationTime != nil && *syncEntity.ExpirationTime > 0 { + if *syncEntity.ExpirationTime < nowUnix { + continue + } + } + filteredSyncEntities = append(filteredSyncEntities, syncEntity) + } + + var sortInterface sort.Interface = SyncEntityByMtime(filteredSyncEntities) + if !ascOrder { + sortInterface = sort.Reverse(sortInterface) + } + sort.Sort(sortInterface) + return hasChangesRemaining, filteredSyncEntities, nil +} + +func (dynamo *Dynamo) GetEntity(query ItemQuery) (*SyncEntity, error) { + key, err := dynamodbattribute.MarshalMap(query) + if err != nil { + return nil, fmt.Errorf("error marshalling key for GetEntity: %w", err) + } + + input := &dynamodb.GetItemInput{ + TableName: aws.String(Table), + Key: key, + } + + result, err := dynamo.GetItem(input) + if err != nil { + return nil, fmt.Errorf("error getting item: %w", err) + } + + if result.Item == nil { + return nil, nil + } + + var entity SyncEntity + err = 
dynamodbattribute.UnmarshalMap(result.Item, &entity) + if err != nil { + return nil, fmt.Errorf("error unmarshalling item: %w", err) + } + + return &entity, nil +} + +func (dynamo *Dynamo) DeleteEntities(entities []*SyncEntity) error { + var writeRequests []*dynamodb.WriteRequest + + for _, entity := range entities { + key, err := dynamodbattribute.MarshalMap(ItemQuery{ + ClientID: entity.ClientID, + ID: entity.ID, + }) + if err != nil { + return fmt.Errorf("error marshalling key for deletion: %w", err) + } + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: key, + }, + }) + if entity.ClientDefinedUniqueTag != nil && len(*entity.ClientDefinedUniqueTag) > 0 { + key, err := dynamodbattribute.MarshalMap(NewServerClientUniqueTagItemQuery(entity.ClientID, *entity.ClientDefinedUniqueTag, false)) + if err != nil { + return fmt.Errorf("error marshalling client tag key for deletion: %w", err) + } + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: key, + }, + }) + } + } + + const batchSize = 25 + for i := 0; i < len(writeRequests); i += batchSize { + end := i + batchSize + if end > len(writeRequests) { + end = len(writeRequests) + } + input := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ + Table: writeRequests[i:end], + }, + } + + _, err := dynamo.BatchWriteItem(input) + if err != nil { + return fmt.Errorf("failed to delete entities: %w", err) + } + } + + return nil +} diff --git a/datastore/sync_entity_dynamo_test.go b/datastore/sync_entity_dynamo_test.go new file mode 100644 index 00000000..8666d1fd --- /dev/null +++ b/datastore/sync_entity_dynamo_test.go @@ -0,0 +1,718 @@ +package datastore_test + +import ( + "sort" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/brave/go-sync/utils" + 
"github.com/stretchr/testify/suite" +) + +type SyncEntityDynamoTestSuite struct { + suite.Suite + dynamo *datastore.Dynamo +} + +func (suite *SyncEntityDynamoTestSuite) SetupSuite() { + datastore.Table = "client-entity-test-datastore" + var err error + suite.dynamo, err = datastore.NewDynamo(true) + suite.Require().NoError(err, "Failed to get dynamoDB session") +} + +func (suite *SyncEntityDynamoTestSuite) SetupTest() { + suite.Require().NoError( + datastoretest.ResetDynamoTable(suite.dynamo), "Failed to reset table") +} + +func (suite *SyncEntityDynamoTestSuite) TearDownTest() { + suite.Require().NoError( + datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") +} + +func (suite *SyncEntityDynamoTestSuite) TestInsertSyncEntity() { + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + } + entity2 := entity1 + entity2.ID = "id2" + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity with other ID should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().Error(err, "InsertSyncEntity with the same ClientID and ID should fail") + + // Each InsertSyncEntity without client tag should result in one sync item saved. 
+ tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal( + 0, len(tagItems), "Insert without client tag should not insert tag items") + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + + // Insert entity with client tag should result in one sync item and one tag + // item saved. + entity3 := entity1 + entity3.ID = "id3" + entity3.ClientDefinedUniqueTag = aws.String("tag1") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Insert entity with different tag for same ClientID should succeed. + entity4 := entity3 + entity4.ID = "id4" + entity4.ClientDefinedUniqueTag = aws.String("tag2") + _, err = suite.dynamo.InsertSyncEntity(&entity4) + suite.Require().NoError(err, "InsertSyncEntity with different server tag should succeed") + + // Insert entity with the same client tag and ClientID should fail with conflict. + entity4Copy := entity4 + entity4Copy.ID = "id4_copy" + conflict, err := suite.dynamo.InsertSyncEntity(&entity4Copy) + suite.Require().Error(err, "InsertSyncEntity with the same client tag and ClientID should fail") + suite.Assert().True(conflict, "Return conflict for duplicate client tag") + + // Insert entity with the same client tag for other client should not fail. + entity5 := entity3 + entity5.ClientID = "client2" + entity5.ID = "id5" + _, err = suite.dynamo.InsertSyncEntity(&entity5) + suite.Require().NoError(err, + "InsertSyncEntity with the same client tag for another client should succeed") + + // Check sync items are saved for entity1, entity2, entity3, entity4, entity5. 
+ syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3, entity4, entity5} + sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + + // Check tag items should be saved for entity3, entity4, entity5. + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + + // Check that Ctime and Mtime have been set, reset to zero value for subsequent + // tests + for i := 0; i < len(tagItems); i++ { + suite.Assert().NotNil(tagItems[i].Ctime) + suite.Assert().NotNil(tagItems[i].Mtime) + + tagItems[i].Ctime = nil + tagItems[i].Mtime = nil + } + + suite.Require().NoError(err, "ScanTagItems should succeed") + expectedTagItems := []datastore.ServerClientUniqueTagItem{ + {ClientID: "client1", ID: "Client#tag1"}, + {ClientID: "client1", ID: "Client#tag2"}, + {ClientID: "client2", ID: "Client#tag1"}, + } + sort.Sort(datastore.TagItemByClientIDID(tagItems)) + suite.Assert().Equal(expectedTagItems, tagItems) +} + +func (suite *SyncEntityDynamoTestSuite) TestHasServerDefinedUniqueTag() { + // Insert entities with server tags using InsertSyncEntitiesWithServerTags. 
+ tag1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(true), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + ServerDefinedUniqueTag: aws.String("tag1"), + } + tag2 := tag1 + tag2.ClientID = "client2" + tag2.ID = "id2" + tag2.ServerDefinedUniqueTag = aws.String("tag2") + entities := []*datastore.SyncEntity{&tag1, &tag2} + + err := suite.dynamo.InsertSyncEntitiesWithServerTags(entities) + suite.Require().NoError(err, "Insert sync entities should succeed") + + hasTag, err := suite.dynamo.HasServerDefinedUniqueTag("client1", "tag1") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client1", "tag2") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag1") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag2") + suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") + suite.Assert().Equal(hasTag, true) +} + +func (suite *SyncEntityDynamoTestSuite) TestHasItem() { + // Insert entity which will be checked later + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + entity2 := entity1 + entity2.ClientID = "client2" + entity2.ID = "id2" + + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") 
+ _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + hasTag, err := suite.dynamo.HasItem("client1", "id1") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasItem("client2", "id2") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, true) + + hasTag, err = suite.dynamo.HasItem("client2", "id3") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, false) + + hasTag, err = suite.dynamo.HasItem("client3", "id2") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().Equal(hasTag, false) +} + +func (suite *SyncEntityDynamoTestSuite) TestInsertSyncEntitiesWithServerTags() { + // Insert with same ClientID and server tag would fail. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + ServerDefinedUniqueTag: aws.String("tag1"), + } + entity2 := entity1 + entity2.ID = "id2" + entities := []*datastore.SyncEntity{&entity1, &entity2} + suite.Require().Error( + suite.dynamo.InsertSyncEntitiesWithServerTags(entities), + "Insert with same ClientID and server tag would fail") + + // Check nothing is written to DB when it fails. 
+ syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(0, len(syncItems), "No items should be written if fail") + tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(tagItems), "No items should be written if fail") + + entity2.ServerDefinedUniqueTag = aws.String("tag2") + entity3 := entity1 + entity3.ClientID = "client2" + entity3.ID = "id3" + entities = []*datastore.SyncEntity{&entity1, &entity2, &entity3} + suite.Require().NoError( + suite.dynamo.InsertSyncEntitiesWithServerTags(entities), + "InsertSyncEntitiesWithServerTags should succeed") + + // Scan DB and check all items are saved + syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3} + sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + + // Check that Ctime and Mtime have been set, reset to zero value for subsequent + // tests + for i := 0; i < len(tagItems); i++ { + suite.Assert().NotNil(tagItems[i].Ctime) + suite.Assert().NotNil(tagItems[i].Mtime) + + tagItems[i].Ctime = nil + tagItems[i].Mtime = nil + } + + expectedTagItems := []datastore.ServerClientUniqueTagItem{ + {ClientID: "client1", ID: "Server#tag1"}, + {ClientID: "client1", ID: "Server#tag2"}, + {ClientID: "client2", ID: "Server#tag1"}, + } + sort.Sort(datastore.TagItemByClientIDID(tagItems)) + suite.Assert().Equal(expectedTagItems, tagItems) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_Basic() { + // Insert three new items. 
+ entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + entity2 := entity1 + entity2.ID = "id2" + entity3 := entity1 + entity3.ID = "id3" + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + // Check sync entities are inserted correctly in DB. + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2, entity3}) + + // Update without optional fields. + updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(23456789) + updateEntity1.Mtime = aws.Int64(23456789) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.Deleted = aws.Bool(true) + updateEntity1.DataTypeMtime = aws.String("123#23456789") + updateEntity1.Specifics = []byte{3, 4} + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + // Update with optional fields. 
+	updateEntity2 := updateEntity1
+	updateEntity2.ID = "id2"
+	updateEntity2.Deleted = aws.Bool(false)
+	updateEntity2.Folder = aws.Bool(false)
+	updateEntity2.UniquePosition = []byte{5, 6}
+	updateEntity2.ParentID = aws.String("parentID")
+	updateEntity2.Name = aws.String("name")
+	updateEntity2.NonUniqueName = aws.String("non_unique_name")
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().False(conflict, "Successful update should not have conflict")
+	suite.Assert().False(deleted, "Successful update should not result in delete")
+
+	// Update with nil Folder and Deleted
+	updateEntity3 := updateEntity1
+	updateEntity3.ID = "id3"
+	updateEntity3.Folder = nil
+	updateEntity3.Deleted = nil
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().False(conflict, "Successful update should not have conflict")
+	suite.Assert().False(deleted, "Successful update should not result in delete")
+	// Reset these back to false because they will be the expected value in DB.
+	updateEntity3.Folder = aws.Bool(false)
+	updateEntity3.Deleted = aws.Bool(false)
+
+	// Updating again with a mismatched old version should report a conflict
+	// and must not delete anything.
+	conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678)
+	suite.Require().NoError(err, "UpdateSyncEntity should succeed")
+	suite.Assert().True(conflict, "Update with a mismatched version should return conflict")
+	suite.Assert().False(deleted, "Conflicted update should not result in delete")
+
+	// Check sync entities are updated correctly in DB.
+ syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity1, updateEntity2, updateEntity3}) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_HistoryType() { + // Insert a history item + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + ClientDefinedUniqueTag: aws.String("client_tag1"), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(963985), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + conflict, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(2) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.Mtime = aws.Int64(24242424) + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // should still succeed with the same version number, + // since the version number should be ignored + updateEntity2 := updateEntity1 + updateEntity2.Mtime = aws.Int64(42424242) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) + suite.Require().NoError(err, "UpdateSyncEntity should not return an error") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + updateEntity3 := entity1 + updateEntity3.Deleted = aws.Bool(true) + + conflict, deleted, err = 
suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + updateEntity3.ID = *updateEntity3.ClientDefinedUniqueTag + suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity3}) +} + +func (suite *SyncEntityDynamoTestSuite) TestUpdateSyncEntity_ReuseClientTag() { + // Insert an item with client tag. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + ClientDefinedUniqueTag: aws.String("client_tag"), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + conflict, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + // Check a tag item is inserted. + tagItems, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") + + // Update it to version 23456789. 
+ updateEntity1 := entity1 + updateEntity1.Version = aws.Int64(23456789) + updateEntity1.Mtime = aws.Int64(23456789) + updateEntity1.Folder = aws.Bool(true) + updateEntity1.DataTypeMtime = aws.String("123#23456789") + updateEntity1.Specifics = []byte{3, 4} + conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // Soft-delete the item with wrong version should get conflict. + updateEntity1.Deleted = aws.Bool(true) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Version mismatched update should have conflict") + suite.Assert().False(deleted, "Successful update should not result in delete") + + // Soft-delete the item with matched version. + updateEntity1.Version = aws.Int64(34567890) + conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Successful update should not have conflict") + suite.Assert().True(deleted, "Successful update should result in delete") + + // Check tag item is deleted. + tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(tagItems), "Tag item should be deleted") + + // Insert another item with the same client tag again. + entity2 := entity1 + entity2.ID = "id2" + conflict, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Successful insert should not have conflict") + + // Check a tag item is inserted. 
+ tagItems, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") +} + +func (suite *SyncEntityDynamoTestSuite) TestGetUpdatesForType() { + // Insert items for testing. + entity1 := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + Mtime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(true), + Deleted: aws.Bool(false), + DataTypeMtime: aws.String("123#12345678"), + Specifics: []byte{1, 2}, + } + + entity2 := entity1 + entity2.ID = "id2" + entity2.Folder = aws.Bool(false) + entity2.Mtime = aws.Int64(12345679) + entity2.DataTypeMtime = aws.String("123#12345679") + + entity3 := entity2 + entity3.ID = "id3" + entity3.DataType = aws.Int(124) + entity3.DataTypeMtime = aws.String("124#12345679") + + // non-expired item + entity4 := entity2 + entity4.ClientID = "client2" + entity4.ID = "id4" + entity4.ExpirationTime = aws.Int64(time.Now().Unix() + 300) + + // expired item + entity5 := entity2 + entity5.ClientID = "client2" + entity5.ID = "id5" + entity5.ExpirationTime = aws.Int64(time.Now().Unix() - 300) + + _, err := suite.dynamo.InsertSyncEntity(&entity1) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity2) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity3) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity4) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + _, err = suite.dynamo.InsertSyncEntity(&entity5) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Get all updates for type 123 and client1 using token = 0. 
+ var token int64 + hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 124 and client1 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity3}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 123 and client2 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client2", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity4}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for type 124 and client2 using token = 0. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, &token, nil, true, "client2", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(len(syncItems), 0) + suite.Assert().False(hasChangesRemaining) + + // Test maxSize will limit the return entries size, and hasChangesRemaining + // should be true when there are more updates available in the DB. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 1, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1}) + suite.Assert().True(hasChangesRemaining) + + // Test when num of query items equal to the limit, hasChangesRemaining should + // be true. 
+ hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 2, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) + suite.Assert().True(hasChangesRemaining) + + // Test fetchFolders will remove folder items if false + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, false, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) + suite.Assert().False(hasChangesRemaining) + + // Get all updates for a type for a client using mtime of one item as token. + token = 12345678 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 100, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) + suite.Assert().False(hasChangesRemaining) + + // Test batch is working correctly for over 100 items + err = datastoretest.ResetDynamoTable(suite.dynamo) + suite.Require().NoError(err, "Failed to reset table") + + expectedSyncItems := []datastore.SyncEntity{} + entity1 = datastore.SyncEntity{ + ClientID: "client1", + Version: aws.Int64(1), + Ctime: aws.Int64(12345678), + DataType: aws.Int(123), + Folder: aws.Bool(false), + Deleted: aws.Bool(false), + Specifics: []byte{1, 2}, + } + + mtime := utils.UnixMilli(time.Now()) + for i := 1; i <= 250; i++ { + mtime = mtime + 1 + entity := entity1 + entity.ID = "id" + strconv.Itoa(i) + entity.Mtime = aws.Int64(mtime) + entity.DataTypeMtime = aws.String("123#" + strconv.FormatInt(*entity.Mtime, 10)) + _, err := suite.dynamo.InsertSyncEntity(&entity) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + expectedSyncItems = append(expectedSyncItems, entity) + } + + // All items should be returned and sorted by Mtime. 
+ token = 0 + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 300, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + sort.Sort(datastore.SyncEntityByMtime(expectedSyncItems)) + suite.Assert().Equal(syncItems, expectedSyncItems) + suite.Assert().False(hasChangesRemaining) + + // Test that when maxGUBatchSize is smaller than total updates, the first n + // items ordered by Mtime should be returned. + hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, &token, nil, true, "client1", 200, true) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().Equal(syncItems, expectedSyncItems[0:200]) + suite.Assert().True(hasChangesRemaining) +} + +func (suite *SyncEntityDynamoTestSuite) TestDisableSyncChain() { + clientID := "client1" + id := "disabled_chain" + err := suite.dynamo.DisableSyncChain(clientID) + suite.Require().NoError(err, "DisableSyncChain should succeed") + e, err := datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(1, len(e)) + suite.Assert().Equal(clientID, e[0].ClientID) + suite.Assert().Equal(id, e[0].ID) +} + +func (suite *SyncEntityDynamoTestSuite) TestIsSyncChainDisabled() { + clientID := "client1" + + disabled, err := suite.dynamo.IsSyncChainDisabled(clientID) + suite.Require().NoError(err, "IsSyncChainDisabled should succeed") + suite.Assert().Equal(false, disabled) + + err = suite.dynamo.DisableSyncChain(clientID) + suite.Require().NoError(err, "DisableSyncChain should succeed") + disabled, err = suite.dynamo.IsSyncChainDisabled(clientID) + suite.Require().NoError(err, "IsSyncChainDisabled should succeed") + suite.Assert().Equal(true, disabled) +} + +func (suite *SyncEntityDynamoTestSuite) TestClearServerData() { + // Test clear sync entities + entity := datastore.SyncEntity{ + ClientID: "client1", + ID: "id1", + Version: aws.Int64(1), + Ctime: 
aws.Int64(12345678),
+		Mtime:         aws.Int64(12345678),
+		DataType:      aws.Int(123),
+		Folder:        aws.Bool(false),
+		Deleted:       aws.Bool(false),
+		DataTypeMtime: aws.String("123#12345678"),
+	}
+	_, err := suite.dynamo.InsertSyncEntity(&entity)
+	suite.Require().NoError(err, "InsertSyncEntity should succeed")
+
+	e, err := datastoretest.ScanSyncEntities(suite.dynamo)
+	suite.Require().NoError(err, "ScanSyncEntities should succeed")
+	suite.Assert().Equal(1, len(e))
+
+	e, err = suite.dynamo.ClearServerData(entity.ClientID)
+	suite.Require().NoError(err, "ClearServerData should succeed")
+	suite.Assert().Equal(1, len(e))
+
+	e, err = datastoretest.ScanSyncEntities(suite.dynamo)
+	suite.Require().NoError(err, "ScanSyncEntities should succeed")
+	suite.Assert().Equal(0, len(e))
+
+	// Test clear tagged items
+	entity1 := datastore.SyncEntity{
+		ClientID:               "client1",
+		ID:                     "id1",
+		Version:                aws.Int64(1),
+		Ctime:                  aws.Int64(12345678),
+		Mtime:                  aws.Int64(12345678),
+		DataType:               aws.Int(123),
+		Folder:                 aws.Bool(false),
+		Deleted:                aws.Bool(false),
+		DataTypeMtime:          aws.String("123#12345678"),
+		ServerDefinedUniqueTag: aws.String("tag1"),
+	}
+	entity2 := entity1
+	entity2.ID = "id2"
+	entity2.ServerDefinedUniqueTag = aws.String("tag2")
+	entities := []*datastore.SyncEntity{&entity1, &entity2}
+	suite.Require().NoError(
+		suite.dynamo.InsertSyncEntitiesWithServerTags(entities),
+		"InsertSyncEntitiesWithServerTags should succeed")
+
+	e, err = datastoretest.ScanSyncEntities(suite.dynamo)
+	suite.Require().NoError(err, "ScanSyncEntities should succeed")
+	suite.Assert().Equal(2, len(e), "Both sync entities should be written")
+
+	t, err := datastoretest.ScanTagItems(suite.dynamo)
+	suite.Require().NoError(err, "ScanTagItems should succeed")
+	suite.Assert().Equal(2, len(t), "Both tag items should be written")
+
+	e, err = suite.dynamo.ClearServerData(entity.ClientID)
+	suite.Require().NoError(err, "ClearServerData should succeed")
+	suite.Assert().Equal(4, len(e))
+
+	e, err =
datastoretest.ScanSyncEntities(suite.dynamo) + suite.Require().NoError(err, "ScanSyncEntities should succeed") + suite.Assert().Equal(0, len(e), "No items should be written if fail") + + t, err = datastoretest.ScanTagItems(suite.dynamo) + suite.Require().NoError(err, "ScanTagItems should succeed") + suite.Assert().Equal(0, len(t), "No items should be written if fail") +} + +func TestSyncEntityDynamoTestSuite(t *testing.T) { + suite.Run(t, new(SyncEntityDynamoTestSuite)) +} diff --git a/datastore/sync_entity_sql.go b/datastore/sync_entity_sql.go new file mode 100644 index 00000000..7d7508b5 --- /dev/null +++ b/datastore/sync_entity_sql.go @@ -0,0 +1,173 @@ +package datastore + +import ( + "encoding/hex" + "fmt" + "strings" + "time" + + "github.com/jmoiron/sqlx" +) + +var fieldsToInsert = []string{ + "id", "chain_id", "data_type", "ctime", "mtime", "specifics", + "deleted", "client_defined_unique_tag", "server_defined_unique_tag", "folder", "version", + "name", "originator_cache_guid", "originator_client_item_id", "parent_id", "non_unique_name", + "unique_position", +} + +func buildInsertQuery() string { + var insertValues []string + var setValues []string + for _, field := range fieldsToInsert { + insertValues = append(insertValues, ":"+field) + setValues = append(setValues, field+" = EXCLUDED."+field) + } + joinedFields := strings.Join(fieldsToInsert, ", ") + joinedInsertValues := strings.Join(insertValues, ", ") + joinedSetValues := strings.Join(setValues, ", ") + // We only want to update an existing row if it was previously deleted. 
+	// If it was not deleted, then it should be considered a conflict
+	return `INSERT INTO entities (` + joinedFields + `) VALUES (` + joinedInsertValues +
+		`) ON CONFLICT (chain_id, client_defined_unique_tag) DO UPDATE SET ` +
+		joinedSetValues + ` WHERE entities.deleted = true`
+}
+
+// InsertSyncEntities inserts multiple sync entities into the database.
+// Returns conflict=true when at least one entity hit the ON CONFLICT guard
+// (i.e. an existing, non-deleted row with the same chain_id/client tag),
+// detected by comparing affected rows to len(entities).
+func (sqlDB *SQLDB) InsertSyncEntities(tx *sqlx.Tx, entities []*SyncEntity) (conflict bool, err error) {
+	res, err := tx.NamedExec(sqlDB.insertQuery, entities)
+	if err != nil {
+		return false, fmt.Errorf("failed to insert entities: %w", err)
+	}
+	rowsAffected, err := res.RowsAffected()
+	if err != nil {
+		return false, fmt.Errorf("failed to get rows affected after insert: %w", err)
+	}
+
+	// if rows affected is not len(entities), then there must be a conflict. return true to indicate this condition.
+	return int(rowsAffected) != len(entities), nil
+}
+
+// HasItem checks if an item exists in the database
+// (keyed by chain ID plus client-defined unique tag).
+func (sqlDB *SQLDB) HasItem(tx *sqlx.Tx, chainID int64, clientTag string) (exists bool, err error) {
+	err = tx.QueryRowx("SELECT EXISTS(SELECT 1 FROM entities WHERE chain_id = $1 AND client_defined_unique_tag = $2)", chainID, clientTag).Scan(&exists)
+	if err != nil {
+		return false, fmt.Errorf("failed to check existence of item: %w", err)
+	}
+	return exists, nil
+}
+
+// UpdateSyncEntity updates a sync entity in the database.
+// History-type entities are matched by client tag and skip the optimistic
+// version check; all other types match by ID and require version == oldVersion.
+func (sqlDB *SQLDB) UpdateSyncEntity(tx *sqlx.Tx, entity *SyncEntity, oldVersion int64) (conflict bool, deleted bool, err error) {
+	var idCondition string
+	if *entity.DataType == HistoryTypeID {
+		idCondition = "client_defined_unique_tag = :client_defined_unique_tag"
+	} else {
+		idCondition = "id = :id"
+	}
+	whereClause := " WHERE " + idCondition + " AND chain_id = :chain_id AND deleted = false"
+	if *entity.DataType != HistoryTypeID {
+		// NOTE(review): mutates the caller's entity so the named query can bind
+		// :old_version — TODO confirm no caller relies on OldVersion afterwards.
+		entity.OldVersion = &oldVersion
+		whereClause += " AND version = :old_version"
+	}
+
+	// Only include optional columns that the caller actually supplied, so a
+	// nil field leaves the stored value untouched.
+	var updateFields []string
+	if entity.UniquePosition !=
nil { + updateFields = append(updateFields, "unique_position = :unique_position") + } + if entity.ParentID != nil { + updateFields = append(updateFields, "parent_id = :parent_id") + } + if entity.Name != nil { + updateFields = append(updateFields, "name = :name") + } + if entity.NonUniqueName != nil { + updateFields = append(updateFields, "non_unique_name = :non_unique_name") + } + if entity.Folder != nil { + updateFields = append(updateFields, "folder = :folder") + } + if entity.Deleted != nil { + updateFields = append(updateFields, "deleted = :deleted") + } + + var joinedUpdateFields string + if len(updateFields) > 0 { + joinedUpdateFields = ", " + strings.Join(updateFields, ", ") + } + query := ` + UPDATE entities + SET version = :version, + mtime = :mtime, + specifics = :specifics + ` + joinedUpdateFields + whereClause + + result, err := tx.NamedExec(query, entity) + if err != nil { + return false, false, fmt.Errorf("error updating entity: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return false, false, fmt.Errorf("error getting rows affected after update: %w", err) + } + + return rowsAffected == 0, entity.Deleted != nil && *entity.Deleted, nil +} + +// GetAndLockChainID retrieves and locks a chain ID for a given client ID +func (sqlDB *SQLDB) GetAndLockChainID(tx *sqlx.Tx, clientID string) (chainID *int64, err error) { + // Get chain ID and lock for updates + clientIDBytes, err := hex.DecodeString(clientID) + if err != nil { + clientIDBytes = []byte(clientID) + } + + var id int64 + + err = tx.QueryRowx(` + INSERT INTO chains (client_id, last_usage_time) VALUES ($1, $2) + ON CONFLICT (client_id) + DO UPDATE SET last_usage_time = EXCLUDED.last_usage_time + RETURNING id + `, clientIDBytes, time.Now()).Scan(&id) + if err != nil { + return nil, fmt.Errorf("failed to upsert chain: %w", err) + } + + // Once we have completely migrated over to SQL, we can change this to + // `FOR UPDATE`, and only lock upon commits. 
We need to lock for updates + // as we will be deleting older Dynamo items during update requests, and migrating + // them over to SQL. If another client in the chain updates during this process, + // the client may not receive some older items. + _, err = tx.Exec(`SELECT id FROM chains WHERE id = $1 FOR SHARE`, id) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock on chain: %w", err) + } + + return &id, nil +} + +// GetUpdatesForType retrieves updates for a specific data type +func (sqlDB *SQLDB) GetUpdatesForType(tx *sqlx.Tx, dataType int, clientToken int64, fetchFolders bool, chainID int64, maxSize int) (hasChangesRemaining bool, entities []SyncEntity, err error) { + var additionalCondition string + if !fetchFolders { + additionalCondition = "AND folder = false " + } + query := `SELECT * FROM entities + WHERE chain_id = $1 AND data_type = $2 AND mtime > $3 ` + additionalCondition + `ORDER BY mtime LIMIT $4` + + if err := tx.Select(&entities, query, chainID, dataType, clientToken, maxSize); err != nil { + return false, nil, fmt.Errorf("failed to get entity updates: %w", err) + } + return len(entities) == maxSize, entities, nil +} + +// DeleteChain removes a chain and its associated data from the database +func (sqlDB *SQLDB) DeleteChain(tx *sqlx.Tx, chainID int64) error { + _, err := tx.Exec(`DELETE FROM chains WHERE id = $1`, chainID) + if err != nil { + return fmt.Errorf("failed to delete chain with cascade: %w", err) + } + return nil +} diff --git a/datastore/sync_entity_sql_test.go b/datastore/sync_entity_sql_test.go new file mode 100644 index 00000000..de10e3de --- /dev/null +++ b/datastore/sync_entity_sql_test.go @@ -0,0 +1,323 @@ +package datastore_test + +import ( + "testing" + "time" + + "github.com/brave/go-sync/datastore" + "github.com/brave/go-sync/datastore/datastoretest" + "github.com/google/uuid" + "github.com/stretchr/testify/suite" +) + +type SyncEntitySQLTestSuite struct { + suite.Suite + sqlDB *datastore.SQLDB +} + +func (suite 
*SyncEntitySQLTestSuite) SetupSuite() { + var err error + suite.sqlDB, err = datastore.NewSQLDB(true) + suite.Require().NoError(err, "Failed to create SQL database") +} + +func (suite *SyncEntitySQLTestSuite) SetupTest() { + err := datastoretest.ResetSQLTables(suite.sqlDB) + suite.Require().NoError(err, "Failed to reset SQL tables") +} + +func createSyncEntity(dataType int32, mtime int64) datastore.SyncEntity { + id, _ := uuid.NewV7() + return datastore.SyncEntity{ + ID: id.String(), + Version: &[]int64{1}[0], + Ctime: &[]int64{12345678}[0], + Mtime: &mtime, + DataType: &[]int{int(dataType)}[0], + Folder: &[]bool{false}[0], + Deleted: &[]bool{false}[0], + Specifics: []byte{1, 2, 3}, + } +} + +func (suite *SyncEntitySQLTestSuite) TestInsertSyncEntity() { + entity := createSyncEntity(123, 12345678) + entity.ClientDefinedUniqueTag = &[]string{"test1"}[0] + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + entity.ChainID = chainID + + conflict, err := suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().False(conflict, "Insert should not conflict") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Try to insert the same entity again + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + id, _ := uuid.NewV7() + entity.ID = id.String() + conflict, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + suite.Assert().True(conflict, "Insert should conflict") + + err = tx.Rollback() + suite.Require().NoError(err, "Rollback should succeed") +} + +func (suite *SyncEntitySQLTestSuite) 
TestHasItem() { + entity := createSyncEntity(123, 12345678) + entity.ClientDefinedUniqueTag = &[]string{"test1"}[0] + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + entity.ChainID = chainID + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + exists, err := suite.sqlDB.HasItem(tx, *chainID, *entity.ClientDefinedUniqueTag) + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().True(exists, "Item should exist") + + exists, err = suite.sqlDB.HasItem(tx, *chainID, "non_existent_tag") + suite.Require().NoError(err, "HasItem should succeed") + suite.Assert().False(exists, "Item should not exist") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestUpdateSyncEntity() { + entity := createSyncEntity(123, 12345678) + entity.Specifics = []byte{1, 2} + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + entity.ChainID = chainID + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + + // Test normal update + updatedEntity := entity + updatedEntity.Version = &[]int64{2}[0] + updatedEntity.Mtime = &[]int64{23456789}[0] + updatedEntity.Folder = &[]bool{true}[0] + + // Test updating with wrong chain ID + wrongChainEntity := updatedEntity + wrongChainEntity.ChainID = &[]int64{*chainID + 1}[0] + conflict, deleted, err := suite.sqlDB.UpdateSyncEntity(tx, &wrongChainEntity, *entity.Version) + 
suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict due to wrong chain ID") + suite.Assert().False(deleted, "Entity should not be deleted") + + // Valid update + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Update should not conflict") + suite.Assert().False(deleted, "Entity should not be deleted") + + *entity.Version = *updatedEntity.Version + + *updatedEntity.Version = 3 + + // Test updating with wrong version + conflictEntity := updatedEntity + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &conflictEntity, 99) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict due to version mismatch") + suite.Assert().False(deleted, "Entity should not be deleted") + + // Test updating to deleted state + *updatedEntity.Deleted = true + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().False(conflict, "Update should not conflict") + suite.Assert().True(deleted, "Entity should be deleted") + + *entity.Version = *updatedEntity.Version + + // Test updating a deleted entity + *updatedEntity.Version = 4 + *updatedEntity.Deleted = false + conflict, deleted, err = suite.sqlDB.UpdateSyncEntity(tx, &updatedEntity, *entity.Version) + suite.Require().NoError(err, "UpdateSyncEntity should succeed") + suite.Assert().True(conflict, "Update should conflict") + suite.Assert().False(deleted, "Entity should not be deleted") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestGetUpdatesForType() { + entities := []datastore.SyncEntity{ + createSyncEntity(123, 12345678), + createSyncEntity(123, 12345679), + 
createSyncEntity(123, 12345680), + createSyncEntity(124, 12345680), + } + + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + chainID, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed") + + for i := range entities { + entities[i].ChainID = chainID + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entities[i]}) + suite.Require().NoError(err, "InsertSyncEntity should succeed") + } + + hasChangesRemaining, syncItems, err := suite.sqlDB.GetUpdatesForType(tx, 123, 0, true, *chainID, 100) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().False(hasChangesRemaining, "Should not have changes remaining") + suite.Assert().Equal(entities[:3], syncItems) + + hasChangesRemaining, syncItems, err = suite.sqlDB.GetUpdatesForType(tx, 123, 12345678, true, *chainID, 100) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().False(hasChangesRemaining, "Should not have changes remaining") + suite.Assert().Equal(entities[1:3], syncItems) + + hasChangesRemaining, syncItems, err = suite.sqlDB.GetUpdatesForType(tx, 123, 0, true, *chainID, 2) + suite.Require().NoError(err, "GetUpdatesForType should succeed") + suite.Assert().True(hasChangesRemaining, "Should have changes remaining") + suite.Assert().Equal(entities[:2], syncItems) + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") +} + +func (suite *SyncEntitySQLTestSuite) TestDeleteChain() { + entity1 := createSyncEntity(123, 12345678) + entity2 := createSyncEntity(123, 12345678) + + // Insert data for two chains + tx, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + chainID1, err := suite.sqlDB.GetAndLockChainID(tx, "client1") + suite.Require().NoError(err, "GetAndLockChainID should succeed for client1") + 
entity1.ChainID = chainID1 + + chainID2, err := suite.sqlDB.GetAndLockChainID(tx, "client2") + suite.Require().NoError(err, "GetAndLockChainID should succeed for client2") + entity2.ChainID = chainID2 + + _, err = suite.sqlDB.InsertSyncEntities(tx, []*datastore.SyncEntity{&entity1, &entity2}) + suite.Require().NoError(err, "InsertSyncEntities should succeed") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Delete chain for client1 + tx, err = suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction should succeed") + defer tx.Rollback() + + err = suite.sqlDB.DeleteChain(tx, *chainID1) + suite.Require().NoError(err, "DeleteChain should succeed") + + err = tx.Commit() + suite.Require().NoError(err, "Commit should succeed") + + // Verify that the chain and its entities are deleted for client1 + suite.checkChainExistence(*chainID1, false) + + // Verify that data still exists for client2 + suite.checkChainExistence(*chainID2, true) +} + +func (suite *SyncEntitySQLTestSuite) checkChainExistence(chainID int64, shouldExist bool) { + var expectedCount int + var count int + if shouldExist { + expectedCount = 1 + } + err := suite.sqlDB.QueryRow("SELECT COUNT(*) FROM entities WHERE chain_id = $1", chainID).Scan(&count) + suite.Require().NoError(err, "Count query should succeed for entities") + suite.Assert().Equal(expectedCount, count, "Entities for chain should be correct amount") + + err = suite.sqlDB.QueryRow("SELECT COUNT(*) FROM chains WHERE id = $1", chainID).Scan(&count) + suite.Require().NoError(err, "Count query should succeed") + suite.Assert().Equal(expectedCount, count, "Chain entry should be correct amount") +} + +func (suite *SyncEntitySQLTestSuite) TestConcurrentGetAndLockChainID() { + clientID := "testClient" + + // Start first transaction + tx1, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction 1 should succeed") + defer tx1.Rollback() + + // Get and lock chain ID in first 
transaction + chainID1, err := suite.sqlDB.GetAndLockChainID(tx1, clientID) + suite.Require().NoError(err, "GetAndLockChainID should succeed for tx1") + + // Try to get and lock chain ID in second transaction + // This should block until the first transaction is committed + stepChan := make(chan bool) + go func() { + // Start second transaction + tx2, err := suite.sqlDB.Beginx() + suite.Require().NoError(err, "Begin transaction 2 should succeed") + defer tx2.Rollback() + + stepChan <- true + chainID2, err := suite.sqlDB.GetAndLockChainID(tx2, clientID) + suite.Require().NoError(err, "GetAndLockChainID should succeed for tx2") + suite.Assert().Equal(*chainID1, *chainID2, "Chain IDs should be the same") + + err = tx2.Commit() + suite.Require().NoError(err, "Commit transaction 2 should succeed") + stepChan <- true + }() + + // Wait until second transaction has started + <-stepChan + + select { + case <-stepChan: + suite.FailNow("Second transaction goroutine exited prematurely") + case <-time.After(200 * time.Millisecond): + } + + // Commit the first transaction + err = tx1.Commit() + suite.Require().NoError(err, "Commit transaction 1 should succeed") + + // Wait for the second transaction to complete + select { + case <-stepChan: + // Success, second transaction completed + case <-time.After(5 * time.Second): + suite.Fail("Second transaction did not complete in time") + } + +} + +func TestSyncEntitySQLTestSuite(t *testing.T) { + suite.Run(t, new(SyncEntitySQLTestSuite)) +} diff --git a/datastore/sync_entity_test.go b/datastore/sync_entity_test.go index ec040c8f..eb3e1748 100644 --- a/datastore/sync_entity_test.go +++ b/datastore/sync_entity_test.go @@ -2,40 +2,20 @@ package datastore_test import ( "encoding/json" - "sort" "strconv" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/brave/go-sync/datastore" - "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/schema/protobuf/sync_pb" - "github.com/brave/go-sync/utils" + 
"github.com/google/uuid" "github.com/stretchr/testify/suite" "google.golang.org/protobuf/proto" ) type SyncEntityTestSuite struct { suite.Suite - dynamo *datastore.Dynamo -} - -func (suite *SyncEntityTestSuite) SetupSuite() { - datastore.Table = "client-entity-test-datastore" - var err error - suite.dynamo, err = datastore.NewDynamo() - suite.Require().NoError(err, "Failed to get dynamoDB session") -} - -func (suite *SyncEntityTestSuite) SetupTest() { - suite.Require().NoError( - datastoretest.ResetTable(suite.dynamo), "Failed to reset table") -} - -func (suite *SyncEntityTestSuite) TearDownTest() { - suite.Require().NoError( - datastoretest.DeleteTable(suite.dynamo), "Failed to delete table") } func (suite *SyncEntityTestSuite) TestNewServerClientUniqueTagItem() { @@ -66,583 +46,6 @@ func (suite *SyncEntityTestSuite) TestNewServerClientUniqueTagItem() { suite.Assert().Equal(expectedClientTag, actualClientTag) } -func (suite *SyncEntityTestSuite) TestInsertSyncEntity() { - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - } - entity2 := entity1 - entity2.ID = "id2" - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity with other ID should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().Error(err, "InsertSyncEntity with the same ClientID and ID should fail") - - // Each InsertSyncEntity without client tag should result in one sync item saved. 
- tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal( - 0, len(tagItems), "Insert without client tag should not insert tag items") - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - - // Insert entity with client tag should result in one sync item and one tag - // item saved. - entity3 := entity1 - entity3.ID = "id3" - entity3.ClientDefinedUniqueTag = aws.String("tag1") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - // Insert entity with different tag for same ClientID should succeed. - entity4 := entity3 - entity4.ID = "id4" - entity4.ClientDefinedUniqueTag = aws.String("tag2") - _, err = suite.dynamo.InsertSyncEntity(&entity4) - suite.Require().NoError(err, "InsertSyncEntity with different server tag should succeed") - - // Insert entity with the same client tag and ClientID should fail with conflict. - entity4Copy := entity4 - entity4Copy.ID = "id4_copy" - conflict, err := suite.dynamo.InsertSyncEntity(&entity4Copy) - suite.Require().Error(err, "InsertSyncEntity with the same client tag and ClientID should fail") - suite.Assert().True(conflict, "Return conflict for duplicate client tag") - - // Insert entity with the same client tag for other client should not fail. - entity5 := entity3 - entity5.ClientID = "client2" - entity5.ID = "id5" - _, err = suite.dynamo.InsertSyncEntity(&entity5) - suite.Require().NoError(err, - "InsertSyncEntity with the same client tag for another client should succeed") - - // Check sync items are saved for entity1, entity2, entity3, entity4, entity5. 
- syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3, entity4, entity5} - sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - - // Check tag items should be saved for entity3, entity4, entity5. - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - - // Check that Ctime and Mtime have been set, reset to zero value for subsequent - // tests - for i := 0; i < len(tagItems); i++ { - suite.Assert().NotNil(tagItems[i].Ctime) - suite.Assert().NotNil(tagItems[i].Mtime) - - tagItems[i].Ctime = nil - tagItems[i].Mtime = nil - } - - suite.Require().NoError(err, "ScanTagItems should succeed") - expectedTagItems := []datastore.ServerClientUniqueTagItem{ - {ClientID: "client1", ID: "Client#tag1"}, - {ClientID: "client1", ID: "Client#tag2"}, - {ClientID: "client2", ID: "Client#tag1"}, - } - sort.Sort(datastore.TagItemByClientIDID(tagItems)) - suite.Assert().Equal(expectedTagItems, tagItems) -} - -func (suite *SyncEntityTestSuite) TestHasServerDefinedUniqueTag() { - // Insert entities with server tags using InsertSyncEntitiesWithServerTags. 
- tag1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(true), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - tag2 := tag1 - tag2.ClientID = "client2" - tag2.ID = "id2" - tag2.ServerDefinedUniqueTag = aws.String("tag2") - entities := []*datastore.SyncEntity{&tag1, &tag2} - - err := suite.dynamo.InsertSyncEntitiesWithServerTags(entities) - suite.Require().NoError(err, "Insert sync entities should succeed") - - hasTag, err := suite.dynamo.HasServerDefinedUniqueTag("client1", "tag1") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client1", "tag2") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag1") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasServerDefinedUniqueTag("client2", "tag2") - suite.Require().NoError(err, "HasServerDefinedUniqueTag should succeed") - suite.Assert().Equal(hasTag, true) -} - -func (suite *SyncEntityTestSuite) TestHasItem() { - // Insert entity which will be checked later - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - entity2 := entity1 - entity2.ClientID = "client2" - entity2.ID = "id2" - - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, 
err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - hasTag, err := suite.dynamo.HasItem("client1", "id1") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasItem("client2", "id2") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, true) - - hasTag, err = suite.dynamo.HasItem("client2", "id3") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, false) - - hasTag, err = suite.dynamo.HasItem("client3", "id2") - suite.Require().NoError(err, "HasItem should succeed") - suite.Assert().Equal(hasTag, false) -} - -func (suite *SyncEntityTestSuite) TestInsertSyncEntitiesWithServerTags() { - // Insert with same ClientID and server tag would fail. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - entity2 := entity1 - entity2.ID = "id2" - entities := []*datastore.SyncEntity{&entity1, &entity2} - suite.Require().Error( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "Insert with same ClientID and server tag would fail") - - // Check nothing is written to DB when it fails. 
- syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(syncItems), "No items should be written if fail") - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(0, len(tagItems), "No items should be written if fail") - - entity2.ServerDefinedUniqueTag = aws.String("tag2") - entity3 := entity1 - entity3.ClientID = "client2" - entity3.ID = "id3" - entities = []*datastore.SyncEntity{&entity1, &entity2, &entity3} - suite.Require().NoError( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "InsertSyncEntitiesWithServerTags should succeed") - - // Scan DB and check all items are saved - syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - expectedSyncItems := []datastore.SyncEntity{entity1, entity2, entity3} - sort.Sort(datastore.SyncEntityByClientIDID(syncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - - // Check that Ctime and Mtime have been set, reset to zero value for subsequent - // tests - for i := 0; i < len(tagItems); i++ { - suite.Assert().NotNil(tagItems[i].Ctime) - suite.Assert().NotNil(tagItems[i].Mtime) - - tagItems[i].Ctime = nil - tagItems[i].Mtime = nil - } - - expectedTagItems := []datastore.ServerClientUniqueTagItem{ - {ClientID: "client1", ID: "Server#tag1"}, - {ClientID: "client1", ID: "Server#tag2"}, - {ClientID: "client2", ID: "Server#tag1"}, - } - sort.Sort(datastore.TagItemByClientIDID(tagItems)) - suite.Assert().Equal(expectedTagItems, tagItems) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_Basic() { - // Insert three new items. 
- entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - entity2 := entity1 - entity2.ID = "id2" - entity3 := entity1 - entity3.ID = "id3" - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - // Check sync entities are inserted correctly in DB. - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2, entity3}) - - // Update without optional fields. - updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(23456789) - updateEntity1.Mtime = aws.Int64(23456789) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.Deleted = aws.Bool(true) - updateEntity1.DataTypeMtime = aws.String("123#23456789") - updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") - - // Update with optional fields. 
- updateEntity2 := updateEntity1 - updateEntity2.ID = "id2" - updateEntity2.Deleted = aws.Bool(false) - updateEntity2.Folder = aws.Bool(false) - updateEntity2.UniquePosition = []byte{5, 6} - updateEntity2.ParentID = aws.String("parentID") - updateEntity2.Name = aws.String("name") - updateEntity2.NonUniqueName = aws.String("non_unique_name") - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, *entity2.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") - - // Update with nil Folder and Deleted - updateEntity3 := updateEntity1 - updateEntity3.ID = "id3" - updateEntity3.Folder = nil - updateEntity3.Deleted = nil - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, *entity3.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") - // Reset these back to false because they will be the expected value in DB. - updateEntity3.Folder = aws.Bool(false) - updateEntity3.Deleted = aws.Bool(false) - - // Update entity again with the wrong old version as (version mismatch) - // should return false. - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 12345678) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().True(conflict, "Update with the same version should return conflict") - suite.Assert().False(deleted, "Conflict operation should return false for delete") - - // Check sync entities are updated correctly in DB. 
- syncItems, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity1, updateEntity2, updateEntity3}) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_HistoryType() { - // Insert a history item - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - ClientDefinedUniqueTag: aws.String("client_tag1"), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(963985), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - conflict, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(2) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.Mtime = aws.Int64(24242424) - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, 1) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") - - // should still succeed with the same version number, - // since the version number should be ignored - updateEntity2 := updateEntity1 - updateEntity2.Mtime = aws.Int64(42424242) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity2, 1) - suite.Require().NoError(err, "UpdateSyncEntity should not return an error") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") - - updateEntity3 := entity1 - updateEntity3.Deleted = aws.Bool(true) - - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity3, 1) 
- suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") - - syncItems, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{updateEntity3}) -} - -func (suite *SyncEntityTestSuite) TestUpdateSyncEntity_ReuseClientTag() { - // Insert an item with client tag. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - ClientDefinedUniqueTag: aws.String("client_tag"), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - conflict, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - // Check a tag item is inserted. - tagItems, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") - - // Update it to version 23456789. 
- updateEntity1 := entity1 - updateEntity1.Version = aws.Int64(23456789) - updateEntity1.Mtime = aws.Int64(23456789) - updateEntity1.Folder = aws.Bool(true) - updateEntity1.DataTypeMtime = aws.String("123#23456789") - updateEntity1.Specifics = []byte{3, 4} - conflict, deleted, err := suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().False(deleted, "Non-delete operation should return false") - - // Soft-delete the item with wrong version should get conflict. - updateEntity1.Deleted = aws.Bool(true) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, *entity1.Version) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().True(conflict, "Version mismatched update should have conflict") - suite.Assert().False(deleted, "Failed delete operation should return false") - - // Soft-delete the item with matched version. - updateEntity1.Version = aws.Int64(34567890) - conflict, deleted, err = suite.dynamo.UpdateSyncEntity(&updateEntity1, 23456789) - suite.Require().NoError(err, "UpdateSyncEntity should succeed") - suite.Assert().False(conflict, "Successful update should not have conflict") - suite.Assert().True(deleted, "Delete operation should return true") - - // Check tag item is deleted. - tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(0, len(tagItems), "Tag item should be deleted") - - // Insert another item with the same client tag again. - entity2 := entity1 - entity2.ID = "id2" - conflict, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - suite.Assert().False(conflict, "Successful insert should not have conflict") - - // Check a tag item is inserted. 
- tagItems, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(tagItems), "Tag item should be inserted") -} - -func (suite *SyncEntityTestSuite) TestGetUpdatesForType() { - // Insert items for testing. - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(true), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - Specifics: []byte{1, 2}, - } - - entity2 := entity1 - entity2.ID = "id2" - entity2.Folder = aws.Bool(false) - entity2.Mtime = aws.Int64(12345679) - entity2.DataTypeMtime = aws.String("123#12345679") - - entity3 := entity2 - entity3.ID = "id3" - entity3.DataType = aws.Int(124) - entity3.DataTypeMtime = aws.String("124#12345679") - - // non-expired item - entity4 := entity2 - entity4.ClientID = "client2" - entity4.ID = "id4" - entity4.ExpirationTime = aws.Int64(time.Now().Unix() + 300) - - // expired item - entity5 := entity2 - entity5.ClientID = "client2" - entity5.ID = "id5" - entity5.ExpirationTime = aws.Int64(time.Now().Unix() - 300) - - _, err := suite.dynamo.InsertSyncEntity(&entity1) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity2) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity3) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity4) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - _, err = suite.dynamo.InsertSyncEntity(&entity5) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - - // Get all updates for type 123 and client1 using token = 0. 
- hasChangesRemaining, syncItems, err := suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 124 and client1 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, 0, true, "client1", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity3}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 123 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client2", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity4}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for type 124 and client2 using token = 0. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(124, 0, true, "client2", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(len(syncItems), 0) - suite.Assert().False(hasChangesRemaining) - - // Test maxSize will limit the return entries size, and hasChangesRemaining - // should be true when there are more updates available in the DB. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 1) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1}) - suite.Assert().True(hasChangesRemaining) - - // Test when num of query items equal to the limit, hasChangesRemaining should - // be true. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 2) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity1, entity2}) - suite.Assert().True(hasChangesRemaining) - - // Test fetchFolders will remove folder items if false - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, false, "client1", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) - suite.Assert().False(hasChangesRemaining) - - // Get all updates for a type for a client using mtime of one item as token. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 12345678, true, "client1", 100) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, []datastore.SyncEntity{entity2}) - suite.Assert().False(hasChangesRemaining) - - // Test batch is working correctly for over 100 items - err = datastoretest.ResetTable(suite.dynamo) - suite.Require().NoError(err, "Failed to reset table") - - expectedSyncItems := []datastore.SyncEntity{} - entity1 = datastore.SyncEntity{ - ClientID: "client1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - Specifics: []byte{1, 2}, - } - - mtime := utils.UnixMilli(time.Now()) - for i := 1; i <= 250; i++ { - mtime = mtime + 1 - entity := entity1 - entity.ID = "id" + strconv.Itoa(i) - entity.Mtime = aws.Int64(mtime) - entity.DataTypeMtime = aws.String("123#" + strconv.FormatInt(*entity.Mtime, 10)) - _, err := suite.dynamo.InsertSyncEntity(&entity) - suite.Require().NoError(err, "InsertSyncEntity should succeed") - expectedSyncItems = append(expectedSyncItems, entity) - } - - // All items should be returned and sorted by Mtime. 
- hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 300) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - sort.Sort(datastore.SyncEntityByMtime(expectedSyncItems)) - suite.Assert().Equal(syncItems, expectedSyncItems) - suite.Assert().False(hasChangesRemaining) - - // Test that when maxGUBatchSize is smaller than total updates, the first n - // items ordered by Mtime should be returned. - hasChangesRemaining, syncItems, err = suite.dynamo.GetUpdatesForType(123, 0, true, "client1", 200) - suite.Require().NoError(err, "GetUpdatesForType should succeed") - suite.Assert().Equal(syncItems, expectedSyncItems[0:200]) - suite.Assert().True(hasChangesRemaining) -} - func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { nigoriSpecific := &sync_pb.NigoriSpecifics{} nigoriEntitySpecific := &sync_pb.EntitySpecifics_Nigori{Nigori: nigoriSpecific} @@ -670,8 +73,10 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { Specifics: specifics, UniquePosition: uniquePosition, } + var expectedChainID int64 = 1 expectedDBEntity := datastore.SyncEntity{ ClientID: "client1", + ChainID: &expectedChainID, ParentID: pbEntity.ParentIdString, Version: pbEntity.Version, Name: pbEntity.Name, @@ -688,13 +93,16 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { ExpirationTime: nil, } - dbEntity, err := datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err := datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") // Check ID is replaced with a server-generated ID. 
suite.Assert().NotEqual( dbEntity.ID, *pbEntity.IdString, "ID should be a server-generated ID and not equal to the passed IdString") + _, err = uuid.Parse(dbEntity.ID) + suite.Assert().NoError(err, "dbEntity.ID should be a valid UUID") + expectedDBEntity.ID = dbEntity.ID // Check Mtime and Ctime should be provided by the server if client does not @@ -714,7 +122,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { pbEntity.Deleted = nil pbEntity.Folder = nil - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().False(*dbEntity.Deleted, "Default value should be set for Deleted for new entities") suite.Assert().False(*dbEntity.Folder, "Default value should be set for Deleted for new entities") @@ -723,14 +131,14 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Check the case when Ctime and Mtime are provided by the client. pbEntity.Ctime = aws.Int64(12345678) pbEntity.Mtime = aws.Int64(12345678) - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(*dbEntity.Ctime, *pbEntity.Ctime, "Client's Ctime should be respected") suite.Assert().NotEqual(*dbEntity.Mtime, *pbEntity.Mtime, "Client's Mtime should be replaced") suite.Assert().Nil(dbEntity.ExpirationTime) // When cacheGUID is nil, ID should be kept and no originator info are filled. 
- dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, nil, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, nil, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(dbEntity.ID, *pbEntity.IdString) suite.Assert().Nil(dbEntity.OriginatorCacheGUID) @@ -740,7 +148,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Check that when updating from a previous version with guid, ID will not be // replaced. pbEntity.Version = aws.Int64(1) - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err, "CreateDBSyncEntity should succeed") suite.Assert().Equal(dbEntity.ID, *pbEntity.IdString) suite.Assert().Nil(dbEntity.Deleted, "Deleted won't apply its default value for updated entities") @@ -749,7 +157,7 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // Empty unique position should be marshalled to nil without error. pbEntity.UniquePosition = nil - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err) suite.Assert().Nil(dbEntity.UniquePosition) suite.Assert().Nil(dbEntity.ExpirationTime) @@ -758,16 +166,15 @@ func (suite *SyncEntityTestSuite) TestCreateDBSyncEntity() { // and an expiration time. 
historyEntitySpecific := &sync_pb.EntitySpecifics_History{} pbEntity.Specifics = &sync_pb.EntitySpecifics{SpecificsVariant: historyEntitySpecific} - dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + dbEntity, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Require().NoError(err) - suite.Assert().Equal(dbEntity.ID, "client_tag") expectedExpirationTime := time.Now().Unix() + datastore.HistoryExpirationIntervalSecs suite.Assert().Greater(*dbEntity.ExpirationTime+2, expectedExpirationTime) suite.Assert().Less(*dbEntity.ExpirationTime-2, expectedExpirationTime) // Empty specifics should report marshal error. pbEntity.Specifics = nil - _, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1") + _, err = datastore.CreateDBSyncEntity(&pbEntity, guid, "client1", 1) suite.Assert().NotNil(err.Error(), "empty specifics should fail") } @@ -784,9 +191,11 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { uniquePositionBytes, err := proto.Marshal(uniquePosition) suite.Require().NoError(err, "Marshal unique position should succeed") + id, _ := uuid.NewV7() + dbEntity := datastore.SyncEntity{ ClientID: "client1", - ID: "id1", + ID: id.String(), ParentID: aws.String("parent_id"), Version: aws.Int64(10), Mtime: aws.Int64(12345678), @@ -832,6 +241,19 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { suite.Require().NoError(err, "json.Marshal should succeed") suite.Assert().Equal(s1, s2) + // Ensure ID is the client tag for history items + expectedPBEntity.IdString = expectedPBEntity.ClientTagHash + *dbEntity.DataType = datastore.HistoryTypeID + pbEntity, err = datastore.CreatePBSyncEntity(&dbEntity) + suite.Require().NoError(err, "CreatePBSyncEntity should succeed") + + // Marshal to json to ignore protobuf internal fields when checking equality. 
+ s1, err = json.Marshal(pbEntity) + suite.Require().NoError(err, "json.Marshal should succeed") + s2, err = json.Marshal(&expectedPBEntity) + suite.Require().NoError(err, "json.Marshal should succeed") + suite.Assert().Equal(s1, s2) + // Nil UniquePosition should be unmarshalled as nil without error. dbEntity.UniquePosition = nil pbEntity, err = datastore.CreatePBSyncEntity(&dbEntity) @@ -845,102 +267,6 @@ func (suite *SyncEntityTestSuite) TestCreatePBSyncEntity() { suite.Assert().Nil(pbEntity.Specifics) } -func (suite *SyncEntityTestSuite) TestDisableSyncChain() { - clientID := "client1" - id := "disabled_chain" - err := suite.dynamo.DisableSyncChain(clientID) - suite.Require().NoError(err, "DisableSyncChain should succeed") - e, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(1, len(e)) - suite.Assert().Equal(clientID, e[0].ClientID) - suite.Assert().Equal(id, e[0].ID) -} - -func (suite *SyncEntityTestSuite) TestIsSyncChainDisabled() { - clientID := "client1" - - disabled, err := suite.dynamo.IsSyncChainDisabled(clientID) - suite.Require().NoError(err, "IsSyncChainDisabled should succeed") - suite.Assert().Equal(false, disabled) - - err = suite.dynamo.DisableSyncChain(clientID) - suite.Require().NoError(err, "DisableSyncChain should succeed") - disabled, err = suite.dynamo.IsSyncChainDisabled(clientID) - suite.Require().NoError(err, "IsSyncChainDisabled should succeed") - suite.Assert().Equal(true, disabled) -} - -func (suite *SyncEntityTestSuite) TestClearServerData() { - // Test clear sync entities - entity := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - } - _, err := suite.dynamo.InsertSyncEntity(&entity) - suite.Require().NoError(err, "InsertSyncEntity 
should succeed") - - e, err := datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(1, len(e)) - - e, err = suite.dynamo.ClearServerData(entity.ClientID) - suite.Require().NoError(err, "ClearServerData should succeed") - suite.Assert().Equal(1, len(e)) - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(e)) - - // Test clear tagged items - entity1 := datastore.SyncEntity{ - ClientID: "client1", - ID: "id1", - Version: aws.Int64(1), - Ctime: aws.Int64(12345678), - Mtime: aws.Int64(12345678), - DataType: aws.Int(123), - Folder: aws.Bool(false), - Deleted: aws.Bool(false), - DataTypeMtime: aws.String("123#12345678"), - ServerDefinedUniqueTag: aws.String("tag1"), - } - entity2 := entity1 - entity2.ID = "id2" - entity2.ServerDefinedUniqueTag = aws.String("tag2") - entities := []*datastore.SyncEntity{&entity1, &entity2} - suite.Require().NoError( - suite.dynamo.InsertSyncEntitiesWithServerTags(entities), - "InsertSyncEntitiesWithServerTags should succeed") - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(2, len(e), "No items should be written if fail") - - t, err := datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should succeed") - suite.Assert().Equal(2, len(t), "No items should be written if fail") - - e, err = suite.dynamo.ClearServerData(entity.ClientID) - suite.Require().NoError(err, "ClearServerData should succeed") - suite.Assert().Equal(4, len(e)) - - e, err = datastoretest.ScanSyncEntities(suite.dynamo) - suite.Require().NoError(err, "ScanSyncEntities should succeed") - suite.Assert().Equal(0, len(e), "No items should be written if fail") - - t, err = datastoretest.ScanTagItems(suite.dynamo) - suite.Require().NoError(err, "ScanTagItems should 
succeed") - suite.Assert().Equal(0, len(t), "No items should be written if fail") -} - func TestSyncEntityTestSuite(t *testing.T) { suite.Run(t, new(SyncEntityTestSuite)) } diff --git a/docker-compose.yml b/docker-compose.yml index 5a7b68d3..237de6a5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,6 +16,7 @@ services: depends_on: - dynamo-local - redis + - postgres networks: - sync environment: @@ -28,6 +29,8 @@ services: - AWS_REGION=us-west-2 - AWS_ENDPOINT=http://dynamo-local:8000 - REDIS_URL=redis:6379 + - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable + - SQL_TEST_DATABASE_URL=postgres://sync:password@postgres/testing?sslmode=disable web: build: context: . @@ -54,6 +57,8 @@ services: - AWS_ENDPOINT=http://dynamo-local:8000 - TABLE_NAME=client-entity-dev - REDIS_URL=redis:6379 + - SQL_DATABASE_URL=postgres://sync:password@postgres/postgres?sslmode=disable + - SQL_TEST_DATABASE_URL=postgres://sync:password@postgres/testing?sslmode=disable dynamo-local: build: context: . 
@@ -70,3 +75,16 @@ services: - ALLOW_EMPTY_PASSWORD=yes networks: - sync + postgres: + build: + dockerfile: pg.Dockerfile + ports: + - "5434:5432" + environment: + - POSTGRES_USER=sync + - POSTGRES_PASSWORD=password + networks: + - sync + command: ["postgres", "-c", "shared_preload_libraries=pg_cron"] + volumes: + - "./misc/create_additional_dbs.sql:/docker-entrypoint-initdb.d/create_additional_dbs.sql" diff --git a/go.mod b/go.mod index 7a991df2..f830bc6c 100644 --- a/go.mod +++ b/go.mod @@ -1,45 +1,79 @@ module github.com/brave/go-sync -go 1.22 +go 1.22.0 + +toolchain go1.23.0 require ( github.com/aws/aws-sdk-go v1.55.5 - github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae github.com/getsentry/sentry-go v0.28.1 github.com/go-chi/chi/v5 v5.0.12 github.com/prometheus/client_golang v1.19.0 github.com/redis/go-redis/v9 v9.5.1 github.com/rs/zerolog v1.32.0 - github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.9.0 - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 +) + +require ( + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + go.uber.org/atomic v1.10.0 // indirect ) require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect + github.com/golang-migrate/migrate/v4 v4.18.1 // indirect github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx v3.6.2+incompatible // indirect + github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/shengdoushi/base58 v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect 
github.com/throttled/throttled v2.2.5+incompatible // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f63ee4ed..f5961c49 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,10 @@ +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw= @@ -24,23 +31,61 @@ github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbV github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 
v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= +github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21 h1:wRH9E07mfYqZ1EPphNTUIkrZ/7wcbZAGcjhrBlkWy4c= +github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.21/go.mod h1:6m/MDzT+aFxaIo46f2MYV4d+qG9J9keLlHL0qKnQFgA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod 
h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae h1:CGUFAtMXAsGajLeobq6ep+5wREYS+lepZSdPckY+Ba0= github.com/brave-intl/bat-go/libs v0.0.0-20231020145457-cc9860c87bae/go.mod h1:sUyKgpr9uxg0SARewNEkNMStvBjOeWuWoLchHgyONGA= +github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e h1:wWsx4axnKnJ2i6HM4m+1etOu+fz68VDjm1dMgnD3+b0= +github.com/brave-intl/bat-go/libs v0.0.0-20240909083638-be56e4a5398e/go.mod h1:8QVK0ZrPIiemLAHAvgGYY+Xf3QXYclAHfnuiHHFxlK8= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/getsentry/sentry-go v0.28.0 h1:7Rqx9M3ythTKy2J6uZLHmc8Sz9OGgIlseuO1iBX/s0M= @@ -55,33 +100,98 @@ github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= 
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= +github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= +github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= +github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx 
v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= 
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -100,33 +210,56 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shengdoushi/base58 v1.0.0 h1:tGe4o6TmdXFJWoI31VoSWvuaKxf0Px3gqa3sUWhAxBs= github.com/shengdoushi/base58 v1.0.0/go.mod h1:m5uIILfzcKMw6238iWAhP4l3s5+uXyF3+bJKUNhAL9I= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/throttled/throttled v2.2.5+incompatible h1:65UB52X0qNTYiT0Sohp8qLYVFwZQPDw85uSa65OljjQ= github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg-go/pbkdf2 
v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -136,19 +269,45 @@ golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf 
v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod 
h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= diff --git a/main.go b/main.go index dc289123..3a353527 100644 --- a/main.go +++ b/main.go @@ -3,6 +3,7 @@ package main import ( "github.com/brave/go-sync/server" + _ "github.com/joho/godotenv/autoload" ) func main() { diff --git a/middleware/disabled_chain.go b/middleware/disabled_chain.go index 282e5382..4a52fa3b 100644 --- a/middleware/disabled_chain.go +++ b/middleware/disabled_chain.go @@ -21,7 +21,7 @@ func DisabledChain(next http.Handler) http.Handler { return } - db, ok := ctx.Value(syncContext.ContextKeyDatastore).(datastore.Datastore) + db, ok := ctx.Value(syncContext.ContextKeyDatastore).(datastore.DynamoDatastore) if !ok { http.Error(w, "unable to complete request", http.StatusInternalServerError) return diff --git a/middleware/middleware_test.go b/middleware/middleware_test.go index 21ebd8e4..8bc8f8cb 100644 --- a/middleware/middleware_test.go +++ b/middleware/middleware_test.go @@ -4,15 +4,18 @@ import ( "bytes" "context" "fmt" + "net/http" "net/http/httptest" "testing" + "time" "github.com/brave/go-sync/auth/authtest" syncContext "github.com/brave/go-sync/context" "github.com/brave/go-sync/datastore/datastoretest" "github.com/brave/go-sync/middleware" + "github.com/brave/go-sync/utils" "github.com/stretchr/testify/suite" ) diff --git a/misc/create_additional_dbs.sql b/misc/create_additional_dbs.sql new file mode 100644 index 00000000..da88a0d9 --- /dev/null +++ b/misc/create_additional_dbs.sql @@ -0,0 +1,2 @@ +CREATE DATABASE testing; +GRANT ALL PRIVILEGES ON DATABASE testing TO sync; diff --git a/pg.Dockerfile b/pg.Dockerfile new file mode 100644 index 00000000..0a7d3475 --- /dev/null +++ b/pg.Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/docker/library/postgres:16 + +RUN apt update && apt install -y git make gcc postgresql-server-dev-16 + 
+RUN git clone https://github.com/pgpartman/pg_partman +RUN cd pg_partman && make NO_BGW=1 install + +RUN git clone https://github.com/citusdata/pg_cron +RUN cd pg_cron && make && make install diff --git a/server/rollout.go b/server/rollout.go new file mode 100644 index 00000000..7606cc1c --- /dev/null +++ b/server/rollout.go @@ -0,0 +1,45 @@ +package server + +import ( + "context" + "os" + + "github.com/brave/go-sync/cache" + "github.com/brave/go-sync/datastore" + "github.com/rs/zerolog/log" +) + +const ( + lastRolloutStateCacheKey string = "last-rollout-state" + rolloutConfirmChannelKey string = "rollout-confirm" + sqlDisableRolloutConfirm string = "SQL_DISABLE_ROLLOUT_CONFIRM" +) + +func maybeWaitOnRolloutConfigChange(sqlVariations *datastore.SQLVariations, cache *cache.Cache) { + currentDigest := sqlVariations.GetStateDigest() + + lastDigest, err := cache.Get(context.Background(), lastRolloutStateCacheKey, false) + if err != nil { + log.Fatal().Msgf("failed to get last rollout state: %v", err) + return + } + + rolloutConfirmDisabled := os.Getenv(sqlDisableRolloutConfirm) != "" + if !rolloutConfirmDisabled && currentDigest != lastDigest { + log.Info().Msg("Rollout configuration detected. 
Commits/writes disabled until Redis confirmation event is received...") + err = cache.SubscribeAndWait(context.Background(), rolloutConfirmChannelKey) + if err != nil { + log.Fatal().Msgf("failed to subscribe and wait for rollout confirmation: %v", err) + return + } + + err = cache.Set(context.Background(), lastRolloutStateCacheKey, currentDigest, 0) + if err != nil { + log.Fatal().Msgf("failed to update last rollout state: %v", err) + return + } + log.Info().Msg("Confirmation event received") + } + + sqlVariations.Ready = true +} diff --git a/server/server.go b/server/server.go index 67d854e0..46107bc3 100644 --- a/server/server.go +++ b/server/server.go @@ -44,7 +44,7 @@ func setupLogger(ctx context.Context) (context.Context, *zerolog.Logger) { return logging.SetupLogger(ctx) } -func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, *chi.Mux) { +func setupRouter(ctx context.Context, logger *zerolog.Logger, isTesting bool) (context.Context, *chi.Mux) { r := chi.NewRouter() r.Use(chiware.RequestID) @@ -63,22 +63,30 @@ func setupRouter(ctx context.Context, logger *zerolog.Logger) (context.Context, r.Use(batware.BearerToken) r.Use(middleware.CommonResponseHeaders) - db, err := datastore.NewDynamo() + dynamoDB, err := datastore.NewDynamo(isTesting) if err != nil { sentry.CaptureException(err) - log.Panic().Err(err).Msg("Must be able to init datastore to start") + log.Panic().Err(err).Msg("Must be able to init Dynamo datastore to start") + } + + sqlDB, err := datastore.NewSQLDB(isTesting) + if err != nil { + sentry.CaptureException(err) + log.Panic().Err(err).Msg("Must be able to init SQL datastore to start") } redis := cache.NewRedisClient() cache := cache.NewCache(cache.NewRedisClientWithPrometheus(redis, "redis")) + go maybeWaitOnRolloutConfigChange(sqlDB.Variations(), cache) + // Provide datastore & cache via context - ctx = context.WithValue(ctx, syncContext.ContextKeyDatastore, db) + ctx = context.WithValue(ctx, 
syncContext.ContextKeyDatastore, dynamoDB) ctx = context.WithValue(ctx, syncContext.ContextKeyCache, &cache) r.Mount("/v2", controller.SyncRouter( cache, - datastore.NewDatastoreWithPrometheus(db, "dynamo"))) + datastore.NewDynamoDatastoreWithPrometheus(dynamoDB, "dynamo"), datastore.NewSQLDatastoreWithPrometheus(sqlDB, "sql"))) r.Get("/metrics", batware.Metrics()) log.Info(). @@ -123,7 +131,7 @@ func StartServer() { subLog := logger.Info().Str("prefix", "main") subLog.Msg("Starting server") - serverCtx, r := setupRouter(serverCtx, logger) + serverCtx, r := setupRouter(serverCtx, logger, false) port := ":8295" srv := http.Server{ diff --git a/server/server_test.go b/server/server_test.go index 22f4ad56..82f24acf 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -19,7 +19,7 @@ var ( func init() { testCtx, logger := server.SetupLogger(context.Background()) - serverCtx, mux = server.SetupRouter(testCtx, logger) + serverCtx, mux = server.SetupRouter(testCtx, logger, true) } func TestPing(t *testing.T) {