From 752b66e19780a559800d00020abc2384bf533328 Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Thu, 5 Dec 2024 19:21:02 +0200 Subject: [PATCH 01/10] Add changes from p2p branch on top ov dev branch. --- ...compactions.yaml => push-p2p-rewrite.yaml} | 2 +- go.mod | 1 + main.go | 32 +- processor/processor.go | 20 +- processor/sync.go | 685 ++++++++++++++++++ processor/sync_test.go | 299 ++++++++ protobuff/sync.pb.go | 657 +++++++++++++++++ protobuff/sync.pb.gw.go | 272 +++++++ protobuff/sync.proto | 60 ++ protobuff/sync_grpc.pb.go | 239 ++++++ rpc/rpc_server.go | 43 +- rpc/sync.go | 177 +++++ store/keys.go | 40 + store/store.go | 62 +- utils/utils.go | 2 + validator/chain/chain.go | 8 +- validator/computors/models.go | 2 +- validator/computors/models_test.go | 2 +- validator/computors/validator.go | 13 +- validator/quorum/models.go | 224 +++++- validator/quorum/models_test.go | 2 +- validator/quorum/validator.go | 4 +- validator/sync.go | 235 ++++++ validator/tick/empty_tick.go | 8 +- validator/tick/full_tick_data.go | 102 +++ validator/tick/models.go | 145 +++- validator/tick/models_test.go | 4 +- validator/tick/validator.go | 2 +- validator/tx/models.go | 48 +- validator/tx/models_test.go | 2 +- validator/tx/validator.go | 2 +- validator/validator.go | 46 +- 32 files changed, 3373 insertions(+), 67 deletions(-) rename .github/workflows/{push-db-compactions.yaml => push-p2p-rewrite.yaml} (96%) create mode 100644 processor/sync.go create mode 100644 processor/sync_test.go create mode 100644 protobuff/sync.pb.go create mode 100644 protobuff/sync.pb.gw.go create mode 100644 protobuff/sync.proto create mode 100644 protobuff/sync_grpc.pb.go create mode 100644 rpc/sync.go create mode 100644 validator/sync.go create mode 100644 validator/tick/full_tick_data.go diff --git a/.github/workflows/push-db-compactions.yaml b/.github/workflows/push-p2p-rewrite.yaml similarity index 96% rename from .github/workflows/push-db-compactions.yaml rename to .github/workflows/push-p2p-rewrite.yaml index 1cae876..5325bdd 100644 --- a/.github/workflows/push-db-compactions.yaml +++ b/.github/workflows/push-p2p-rewrite.yaml @@ -3,7 +3,7 @@ name: Deploy dev images to GHCR on: push: branches: - - 'db-compactions' + - 'p2p-rewrite' jobs: push-store-image: diff --git a/go.mod b/go.mod index a0bad35..ad7a83a 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/cockroachdb/pebble v1.1.2 github.com/google/go-cmp v0.6.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 + github.com/pingcap/errors v0.11.4 github.com/pkg/errors v0.9.1 github.com/qubic/go-node-connector v0.10.2 github.com/qubic/go-schnorrq v1.0.1 diff --git a/main.go b/main.go index aae855d..29e0e30 100644 --- a/main.go +++ b/main.go @@ -47,12 +47,23 @@ func run() error { } Qubic struct { NodePort string `conf:"default:21841"` - StorageFolder string `conf:"default:store"` + StorageFolder string `conf:"default:storage"` ProcessTickTimeout time.Duration `conf:"default:5s"` } Store struct { ResetEmptyTickKeys bool `conf:"default:false"` } + Sync struct { + Enable bool `conf:"default:false"` + Source string `conf:"default:localhost:8001"` + ResponseTimeout time.Duration `conf:"default:5s"` + EnableCompression bool `conf:"default:true"` + } + Bootstrap struct { + Enable bool `conf:"default:true"` + MaxRequestedItems int `conf:"default:100"` + BatchSize int `conf:"default:10"` + } } if err := conf.Parse(os.Args[1:], prefix, &cfg); err != nil { @@ -145,7 +156,7 @@ func run() error { } } - err = tick.CalculateEmptyTicksForAllEpochs(ps) + err = 
tick.CalculateEmptyTicksForAllEpochs(ps, false) if err != nil { return errors.Wrap(err, "calculating empty ticks for all epochs") } @@ -163,7 +174,13 @@ func run() error { return errors.Wrap(err, "creating qubic pool") } - rpcServer := rpc.NewServer(cfg.Server.GrpcHost, cfg.Server.HttpHost, cfg.Server.NodeSyncThreshold, cfg.Server.ChainTickFetchUrl, ps, p) + bootstrapConfiguration := rpc.BootstrapConfiguration{ + Enable: cfg.Bootstrap.Enable, + MaximumRequestedItems: cfg.Bootstrap.MaxRequestedItems, + BatchSize: cfg.Bootstrap.BatchSize, + } + + rpcServer := rpc.NewServer(cfg.Server.GrpcHost, cfg.Server.HttpHost, cfg.Server.NodeSyncThreshold, cfg.Server.ChainTickFetchUrl, ps, p, bootstrapConfiguration) err = rpcServer.Start() if err != nil { return errors.Wrap(err, "starting rpc server") @@ -172,7 +189,14 @@ func run() error { shutdown := make(chan os.Signal, 1) signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM) - proc := processor.NewProcessor(p, ps, cfg.Qubic.ProcessTickTimeout) + syncConfiguration := processor.SyncConfiguration{ + Enable: cfg.Sync.Enable, + Source: cfg.Sync.Source, + ResponseTimeout: cfg.Sync.ResponseTimeout, + EnableCompression: cfg.Sync.EnableCompression, + } + + proc := processor.NewProcessor(p, ps, cfg.Qubic.ProcessTickTimeout, syncConfiguration) procErrors := make(chan error, 1) // Start the service listening for requests. diff --git a/processor/processor.go b/processor/processor.go index fbd589f..e12e44b 100644 --- a/processor/processor.go +++ b/processor/processor.go @@ -25,21 +25,39 @@ func (e *TickInTheFutureError) Error() string { return errors.Errorf("Requested tick %d is in the future. Latest tick is: %d", e.requestedTick, e.latestTick).Error() } +type SyncConfiguration struct { + Enable bool + Source string + ResponseTimeout time.Duration + EnableCompression bool +} + type Processor struct { pool *qubic.Pool ps *store.PebbleStore processTickTimeout time.Duration + SyncConfiguration SyncConfiguration } -func NewProcessor(p *qubic.Pool, ps *store.PebbleStore, processTickTimeout time.Duration) *Processor { +func NewProcessor(p *qubic.Pool, ps *store.PebbleStore, processTickTimeout time.Duration, syncConfiguration SyncConfiguration) *Processor { return &Processor{ pool: p, ps: ps, processTickTimeout: processTickTimeout, + SyncConfiguration: syncConfiguration, } } func (p *Processor) Start() error { + + if p.SyncConfiguration.Enable { + syncProcessor := NewSyncProcessor(p.SyncConfiguration, p.ps, p.processTickTimeout) + err := syncProcessor.Start() + if err != nil { + return errors.Wrap(err, "performing synchronization") + } + } + for { err := p.processOneByOne() if err != nil { diff --git a/processor/sync.go b/processor/sync.go new file mode 100644 index 0000000..d7164c9 --- /dev/null +++ b/processor/sync.go @@ -0,0 +1,685 @@ +package processor + +import ( + "cmp" + "context" + "encoding/binary" + "fmt" + "github.com/cockroachdb/pebble" + "github.com/pkg/errors" + "github.com/qubic/go-archiver/protobuff" + "github.com/qubic/go-archiver/store" + "github.com/qubic/go-archiver/utils" + "github.com/qubic/go-archiver/validator" + "github.com/qubic/go-archiver/validator/computors" + "github.com/qubic/go-archiver/validator/tick" + "github.com/qubic/go-node-connector/types" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/protobuf/proto" + "io" + "log" + "runtime" + "slices" + "sync" + "time" +) + +type SyncProcessor struct { + syncConfiguration SyncConfiguration + 
syncServiceClient protobuff.SyncServiceClient + pebbleStore *store.PebbleStore + syncDelta SyncDelta + processTickTimeout time.Duration + maxObjectRequest uint32 + lastSynchronizedTick *protobuff.SyncLastSynchronizedTick +} + +func NewSyncProcessor(syncConfiguration SyncConfiguration, pebbleStore *store.PebbleStore, processTickTimeout time.Duration) *SyncProcessor { + return &SyncProcessor{ + syncConfiguration: syncConfiguration, + pebbleStore: pebbleStore, + processTickTimeout: processTickTimeout, + } +} + +func (sp *SyncProcessor) Start() error { + + log.Printf("Connecting to bootstrap node %s...", sp.syncConfiguration.Source) + + grpcConnection, err := grpc.NewClient(sp.syncConfiguration.Source, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return errors.Wrap(err, "creating grpc connection to bootstrap") + } + defer grpcConnection.Close() + + syncServiceClient := protobuff.NewSyncServiceClient(grpcConnection) + sp.syncServiceClient = syncServiceClient + + log.Println("Fetching bootstrap metadata...") + bootstrapMetadata, err := sp.getBootstrapMetadata() + if err != nil { + return err + } + + sp.maxObjectRequest = uint32(bootstrapMetadata.MaxObjectRequest) + + clientMetadata, err := sp.getClientMetadata() + if err != nil { + return errors.Wrap(err, "getting client metadata") + } + + lastSynchronizedTick, err := sp.pebbleStore.GetSyncLastSynchronizedTick() + if err != nil { + log.Printf("Error fetching last synchronized tick from disk: %v\n", err) + } + + sp.lastSynchronizedTick = lastSynchronizedTick + + log.Println("Calculating synchronization delta...") + syncDelta, err := sp.CalculateSyncDelta(bootstrapMetadata, clientMetadata, lastSynchronizedTick) + if err != nil { + return errors.Wrap(err, "calculating sync delta") + } + + if len(syncDelta) == 0 { + log.Println("Nothing to synchronize, resuming to processing network ticks.") + return nil + } + + log.Println("Synchronizing missing epoch information...") + err = sp.syncEpochInfo(syncDelta, bootstrapMetadata) + if err != nil { + return errors.Wrap(err, "syncing epoch info") + } + + sp.syncDelta = syncDelta + + log.Println("Starting tick synchronization") + err = sp.synchronize() + if err != nil { + return errors.Wrap(err, "performing synchronization") + } + + return nil +} + +func (sp *SyncProcessor) getBootstrapMetadata() (*protobuff.SyncMetadataResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) + defer cancel() + + metadata, err := sp.syncServiceClient.SyncGetBootstrapMetadata(ctx, nil) + if err != nil { + return nil, errors.Wrap(err, "getting bootstrap metadata") + } + + return metadata, nil +} + +func (sp *SyncProcessor) getClientMetadata() (*protobuff.SyncMetadataResponse, error) { + + processedTickIntervals, err := sp.pebbleStore.GetProcessedTickIntervals(nil) + if err != nil { + return nil, errors.Wrap(err, "getting processed tick intervals") + } + + return &protobuff.SyncMetadataResponse{ + ArchiverVersion: utils.ArchiverVersion, + ProcessedTickIntervals: processedTickIntervals, + }, nil +} + +type EpochDelta struct { + Epoch uint32 + ProcessedIntervals []*protobuff.ProcessedTickInterval +} + +type SyncDelta []EpochDelta + +func areIntervalsEqual(a, b []*protobuff.ProcessedTickInterval) bool { + if len(a) != len(b) { + return false + } + + for index := 0; index < len(a); index++ { + + if !proto.Equal(a[index], b[index]) { + return false + } + } + return true +} + +func (sp *SyncProcessor) CalculateSyncDelta(bootstrapMetadata, 
clientMetadata *protobuff.SyncMetadataResponse, lastSynchronizedTick *protobuff.SyncLastSynchronizedTick) (SyncDelta, error) { + + if bootstrapMetadata.ArchiverVersion != clientMetadata.ArchiverVersion { + return nil, errors.New(fmt.Sprintf("client version (%s) does not match bootstrap version (%s)", clientMetadata.ArchiverVersion, bootstrapMetadata.ArchiverVersion)) + } + + bootstrapProcessedTicks := make(map[uint32][]*protobuff.ProcessedTickInterval) + clientProcessedTicks := make(map[uint32][]*protobuff.ProcessedTickInterval) + + for _, epochIntervals := range bootstrapMetadata.ProcessedTickIntervals { + bootstrapProcessedTicks[epochIntervals.Epoch] = epochIntervals.Intervals + } + + for _, epochIntervals := range clientMetadata.ProcessedTickIntervals { + clientProcessedTicks[epochIntervals.Epoch] = epochIntervals.Intervals + } + + var syncDelta SyncDelta + + for epoch, processedIntervals := range bootstrapProcessedTicks { + + if lastSynchronizedTick != nil && lastSynchronizedTick.Epoch == epoch { + + var intervals []*protobuff.ProcessedTickInterval + + foundIncompleteInterval := false + + for _, interval := range processedIntervals { + if !foundIncompleteInterval && lastSynchronizedTick.TickNumber >= interval.InitialProcessedTick && lastSynchronizedTick.TickNumber <= interval.LastProcessedTick { + intervals = append(intervals, &protobuff.ProcessedTickInterval{ + InitialProcessedTick: lastSynchronizedTick.TickNumber, + LastProcessedTick: interval.LastProcessedTick, + }) + foundIncompleteInterval = true + continue + } + intervals = append(intervals, interval) + } + syncDelta = append(syncDelta, EpochDelta{ + Epoch: epoch, + ProcessedIntervals: intervals, + }) + continue + } + + clientProcessedIntervals, exists := clientProcessedTicks[epoch] + + if !exists || !areIntervalsEqual(processedIntervals, clientProcessedIntervals) { + epochDelta := EpochDelta{ + Epoch: epoch, + ProcessedIntervals: processedIntervals, + } + syncDelta = append(syncDelta, epochDelta) + } + + } + + slices.SortFunc(syncDelta, func(a, b EpochDelta) int { + return cmp.Compare(a.Epoch, b.Epoch) + }) + + return syncDelta, nil +} + +func (sp *SyncProcessor) storeEpochInfo(response *protobuff.SyncEpochInfoResponse) error { + + for _, epoch := range response.Epochs { + err := sp.pebbleStore.SetComputors(context.Background(), epoch.ComputorList.Epoch, epoch.ComputorList) + if err != nil { + return errors.Wrapf(err, "storing computor list for epoch %d", epoch.ComputorList.Epoch) + } + + err = sp.pebbleStore.SetLastTickQuorumDataPerEpochIntervals(epoch.ComputorList.Epoch, epoch.LastTickQuorumDataPerIntervals) + if err != nil { + return errors.Wrapf(err, "storing last tick quorum data for epoch %d", epoch.ComputorList.Epoch) + } + } + + return nil +} + +func (sp *SyncProcessor) syncEpochInfo(delta SyncDelta, metadata *protobuff.SyncMetadataResponse) error { + + // TODO: remove skipped tick intervals from proto file + /*err := sp.pebbleStore.SetSkippedTickIntervalList(&protobuff.SkippedTicksIntervalList{ + SkippedTicks: metadata.SkippedTickIntervals, + }) + if err != nil { + return errors.Wrap(err, "saving skipped tick intervals from bootstrap") + }*/ + + var epochs []uint32 + + for _, epochDelta := range delta { + epochs = append(epochs, epochDelta.Epoch) + } + + ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) + + defer cancel() + + stream, err := sp.syncServiceClient.SyncGetEpochInformation(ctx, &protobuff.SyncEpochInfoRequest{Epochs: epochs}) + if err != nil { + return 
errors.Wrap(err, "fetching epoch info") + } + + for { + data, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "reading stream") + } + err = sp.storeEpochInfo(data) + if err != nil { + return errors.Wrap(err, "storing epoch data") + } + } + + return nil +} + +func (sp *SyncProcessor) synchronize() error { + + for _, epochDelta := range sp.syncDelta { + + if sp.lastSynchronizedTick.Epoch > epochDelta.Epoch { + continue + } + + log.Printf("Synchronizing ticks for epoch %d...\n", epochDelta.Epoch) + + processedTickIntervalsForEpoch, err := sp.pebbleStore.GetProcessedTickIntervalsPerEpoch(nil, epochDelta.Epoch) + if err != nil { + return errors.Wrapf(err, "getting processed tick intervals for epoch %d", epochDelta.Epoch) + } + + computorList, err := sp.pebbleStore.GetComputors(nil, epochDelta.Epoch) + if err != nil { + return errors.Wrapf(err, "reading computor list from disk for epoch %d", epochDelta.Epoch) + } + + if len(epochDelta.ProcessedIntervals) == 0 { + return errors.New(fmt.Sprintf("no processed tick intervals in delta for epoch %d", epochDelta.Epoch)) + } + + log.Printf("Validating computor list") + err = computors.ValidateProto(nil, validator.GoSchnorrqVerify, computorList) + if err != nil { + return errors.Wrapf(err, "validating computors for epoch %d", epochDelta.Epoch) + } + + qubicComputors, err := computors.ProtoToQubic(computorList) + if err != nil { + return errors.Wrap(err, "converting computors to qubic format") + } + + for _, interval := range epochDelta.ProcessedIntervals { + + initialIntervalTick := interval.InitialProcessedTick + + if initialIntervalTick > sp.lastSynchronizedTick.TickNumber { + err = sp.pebbleStore.SetSkippedTicksInterval(nil, &protobuff.SkippedTicksInterval{ + StartTick: sp.lastSynchronizedTick.TickNumber + 1, + EndTick: initialIntervalTick - 1, + }) + if err != nil { + return errors.Wrap(err, "appending skipped tick interval") + } + } + + for tickNumber := interval.InitialProcessedTick; tickNumber <= interval.LastProcessedTick; tickNumber += sp.maxObjectRequest { + + startTick := tickNumber + endTick := startTick + sp.maxObjectRequest - 1 + if endTick > interval.LastProcessedTick { + endTick = interval.LastProcessedTick + } + + duration := time.Now() + + fetchedTicks, err := sp.fetchTicks(startTick, endTick) + if err != nil { + return errors.Wrapf(err, "fetching tick range %d - %d", startTick, endTick) + } + + processedTicks, err := sp.processTicks(fetchedTicks, initialIntervalTick, qubicComputors) + if err != nil { + return errors.Wrapf(err, "processing tick range %d - %d", startTick, endTick) + } + lastSynchronizedTick, err := sp.storeTicks(processedTicks, epochDelta.Epoch, processedTickIntervalsForEpoch, initialIntervalTick) + sp.lastSynchronizedTick = lastSynchronizedTick + if err != nil { + return errors.Wrapf(err, "storing processed tick range %d - %d", startTick, endTick) + } + + elapsed := time.Since(duration) + + log.Printf("Done processing %d ticks. 
Took: %v | Average time / tick: %v\n", sp.maxObjectRequest, elapsed, elapsed.Seconds()/float64(sp.maxObjectRequest)) + } + } + + } + + err := tick.CalculateEmptyTicksForAllEpochs(sp.pebbleStore, true) + if err != nil { + return errors.Wrap(err, "calculating empty ticks after synchronization") + } + + log.Println("Finished synchronizing ticks.") + err = sp.pebbleStore.DeleteSyncLastSynchronizedTick() + if err != nil { + return errors.Wrap(err, "resetting synchronization index") + } + return nil +} + +func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.SyncTickData, error) { + + //TODO: We are currently fetching a large process of ticks, and using the default will cause the method to error before we are finished + //ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) + //defer cancel() + ctx := context.Background() + + var compression grpc.CallOption = grpc.EmptyCallOption{} + if sp.syncConfiguration.EnableCompression { + compression = grpc.UseCompressor(gzip.Name) + } + + var responses []*protobuff.SyncTickData + + mutex := sync.RWMutex{} + routineCount := runtime.NumCPU() / 2 + tickDifference := endTick - startTick + batchSize := tickDifference / uint32(routineCount) + errChannel := make(chan error, routineCount) + var waitGroup sync.WaitGroup + startTime := time.Now() + counter := 0 + + log.Printf("Fetching tick range [%d - %d] on %d routines\n", startTick, endTick, routineCount) + + for index := range routineCount { + waitGroup.Add(1) + + start := startTick + (batchSize * uint32(index)) + end := start + batchSize - 1 + + if end > endTick || index == (int(routineCount)-1) { + end = endTick + } + + go func(errChannel chan<- error) { + defer waitGroup.Done() + + log.Printf("[Routine %d] Fetching tick range %d - %d", index, start, end) + + stream, err := sp.syncServiceClient.SyncGetTickInformation(ctx, &protobuff.SyncTickInfoRequest{ + FirstTick: start, + LastTick: end, + }, compression) + if err != nil { + errChannel <- errors.Wrap(err, "fetching tick information") + return + } + + lastTime := time.Now() + + for { + data, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + errChannel <- errors.Wrap(err, "reading tick information stream") + return + } + + mutex.Lock() + responses = append(responses, data.Ticks...) + counter += len(data.Ticks) + mutex.Unlock() + + elapsed := time.Since(lastTime) + rate := float64(len(data.Ticks)) / elapsed.Seconds() + + var firstFetchedTick uint32 + var lastFetchedTick uint32 + + if len(data.Ticks) > 0 { + firstFetchedTick = data.Ticks[0].QuorumData.QuorumTickStructure.TickNumber + lastFetchedTick = data.Ticks[len(data.Ticks)-1].QuorumData.QuorumTickStructure.TickNumber + } + + fmt.Printf("[Routine %d]: Fetched %d ticks - [%d - %d] Took: %v | Rate: %f t/s - ~ %d t/m | Total: %d\n", index, len(data.Ticks), firstFetchedTick, lastFetchedTick, time.Since(lastTime), rate, int(rate*60), counter) + + lastTime = time.Now() + } + + fmt.Printf("Routine %d finished\n", index) + + errChannel <- nil + + }(errChannel) + } + + waitGroup.Wait() + + fmt.Printf("Done fetching %d ticks. 
Took: %v\n", counter, time.Since(startTime)) + + for _ = range routineCount { + err := <-errChannel + if err != nil { + return nil, errors.Wrap(err, "fetching ticks concurrently") + } + } + + return responses, nil + +} + +func (sp *SyncProcessor) processTicks(tickInfoResponses []*protobuff.SyncTickData, initialIntervalTick uint32, computors types.Computors) (validator.ValidatedTicks, error) { + + syncValidator := validator.NewSyncValidator(initialIntervalTick, computors, tickInfoResponses, sp.processTickTimeout, sp.pebbleStore, sp.lastSynchronizedTick) + validatedTicks, err := syncValidator.Validate() + if err != nil { + return nil, errors.Wrap(err, "validating ticks") + } + + return validatedTicks, nil +} + +func (sp *SyncProcessor) storeTicks(validatedTicks validator.ValidatedTicks, epoch uint32, processedTickIntervalsPerEpoch *protobuff.ProcessedTickIntervalsPerEpoch, initialTickInterval uint32) (*protobuff.SyncLastSynchronizedTick, error) { + + if epoch == 0 { + return nil, errors.Errorf("epoch is 0") + } + + db := sp.pebbleStore.GetDB() + + batch := db.NewBatch() + defer batch.Close() + + log.Println("Storing validated ticks...") + + var lastSynchronizedTick protobuff.SyncLastSynchronizedTick + lastSynchronizedTick.Epoch = epoch + + for _, validatedTick := range validatedTicks { + + tickNumber := validatedTick.AlignedVotes.QuorumTickStructure.TickNumber + + fmt.Printf("Storing data for tick %d\n", tickNumber) + + quorumDataKey := store.AssembleKey(store.QuorumData, tickNumber) + serializedData, err := proto.Marshal(validatedTick.AlignedVotes) + if err != nil { + return nil, errors.Wrapf(err, "serializing aligned votes for tick %d", tickNumber) + } + err = batch.Set(quorumDataKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding aligned votes to batch for tick %d", tickNumber) + } + + tickDataKey := store.AssembleKey(store.TickData, tickNumber) + serializedData, err = proto.Marshal(validatedTick.TickData) + if err != nil { + return nil, errors.Wrapf(err, "serializing tick data for tick %d", tickNumber) + } + err = batch.Set(tickDataKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding tick data to batch for tick %d", tickNumber) + } + + for _, transaction := range validatedTick.ValidTransactions { + transactionKey := store.AssembleKey(store.Transaction, transaction.TxId) + serializedData, err = proto.Marshal(transaction) + if err != nil { + return nil, errors.Wrapf(err, "deserializing transaction %s for tick %d", transaction.TxId, tickNumber) + } + err = batch.Set(transactionKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "addin transaction %s to batch for tick %d", transaction.TxId, tickNumber) + } + } + + transactionsPerIdentity := removeNonTransferTransactionsAndSortPerIdentity(validatedTick.ValidTransactions) + for identity, transactions := range transactionsPerIdentity { + identityTransfersPerTickKey := store.AssembleKey(store.IdentityTransferTransactions, identity) + identityTransfersPerTickKey = binary.BigEndian.AppendUint64(identityTransfersPerTickKey, uint64(tickNumber)) + + serializedData, err = proto.Marshal(&protobuff.TransferTransactionsPerTick{ + TickNumber: tickNumber, + Identity: identity, + Transactions: transactions, + }) + if err != nil { + return nil, errors.Wrapf(err, "serializing transfer transactions for tickl %d", tickNumber) + } + err = batch.Set(identityTransfersPerTickKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding transafer transactions 
to batch for tick %d", tickNumber) + } + } + + tickTxStatusKey := store.AssembleKey(store.TickTransactionsStatus, uint64(tickNumber)) + serializedData, err = proto.Marshal(validatedTick.ApprovedTransactions) + if err != nil { + return nil, errors.Wrapf(err, "serializing transaction statuses for tick %d", tickNumber) + } + err = batch.Set(tickTxStatusKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding transactions statuses to batch for tick %d", tickNumber) + } + for _, transaction := range validatedTick.ApprovedTransactions.Transactions { + approvedTransactionKey := store.AssembleKey(store.TransactionStatus, transaction.TxId) + serializedData, err = proto.Marshal(transaction) + if err != nil { + return nil, errors.Wrapf(err, "serialzing approved transaction %s for tick %d", transaction.TxId, tickNumber) + } + err = batch.Set(approvedTransactionKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding approved transaction %s to batch for tick %d", transaction.TxId, tickNumber) + } + } + + chainDigestKey := store.AssembleKey(store.ChainDigest, tickNumber) + err = batch.Set(chainDigestKey, validatedTick.ChainHash[:], nil) + if err != nil { + return nil, errors.Wrapf(err, "adding chain hash to batch for tick %d", tickNumber) + } + + storeDigestKey := store.AssembleKey(store.StoreDigest, tickNumber) + err = batch.Set(storeDigestKey, validatedTick.StoreHash[:], nil) + if err != nil { + return nil, errors.Wrapf(err, "adding store hash to batch for tick %d", tickNumber) + } + + lastSynchronizedTick.TickNumber = tickNumber + lastSynchronizedTick.ChainHash = validatedTick.ChainHash[:] + lastSynchronizedTick.StoreHash = validatedTick.StoreHash[:] + } + + lastProcessedTickPerEpochKey := store.AssembleKey(store.LastProcessedTickPerEpoch, epoch) + value := make([]byte, 4) + binary.LittleEndian.PutUint32(value, lastSynchronizedTick.TickNumber) + err := batch.Set(lastProcessedTickPerEpochKey, value, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding last processed tick %d for epoch %d to batch", lastSynchronizedTick.TickNumber, epoch) + } + + lastProcessedTickProto := &protobuff.ProcessedTick{ + TickNumber: lastSynchronizedTick.TickNumber, + Epoch: epoch, + } + lastProcessedTickKey := []byte{store.LastProcessedTick} + serializedData, err := proto.Marshal(lastProcessedTickProto) + if err != nil { + return nil, errors.Wrapf(err, "serializing last processed tick %d", lastSynchronizedTick.TickNumber) + } + err = batch.Set(lastProcessedTickKey, serializedData, nil) + if err != nil { + return nil, errors.Wrapf(err, "adding last processed tick %d to batch", lastSynchronizedTick.TickNumber) + } + + if len(processedTickIntervalsPerEpoch.Intervals) == 0 { + processedTickIntervalsPerEpoch = &protobuff.ProcessedTickIntervalsPerEpoch{ + Epoch: epoch, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: initialTickInterval, + LastProcessedTick: lastSynchronizedTick.TickNumber, + }, + }, + } + } else { + processedTickIntervalsPerEpoch.Intervals[len(processedTickIntervalsPerEpoch.Intervals)-1].LastProcessedTick = lastSynchronizedTick.TickNumber + } + + processedTickIntervalsPerEpochKey := store.AssembleKey(store.ProcessedTickIntervals, epoch) + serializedData, err = proto.Marshal(processedTickIntervalsPerEpoch) + if err != nil { + return nil, errors.Wrapf(err, "serializing processed tick intervals for epoch %d", epoch) + } + err = batch.Set(processedTickIntervalsPerEpochKey, serializedData, nil) + if err != nil { + return nil, 
errors.Wrapf(err, "adding processed tick intervals for epoch %d to batch", epoch) + } + + lastSynchronizedTickKey := []byte{store.SyncLastSynchronizedTick} + serializedData, err = proto.Marshal(&lastSynchronizedTick) + if err != nil { + return nil, errors.Wrap(err, "serializing last synchronized tick") + } + err = batch.Set(lastSynchronizedTickKey, serializedData, nil) + if err != nil { + return nil, errors.Wrap(err, "adding synchronization index to batch") + } + + err = batch.Commit(pebble.Sync) + if err != nil { + return nil, errors.Wrap(err, "commiting batch") + } + + return &lastSynchronizedTick, nil +} + +func removeNonTransferTransactionsAndSortPerIdentity(transactions []*protobuff.Transaction) map[string][]*protobuff.Transaction { + + transferTransactions := make([]*protobuff.Transaction, 0) + for _, transaction := range transactions { + if transaction.Amount == 0 { + continue + } + transferTransactions = append(transferTransactions, transaction) + } + transactionsPerIdentity := make(map[string][]*protobuff.Transaction) + for _, transaction := range transferTransactions { + _, exists := transactionsPerIdentity[transaction.DestId] + if !exists { + transactionsPerIdentity[transaction.DestId] = make([]*protobuff.Transaction, 0) + } + _, exists = transactionsPerIdentity[transaction.SourceId] + if !exists { + transactionsPerIdentity[transaction.SourceId] = make([]*protobuff.Transaction, 0) + } + } + + return transactionsPerIdentity + +} diff --git a/processor/sync_test.go b/processor/sync_test.go new file mode 100644 index 0000000..470b552 --- /dev/null +++ b/processor/sync_test.go @@ -0,0 +1,299 @@ +package processor + +import ( + "github.com/qubic/go-archiver/protobuff" + "testing" + "time" +) + +func TestSyncProcessor_CalculateSyncDelta(t *testing.T) { + + mockSyncProcessor := NewSyncProcessor(SyncConfiguration{}, nil, time.Second) + + testData := []struct { + name string + bootstrapMetadata *protobuff.SyncMetadataResponse + clientMetadata *protobuff.SyncMetadataResponse + lastSynchronizedTick *protobuff.SyncLastSynchronizedTick + expected SyncDelta + }{ + { + name: "TestCalculateSyncDelta_1", + bootstrapMetadata: &protobuff.SyncMetadataResponse{ + ArchiverVersion: "dev", + MaxObjectRequest: 1000, + ProcessedTickIntervals: []*protobuff.ProcessedTickIntervalsPerEpoch{ + { + Epoch: 123, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15500000, + LastProcessedTick: 15578849, + }, + }, + }, + { + Epoch: 124, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15590000, + LastProcessedTick: 15694132, + }, + }, + }, + { + Epoch: 125, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15700000, + LastProcessedTick: 15829438, + }, + }, + }, + { + Epoch: 126, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15840000, + LastProcessedTick: 15959704, + }, + }, + }, + { + Epoch: 127, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15970000, + LastProcessedTick: 16089394, + }, + }, + }, + }, + }, + clientMetadata: &protobuff.SyncMetadataResponse{ + ArchiverVersion: "dev", + MaxObjectRequest: 1000, + ProcessedTickIntervals: []*protobuff.ProcessedTickIntervalsPerEpoch{ + { + Epoch: 124, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15590000, + LastProcessedTick: 15694132, + }, + }, + }, + { + Epoch: 125, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15700000, + LastProcessedTick: 15829438, + }, + }, + }, 
+ { + Epoch: 126, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15840000, + LastProcessedTick: 15849999, + }, + }, + }, + }, + }, + lastSynchronizedTick: nil, + expected: SyncDelta{ + { + Epoch: 123, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15500000, + LastProcessedTick: 15578849, + }, + }, + }, + { + Epoch: 126, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15840000, + LastProcessedTick: 15959704, + }, + }, + }, + { + Epoch: 127, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15970000, + LastProcessedTick: 16089394, + }, + }, + }, + }, + }, + { + name: "TestCalculateSyncDelta_2", + bootstrapMetadata: &protobuff.SyncMetadataResponse{ + ArchiverVersion: "dev", + MaxObjectRequest: 1000, + ProcessedTickIntervals: []*protobuff.ProcessedTickIntervalsPerEpoch{ + { + Epoch: 123, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15500000, + LastProcessedTick: 15578849, + }, + }, + }, + { + Epoch: 124, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15590000, + LastProcessedTick: 15694132, + }, + }, + }, + { + Epoch: 125, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15700000, + LastProcessedTick: 15829438, + }, + }, + }, + { + Epoch: 126, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15840000, + LastProcessedTick: 15959704, + }, + }, + }, + { + Epoch: 127, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15970000, + LastProcessedTick: 16089394, + }, + }, + }, + }, + }, + clientMetadata: &protobuff.SyncMetadataResponse{ + ArchiverVersion: "dev", + MaxObjectRequest: 1000, + ProcessedTickIntervals: []*protobuff.ProcessedTickIntervalsPerEpoch{ + { + Epoch: 124, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15590000, + LastProcessedTick: 15694132, + }, + }, + }, + { + Epoch: 125, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15700000, + LastProcessedTick: 15829438, + }, + }, + }, + { + Epoch: 126, + Intervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15840000, + LastProcessedTick: 15849999, + }, + }, + }, + }, + }, + lastSynchronizedTick: &protobuff.SyncLastSynchronizedTick{ + TickNumber: 15849999, + Epoch: 126, + ChainHash: nil, + StoreHash: nil, + }, + expected: SyncDelta{ + { + Epoch: 123, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15500000, + LastProcessedTick: 15578849, + }, + }, + }, + { + Epoch: 126, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15849999, + LastProcessedTick: 15959704, + }, + }, + }, + { + Epoch: 127, + ProcessedIntervals: []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: 15970000, + LastProcessedTick: 16089394, + }, + }, + }, + }, + }, + } + + for _, data := range testData { + t.Run(data.name, func(t *testing.T) { + + syncDelta, err := mockSyncProcessor.CalculateSyncDelta(data.bootstrapMetadata, data.clientMetadata, data.lastSynchronizedTick) + if err != nil { + t.Fatalf("Error occured while calculating sync data: %v", err) + } + + if len(data.expected) != len(syncDelta) { + t.Fatalf("Mismatched delta length. 
Expected: %d Got: %d", len(data.expected), len(syncDelta)) + } + + for index := 0; index < len(data.expected); index++ { + expectedEpochDelta := data.expected[index] + gotEpochDelta := syncDelta[index] + if expectedEpochDelta.Epoch != gotEpochDelta.Epoch { + t.Fatalf("Mismatched epoch at index %d. Expected: %d Got: %d", index, expectedEpochDelta.Epoch, gotEpochDelta.Epoch) + } + + if len(expectedEpochDelta.ProcessedIntervals) != len(gotEpochDelta.ProcessedIntervals) { + t.Fatalf("Mismatched processed interval list for epoch %d. Expected: %d Got: %d", expectedEpochDelta.Epoch, len(expectedEpochDelta.ProcessedIntervals), len(gotEpochDelta.ProcessedIntervals)) + } + + for intervalIndex := 0; intervalIndex < len(expectedEpochDelta.ProcessedIntervals); intervalIndex++ { + + expectedInterval := expectedEpochDelta.ProcessedIntervals[intervalIndex] + gotInterval := gotEpochDelta.ProcessedIntervals[intervalIndex] + + if expectedInterval.InitialProcessedTick != gotInterval.InitialProcessedTick || expectedInterval.LastProcessedTick != gotInterval.LastProcessedTick { + t.Fatalf("Mismatched tick intervals at index %d for epoch %d. \nExpected: %v \nGot: %v", intervalIndex, expectedEpochDelta.Epoch, expectedInterval, gotInterval) + } + + } + + } + }) + } + +} diff --git a/protobuff/sync.pb.go b/protobuff/sync.pb.go new file mode 100644 index 0000000..1a800ba --- /dev/null +++ b/protobuff/sync.pb.go @@ -0,0 +1,657 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v5.28.3 +// source: sync.proto + +package protobuff + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SyncEpochData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ComputorList *Computors `protobuf:"bytes,1,opt,name=computor_list,json=computorList,proto3" json:"computor_list,omitempty"` + LastTickQuorumDataPerIntervals *LastTickQuorumDataPerEpochIntervals `protobuf:"bytes,2,opt,name=last_tick_quorum_data_per_intervals,json=lastTickQuorumDataPerIntervals,proto3" json:"last_tick_quorum_data_per_intervals,omitempty"` +} + +func (x *SyncEpochData) Reset() { + *x = SyncEpochData{} + mi := &file_sync_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncEpochData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncEpochData) ProtoMessage() {} + +func (x *SyncEpochData) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncEpochData.ProtoReflect.Descriptor instead. 
+func (*SyncEpochData) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{0} +} + +func (x *SyncEpochData) GetComputorList() *Computors { + if x != nil { + return x.ComputorList + } + return nil +} + +func (x *SyncEpochData) GetLastTickQuorumDataPerIntervals() *LastTickQuorumDataPerEpochIntervals { + if x != nil { + return x.LastTickQuorumDataPerIntervals + } + return nil +} + +type SyncTickData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TickData *TickData `protobuf:"bytes,1,opt,name=tick_data,json=tickData,proto3" json:"tick_data,omitempty"` + QuorumData *QuorumTickData `protobuf:"bytes,2,opt,name=quorum_data,json=quorumData,proto3" json:"quorum_data,omitempty"` + Transactions []*Transaction `protobuf:"bytes,3,rep,name=transactions,proto3" json:"transactions,omitempty"` + TransactionsStatus []*TransactionStatus `protobuf:"bytes,4,rep,name=transactions_status,json=transactionsStatus,proto3" json:"transactions_status,omitempty"` +} + +func (x *SyncTickData) Reset() { + *x = SyncTickData{} + mi := &file_sync_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncTickData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncTickData) ProtoMessage() {} + +func (x *SyncTickData) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncTickData.ProtoReflect.Descriptor instead. +func (*SyncTickData) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{1} +} + +func (x *SyncTickData) GetTickData() *TickData { + if x != nil { + return x.TickData + } + return nil +} + +func (x *SyncTickData) GetQuorumData() *QuorumTickData { + if x != nil { + return x.QuorumData + } + return nil +} + +func (x *SyncTickData) GetTransactions() []*Transaction { + if x != nil { + return x.Transactions + } + return nil +} + +func (x *SyncTickData) GetTransactionsStatus() []*TransactionStatus { + if x != nil { + return x.TransactionsStatus + } + return nil +} + +type SyncMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ArchiverVersion string `protobuf:"bytes,1,opt,name=archiver_version,json=archiverVersion,proto3" json:"archiver_version,omitempty"` + MaxObjectRequest int32 `protobuf:"varint,2,opt,name=maxObjectRequest,proto3" json:"maxObjectRequest,omitempty"` + ProcessedTickIntervals []*ProcessedTickIntervalsPerEpoch `protobuf:"bytes,3,rep,name=processed_tick_intervals,json=processedTickIntervals,proto3" json:"processed_tick_intervals,omitempty"` //repeated SkippedTicksInterval skipped_tick_intervals = 4; +} + +func (x *SyncMetadataResponse) Reset() { + *x = SyncMetadataResponse{} + mi := &file_sync_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncMetadataResponse) ProtoMessage() {} + +func (x *SyncMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use SyncMetadataResponse.ProtoReflect.Descriptor instead. +func (*SyncMetadataResponse) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{2} +} + +func (x *SyncMetadataResponse) GetArchiverVersion() string { + if x != nil { + return x.ArchiverVersion + } + return "" +} + +func (x *SyncMetadataResponse) GetMaxObjectRequest() int32 { + if x != nil { + return x.MaxObjectRequest + } + return 0 +} + +func (x *SyncMetadataResponse) GetProcessedTickIntervals() []*ProcessedTickIntervalsPerEpoch { + if x != nil { + return x.ProcessedTickIntervals + } + return nil +} + +type SyncEpochInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Epochs []uint32 `protobuf:"varint,1,rep,packed,name=epochs,proto3" json:"epochs,omitempty"` +} + +func (x *SyncEpochInfoRequest) Reset() { + *x = SyncEpochInfoRequest{} + mi := &file_sync_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncEpochInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncEpochInfoRequest) ProtoMessage() {} + +func (x *SyncEpochInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncEpochInfoRequest.ProtoReflect.Descriptor instead. +func (*SyncEpochInfoRequest) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{3} +} + +func (x *SyncEpochInfoRequest) GetEpochs() []uint32 { + if x != nil { + return x.Epochs + } + return nil +} + +type SyncEpochInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Epochs []*SyncEpochData `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs,omitempty"` +} + +func (x *SyncEpochInfoResponse) Reset() { + *x = SyncEpochInfoResponse{} + mi := &file_sync_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncEpochInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncEpochInfoResponse) ProtoMessage() {} + +func (x *SyncEpochInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncEpochInfoResponse.ProtoReflect.Descriptor instead. 
+func (*SyncEpochInfoResponse) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{4} +} + +func (x *SyncEpochInfoResponse) GetEpochs() []*SyncEpochData { + if x != nil { + return x.Epochs + } + return nil +} + +type SyncTickInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FirstTick uint32 `protobuf:"varint,1,opt,name=first_tick,json=firstTick,proto3" json:"first_tick,omitempty"` + LastTick uint32 `protobuf:"varint,2,opt,name=last_tick,json=lastTick,proto3" json:"last_tick,omitempty"` +} + +func (x *SyncTickInfoRequest) Reset() { + *x = SyncTickInfoRequest{} + mi := &file_sync_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncTickInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncTickInfoRequest) ProtoMessage() {} + +func (x *SyncTickInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncTickInfoRequest.ProtoReflect.Descriptor instead. +func (*SyncTickInfoRequest) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{5} +} + +func (x *SyncTickInfoRequest) GetFirstTick() uint32 { + if x != nil { + return x.FirstTick + } + return 0 +} + +func (x *SyncTickInfoRequest) GetLastTick() uint32 { + if x != nil { + return x.LastTick + } + return 0 +} + +type SyncTickInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ticks []*SyncTickData `protobuf:"bytes,1,rep,name=ticks,proto3" json:"ticks,omitempty"` +} + +func (x *SyncTickInfoResponse) Reset() { + *x = SyncTickInfoResponse{} + mi := &file_sync_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncTickInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncTickInfoResponse) ProtoMessage() {} + +func (x *SyncTickInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncTickInfoResponse.ProtoReflect.Descriptor instead. 
+func (*SyncTickInfoResponse) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{6} +} + +func (x *SyncTickInfoResponse) GetTicks() []*SyncTickData { + if x != nil { + return x.Ticks + } + return nil +} + +type SyncLastSynchronizedTick struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TickNumber uint32 `protobuf:"varint,1,opt,name=tick_number,json=tickNumber,proto3" json:"tick_number,omitempty"` + Epoch uint32 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + ChainHash []byte `protobuf:"bytes,3,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` + StoreHash []byte `protobuf:"bytes,4,opt,name=store_hash,json=storeHash,proto3" json:"store_hash,omitempty"` +} + +func (x *SyncLastSynchronizedTick) Reset() { + *x = SyncLastSynchronizedTick{} + mi := &file_sync_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncLastSynchronizedTick) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncLastSynchronizedTick) ProtoMessage() {} + +func (x *SyncLastSynchronizedTick) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncLastSynchronizedTick.ProtoReflect.Descriptor instead. +func (*SyncLastSynchronizedTick) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{7} +} + +func (x *SyncLastSynchronizedTick) GetTickNumber() uint32 { + if x != nil { + return x.TickNumber + } + return 0 +} + +func (x *SyncLastSynchronizedTick) GetEpoch() uint32 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *SyncLastSynchronizedTick) GetChainHash() []byte { + if x != nil { + return x.ChainHash + } + return nil +} + +func (x *SyncLastSynchronizedTick) GetStoreHash() []byte { + if x != nil { + return x.StoreHash + } + return nil +} + +var File_sync_proto protoreflect.FileDescriptor + +var file_sync_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x71, 0x75, + 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, + 0x68, 0x44, 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, + 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, + 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, + 0x72, 0x73, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x8b, 0x01, 0x0a, 0x23, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x71, + 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x54, + 0x69, 0x63, 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x65, 0x72, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x52, 0x1e, + 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, + 0x74, 0x61, 0x50, 0x65, 0x72, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0xc7, + 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x40, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x54, + 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x4a, 0x0a, 0x0b, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, + 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, + 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, + 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x13, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, + 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, + 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x53, 0x79, 0x6e, + 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x73, 0x0a, 0x18, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x71, 0x75, 0x62, + 
0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x50, 0x65, 0x72, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x16, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0x2e, 0x0a, + 0x14, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x59, 0x0a, + 0x15, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, + 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x51, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, + 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x12, 0x1b, + 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x53, + 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x74, 0x69, 0x63, + 0x6b, 0x73, 0x22, 0x8f, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x4c, 0x61, 0x73, 0x74, 0x53, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x12, + 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x69, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x48, 0x61, 0x73, 0x68, 0x32, 0xf6, 0x02, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x42, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x16, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x17, + 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, + 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x7d, + 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x29, 0x5a, + 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x62, 0x69, + 0x63, 0x2f, 0x67, 0x6f, 0x2d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x66, 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sync_proto_rawDescOnce sync.Once + file_sync_proto_rawDescData = file_sync_proto_rawDesc +) + +func file_sync_proto_rawDescGZIP() []byte { + file_sync_proto_rawDescOnce.Do(func() { + file_sync_proto_rawDescData = protoimpl.X.CompressGZIP(file_sync_proto_rawDescData) + }) + return file_sync_proto_rawDescData +} + +var file_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_sync_proto_goTypes = []any{ + (*SyncEpochData)(nil), // 0: qubic.archiver.archive.pb.SyncEpochData + (*SyncTickData)(nil), // 1: qubic.archiver.archive.pb.SyncTickData + (*SyncMetadataResponse)(nil), // 2: qubic.archiver.archive.pb.SyncMetadataResponse + (*SyncEpochInfoRequest)(nil), // 3: qubic.archiver.archive.pb.SyncEpochInfoRequest + (*SyncEpochInfoResponse)(nil), // 4: qubic.archiver.archive.pb.SyncEpochInfoResponse + (*SyncTickInfoRequest)(nil), // 5: qubic.archiver.archive.pb.SyncTickInfoRequest + (*SyncTickInfoResponse)(nil), // 6: qubic.archiver.archive.pb.SyncTickInfoResponse + (*SyncLastSynchronizedTick)(nil), // 7: qubic.archiver.archive.pb.SyncLastSynchronizedTick + (*Computors)(nil), // 8: qubic.archiver.archive.pb.Computors + (*LastTickQuorumDataPerEpochIntervals)(nil), // 9: 
qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals + (*TickData)(nil), // 10: qubic.archiver.archive.pb.TickData + (*QuorumTickData)(nil), // 11: qubic.archiver.archive.pb.QuorumTickData + (*Transaction)(nil), // 12: qubic.archiver.archive.pb.Transaction + (*TransactionStatus)(nil), // 13: qubic.archiver.archive.pb.TransactionStatus + (*ProcessedTickIntervalsPerEpoch)(nil), // 14: qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch + (*emptypb.Empty)(nil), // 15: google.protobuf.Empty +} +var file_sync_proto_depIdxs = []int32{ + 8, // 0: qubic.archiver.archive.pb.SyncEpochData.computor_list:type_name -> qubic.archiver.archive.pb.Computors + 9, // 1: qubic.archiver.archive.pb.SyncEpochData.last_tick_quorum_data_per_intervals:type_name -> qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals + 10, // 2: qubic.archiver.archive.pb.SyncTickData.tick_data:type_name -> qubic.archiver.archive.pb.TickData + 11, // 3: qubic.archiver.archive.pb.SyncTickData.quorum_data:type_name -> qubic.archiver.archive.pb.QuorumTickData + 12, // 4: qubic.archiver.archive.pb.SyncTickData.transactions:type_name -> qubic.archiver.archive.pb.Transaction + 13, // 5: qubic.archiver.archive.pb.SyncTickData.transactions_status:type_name -> qubic.archiver.archive.pb.TransactionStatus + 14, // 6: qubic.archiver.archive.pb.SyncMetadataResponse.processed_tick_intervals:type_name -> qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch + 0, // 7: qubic.archiver.archive.pb.SyncEpochInfoResponse.epochs:type_name -> qubic.archiver.archive.pb.SyncEpochData + 1, // 8: qubic.archiver.archive.pb.SyncTickInfoResponse.ticks:type_name -> qubic.archiver.archive.pb.SyncTickData + 15, // 9: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:input_type -> google.protobuf.Empty + 3, // 10: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:input_type -> qubic.archiver.archive.pb.SyncEpochInfoRequest + 5, // 11: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:input_type -> qubic.archiver.archive.pb.SyncTickInfoRequest + 2, // 12: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:output_type -> qubic.archiver.archive.pb.SyncMetadataResponse + 4, // 13: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:output_type -> qubic.archiver.archive.pb.SyncEpochInfoResponse + 6, // 14: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:output_type -> qubic.archiver.archive.pb.SyncTickInfoResponse + 12, // [12:15] is the sub-list for method output_type + 9, // [9:12] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sync_proto_init() } +func file_sync_proto_init() { + if File_sync_proto != nil { + return + } + file_archive_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sync_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sync_proto_goTypes, + DependencyIndexes: file_sync_proto_depIdxs, + MessageInfos: file_sync_proto_msgTypes, + }.Build() + File_sync_proto = out.File + file_sync_proto_rawDesc = nil + file_sync_proto_goTypes = nil + file_sync_proto_depIdxs = nil +} diff --git a/protobuff/sync.pb.gw.go b/protobuff/sync.pb.gw.go new file mode 100644 index 0000000..4828016 --- /dev/null +++ b/protobuff/sync.pb.gw.go @@ -0,0 
+1,272 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: sync.proto + +/* +Package protobuff is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package protobuff + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_SyncService_SyncGetBootstrapMetadata_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.SyncGetBootstrapMetadata(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SyncService_SyncGetBootstrapMetadata_0(ctx context.Context, marshaler runtime.Marshaler, server SyncServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.SyncGetBootstrapMetadata(ctx, &protoReq) + return msg, metadata, err + +} + +func request_SyncService_SyncGetEpochInformation_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (SyncService_SyncGetEpochInformationClient, runtime.ServerMetadata, error) { + var protoReq SyncEpochInfoRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.SyncGetEpochInformation(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_SyncService_SyncGetTickInformation_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (SyncService_SyncGetTickInformationClient, runtime.ServerMetadata, error) { + var protoReq SyncTickInfoRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.SyncGetTickInformation(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +// 
RegisterSyncServiceHandlerServer registers the http handlers for service SyncService to "mux". +// UnaryRPC :call SyncServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSyncServiceHandlerFromEndpoint instead. +func RegisterSyncServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SyncServiceServer) error { + + mux.Handle("POST", pattern_SyncService_SyncGetBootstrapMetadata_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncService/SyncGetBootstrapMetadata", runtime.WithHTTPPathPattern("/qubic.archiver.archive.pb.SyncService/SyncGetBootstrapMetadata")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncService_SyncGetBootstrapMetadata_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_SyncGetBootstrapMetadata_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_SyncService_SyncGetEpochInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + mux.Handle("POST", pattern_SyncService_SyncGetTickInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + return nil +} + +// RegisterSyncServiceHandlerFromEndpoint is same as RegisterSyncServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterSyncServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterSyncServiceHandler(ctx, mux, conn) +} + +// RegisterSyncServiceHandler registers the http handlers for service SyncService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterSyncServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterSyncServiceHandlerClient(ctx, mux, NewSyncServiceClient(conn)) +} + +// RegisterSyncServiceHandlerClient registers the http handlers for service SyncService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SyncServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SyncServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "SyncServiceClient" to call the correct interceptors. +func RegisterSyncServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SyncServiceClient) error { + + mux.Handle("POST", pattern_SyncService_SyncGetBootstrapMetadata_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncService/SyncGetBootstrapMetadata", runtime.WithHTTPPathPattern("/qubic.archiver.archive.pb.SyncService/SyncGetBootstrapMetadata")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_SyncGetBootstrapMetadata_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_SyncGetBootstrapMetadata_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_SyncService_SyncGetEpochInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncService/SyncGetEpochInformation", runtime.WithHTTPPathPattern("/qubic.archiver.archive.pb.SyncService/SyncGetEpochInformation")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_SyncGetEpochInformation_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_SyncGetEpochInformation_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_SyncService_SyncGetTickInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncService/SyncGetTickInformation", runtime.WithHTTPPathPattern("/qubic.archiver.archive.pb.SyncService/SyncGetTickInformation")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_SyncGetTickInformation_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_SyncGetTickInformation_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_SyncService_SyncGetBootstrapMetadata_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"qubic.archiver.archive.pb.SyncService", "SyncGetBootstrapMetadata"}, "")) + + pattern_SyncService_SyncGetEpochInformation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"qubic.archiver.archive.pb.SyncService", "SyncGetEpochInformation"}, "")) + + pattern_SyncService_SyncGetTickInformation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"qubic.archiver.archive.pb.SyncService", "SyncGetTickInformation"}, "")) +) + +var ( + forward_SyncService_SyncGetBootstrapMetadata_0 = runtime.ForwardResponseMessage + + forward_SyncService_SyncGetEpochInformation_0 = runtime.ForwardResponseStream + + forward_SyncService_SyncGetTickInformation_0 = runtime.ForwardResponseStream +) diff --git a/protobuff/sync.proto b/protobuff/sync.proto new file mode 100644 index 0000000..7a01bb0 --- /dev/null +++ b/protobuff/sync.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package qubic.archiver.archive.pb; + +option go_package = "github.com/qubic/go-archiver/protobuff/"; +import "google/protobuf/empty.proto"; +import "archive.proto"; + + +message SyncEpochData { + Computors computor_list = 1; + LastTickQuorumDataPerEpochIntervals last_tick_quorum_data_per_intervals = 2; +} + +message SyncTickData { + TickData tick_data = 1; + QuorumTickData quorum_data = 2; + repeated Transaction transactions = 3; + repeated TransactionStatus transactions_status = 4; +} + +message SyncMetadataResponse { + string archiver_version = 1; + int32 maxObjectRequest = 2; + repeated ProcessedTickIntervalsPerEpoch processed_tick_intervals = 3; + //repeated SkippedTicksInterval skipped_tick_intervals = 4; +} + +message SyncEpochInfoRequest { + repeated uint32 epochs = 1; +} + +message SyncEpochInfoResponse { + repeated SyncEpochData epochs = 1; +} + +message SyncTickInfoRequest { + uint32 first_tick = 1; + uint32 last_tick = 2; +} + +message SyncTickInfoResponse { + repeated SyncTickData ticks = 1; +} + +message SyncLastSynchronizedTick { + uint32 tick_number = 1; + uint32 epoch = 2; + bytes chain_hash = 3; + bytes store_hash = 4; + +} + +service SyncService { + rpc SyncGetBootstrapMetadata(google.protobuf.Empty) returns (SyncMetadataResponse) {}; + + rpc SyncGetEpochInformation(SyncEpochInfoRequest) returns (stream SyncEpochInfoResponse) {}; + + rpc SyncGetTickInformation(SyncTickInfoRequest) returns (stream SyncTickInfoResponse) {}; +} \ No newline at end of file diff --git a/protobuff/sync_grpc.pb.go b/protobuff/sync_grpc.pb.go new file mode 100644 index 0000000..d428c57 --- /dev/null +++ b/protobuff/sync_grpc.pb.go @@ -0,0 +1,239 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.28.3 +// source: sync.proto + +package protobuff + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + SyncService_SyncGetBootstrapMetadata_FullMethodName = "/qubic.archiver.archive.pb.SyncService/SyncGetBootstrapMetadata" + SyncService_SyncGetEpochInformation_FullMethodName = "/qubic.archiver.archive.pb.SyncService/SyncGetEpochInformation" + SyncService_SyncGetTickInformation_FullMethodName = "/qubic.archiver.archive.pb.SyncService/SyncGetTickInformation" +) + +// SyncServiceClient is the client API for SyncService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SyncServiceClient interface { + SyncGetBootstrapMetadata(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SyncMetadataResponse, error) + SyncGetEpochInformation(ctx context.Context, in *SyncEpochInfoRequest, opts ...grpc.CallOption) (SyncService_SyncGetEpochInformationClient, error) + SyncGetTickInformation(ctx context.Context, in *SyncTickInfoRequest, opts ...grpc.CallOption) (SyncService_SyncGetTickInformationClient, error) +} + +type syncServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSyncServiceClient(cc grpc.ClientConnInterface) SyncServiceClient { + return &syncServiceClient{cc} +} + +func (c *syncServiceClient) SyncGetBootstrapMetadata(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SyncMetadataResponse, error) { + out := new(SyncMetadataResponse) + err := c.cc.Invoke(ctx, SyncService_SyncGetBootstrapMetadata_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) SyncGetEpochInformation(ctx context.Context, in *SyncEpochInfoRequest, opts ...grpc.CallOption) (SyncService_SyncGetEpochInformationClient, error) { + stream, err := c.cc.NewStream(ctx, &SyncService_ServiceDesc.Streams[0], SyncService_SyncGetEpochInformation_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &syncServiceSyncGetEpochInformationClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SyncService_SyncGetEpochInformationClient interface { + Recv() (*SyncEpochInfoResponse, error) + grpc.ClientStream +} + +type syncServiceSyncGetEpochInformationClient struct { + grpc.ClientStream +} + +func (x *syncServiceSyncGetEpochInformationClient) Recv() (*SyncEpochInfoResponse, error) { + m := new(SyncEpochInfoResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *syncServiceClient) SyncGetTickInformation(ctx context.Context, in *SyncTickInfoRequest, opts ...grpc.CallOption) (SyncService_SyncGetTickInformationClient, error) { + stream, err := c.cc.NewStream(ctx, &SyncService_ServiceDesc.Streams[1], SyncService_SyncGetTickInformation_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &syncServiceSyncGetTickInformationClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SyncService_SyncGetTickInformationClient interface { + Recv() (*SyncTickInfoResponse, error) + grpc.ClientStream +} + +type syncServiceSyncGetTickInformationClient struct { + grpc.ClientStream +} + +func (x *syncServiceSyncGetTickInformationClient) Recv() (*SyncTickInfoResponse, error) { + m := new(SyncTickInfoResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SyncServiceServer is the server API for SyncService service. +// All implementations must embed UnimplementedSyncServiceServer +// for forward compatibility +type SyncServiceServer interface { + SyncGetBootstrapMetadata(context.Context, *emptypb.Empty) (*SyncMetadataResponse, error) + SyncGetEpochInformation(*SyncEpochInfoRequest, SyncService_SyncGetEpochInformationServer) error + SyncGetTickInformation(*SyncTickInfoRequest, SyncService_SyncGetTickInformationServer) error + mustEmbedUnimplementedSyncServiceServer() +} + +// UnimplementedSyncServiceServer must be embedded to have forward compatible implementations. +type UnimplementedSyncServiceServer struct { +} + +func (UnimplementedSyncServiceServer) SyncGetBootstrapMetadata(context.Context, *emptypb.Empty) (*SyncMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncGetBootstrapMetadata not implemented") +} +func (UnimplementedSyncServiceServer) SyncGetEpochInformation(*SyncEpochInfoRequest, SyncService_SyncGetEpochInformationServer) error { + return status.Errorf(codes.Unimplemented, "method SyncGetEpochInformation not implemented") +} +func (UnimplementedSyncServiceServer) SyncGetTickInformation(*SyncTickInfoRequest, SyncService_SyncGetTickInformationServer) error { + return status.Errorf(codes.Unimplemented, "method SyncGetTickInformation not implemented") +} +func (UnimplementedSyncServiceServer) mustEmbedUnimplementedSyncServiceServer() {} + +// UnsafeSyncServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SyncServiceServer will +// result in compilation errors. 
+type UnsafeSyncServiceServer interface { + mustEmbedUnimplementedSyncServiceServer() +} + +func RegisterSyncServiceServer(s grpc.ServiceRegistrar, srv SyncServiceServer) { + s.RegisterService(&SyncService_ServiceDesc, srv) +} + +func _SyncService_SyncGetBootstrapMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SyncServiceServer).SyncGetBootstrapMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SyncService_SyncGetBootstrapMetadata_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).SyncGetBootstrapMetadata(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SyncService_SyncGetEpochInformation_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SyncEpochInfoRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SyncServiceServer).SyncGetEpochInformation(m, &syncServiceSyncGetEpochInformationServer{stream}) +} + +type SyncService_SyncGetEpochInformationServer interface { + Send(*SyncEpochInfoResponse) error + grpc.ServerStream +} + +type syncServiceSyncGetEpochInformationServer struct { + grpc.ServerStream +} + +func (x *syncServiceSyncGetEpochInformationServer) Send(m *SyncEpochInfoResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SyncService_SyncGetTickInformation_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SyncTickInfoRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SyncServiceServer).SyncGetTickInformation(m, &syncServiceSyncGetTickInformationServer{stream}) +} + +type SyncService_SyncGetTickInformationServer interface { + Send(*SyncTickInfoResponse) error + grpc.ServerStream +} + +type syncServiceSyncGetTickInformationServer struct { + grpc.ServerStream +} + +func (x *syncServiceSyncGetTickInformationServer) Send(m *SyncTickInfoResponse) error { + return x.ServerStream.SendMsg(m) +} + +// SyncService_ServiceDesc is the grpc.ServiceDesc for SyncService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SyncService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "qubic.archiver.archive.pb.SyncService", + HandlerType: (*SyncServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SyncGetBootstrapMetadata", + Handler: _SyncService_SyncGetBootstrapMetadata_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "SyncGetEpochInformation", + Handler: _SyncService_SyncGetEpochInformation_Handler, + ServerStreams: true, + }, + { + StreamName: "SyncGetTickInformation", + Handler: _SyncService_SyncGetTickInformation_Handler, + ServerStreams: true, + }, + }, + Metadata: "sync.proto", +} diff --git a/rpc/rpc_server.go b/rpc/rpc_server.go index 35a47c2..f6df899 100644 --- a/rpc/rpc_server.go +++ b/rpc/rpc_server.go @@ -25,6 +25,12 @@ import ( "net/http" ) +type BootstrapConfiguration struct { + Enable bool + MaximumRequestedItems int + BatchSize int +} + var _ protobuff.ArchiveServiceServer = &Server{} var emptyTd = &protobuff.TickData{} @@ -36,22 +42,24 @@ type TransactionInfo struct { type Server struct { protobuff.UnimplementedArchiveServiceServer - listenAddrGRPC string - listenAddrHTTP string - syncThreshold int - chainTickFetchUrl string - store *store.PebbleStore - pool *qubic.Pool + listenAddrGRPC string + listenAddrHTTP string + syncThreshold int + chainTickFetchUrl string + store *store.PebbleStore + pool *qubic.Pool + bootstrapConfiguration BootstrapConfiguration } -func NewServer(listenAddrGRPC, listenAddrHTTP string, syncThreshold int, chainTickUrl string, store *store.PebbleStore, pool *qubic.Pool) *Server { +func NewServer(listenAddrGRPC, listenAddrHTTP string, syncThreshold int, chainTickUrl string, store *store.PebbleStore, pool *qubic.Pool, bootstrapConfiguration BootstrapConfiguration) *Server { return &Server{ - listenAddrGRPC: listenAddrGRPC, - listenAddrHTTP: listenAddrHTTP, - syncThreshold: syncThreshold, - chainTickFetchUrl: chainTickUrl, - store: store, - pool: pool, + listenAddrGRPC: listenAddrGRPC, + listenAddrHTTP: listenAddrHTTP, + syncThreshold: syncThreshold, + chainTickFetchUrl: chainTickUrl, + store: store, + pool: pool, + bootstrapConfiguration: bootstrapConfiguration, } } @@ -389,7 +397,7 @@ func (s *Server) GetStatus(ctx context.Context, _ *emptypb.Empty) (*protobuff.Ge }, nil } -type response struct { +type chainTickResponse struct { ChainTick int `json:"max_tick"` } @@ -406,7 +414,7 @@ func fetchChainTick(ctx context.Context, url string) (int, error) { } defer res.Body.Close() - var resp response + var resp chainTickResponse body, err := io.ReadAll(res.Body) if err != nil { return 0, errors.Wrap(err, "reading response body") @@ -639,6 +647,11 @@ func (s *Server) Start() error { grpc.MaxSendMsgSize(600*1024*1024), ) protobuff.RegisterArchiveServiceServer(srv, s) + if s.bootstrapConfiguration.Enable { + syncService := NewSyncService(s.store, s.bootstrapConfiguration) + + protobuff.RegisterSyncServiceServer(srv, syncService) + } reflection.Register(srv) lis, err := net.Listen("tcp", s.listenAddrGRPC) diff --git a/rpc/sync.go b/rpc/sync.go new file mode 100644 index 0000000..c92796d --- /dev/null +++ b/rpc/sync.go @@ -0,0 +1,177 @@ +package rpc + +import ( + "context" + "fmt" + "github.com/pkg/errors" + "github.com/qubic/go-archiver/protobuff" + "github.com/qubic/go-archiver/store" + "github.com/qubic/go-archiver/utils" + "github.com/qubic/go-archiver/validator/quorum" + "github.com/qubic/go-archiver/validator/tick" 
+ "google.golang.org/grpc/codes" + _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +var _ protobuff.SyncServiceServer = &SyncService{} + +type SyncService struct { + protobuff.UnimplementedSyncServiceServer + store *store.PebbleStore + bootstrapConfiguration BootstrapConfiguration +} + +func NewSyncService(pebbleStore *store.PebbleStore, bootstrapConfiguration BootstrapConfiguration) *SyncService { + return &SyncService{ + bootstrapConfiguration: bootstrapConfiguration, + store: pebbleStore, + } +} + +func (ss *SyncService) SyncGetBootstrapMetadata(ctx context.Context, _ *emptypb.Empty) (*protobuff.SyncMetadataResponse, error) { + + processedIntervals, err := ss.store.GetProcessedTickIntervals(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "cannot get processed tick intervals: %v", err) + } + + /*skippedIntervals, err := ss.store.GetSkippedTicksInterval(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "cannot get skipped tick intervals: %v", err) + }*/ + + return &protobuff.SyncMetadataResponse{ + ArchiverVersion: utils.ArchiverVersion, + MaxObjectRequest: int32(ss.bootstrapConfiguration.MaximumRequestedItems), + //SkippedTickIntervals: skippedIntervals.SkippedTicks, + ProcessedTickIntervals: processedIntervals, + }, nil +} + +func (ss *SyncService) sendEpochInformationResponse(epochs []*protobuff.SyncEpochData, stream protobuff.SyncService_SyncGetEpochInformationServer) error { + response := &protobuff.SyncEpochInfoResponse{ + Epochs: epochs, + } + if err := stream.Send(response); err != nil { + return errors.Wrapf(err, "streaming epoch response") + } + return nil +} + +func (ss *SyncService) SyncGetEpochInformation(req *protobuff.SyncEpochInfoRequest, stream protobuff.SyncService_SyncGetEpochInformationServer) error { + + if len(req.Epochs) > ss.bootstrapConfiguration.MaximumRequestedItems { + return status.Errorf(codes.OutOfRange, "the number of requested epochs (%d) exceeds the maximum allowed (%d)", len(req.Epochs), ss.bootstrapConfiguration.MaximumRequestedItems) + } + + var epochs []*protobuff.SyncEpochData + + for _, epoch := range req.Epochs { + computors, err := ss.store.GetComputors(context.Background(), epoch) + if err != nil { + return status.Errorf(codes.Internal, "getting epoch computors: %v", err) + } + + lastTickQuorumDataPerIntervals, err := ss.store.GetLastTickQuorumDataListPerEpochInterval(epoch) + if err != nil { + return status.Errorf(codes.Internal, "getting quorum data for epoch's last tick: %v", err) + } + + epochData := &protobuff.SyncEpochData{ + ComputorList: computors, + LastTickQuorumDataPerIntervals: lastTickQuorumDataPerIntervals, + } + + epochs = append(epochs, epochData) + + if len(epochs) >= ss.bootstrapConfiguration.BatchSize { + err := ss.sendEpochInformationResponse(epochs, stream) + if err != nil { + return errors.Wrap(err, "sending epoch information") + } + epochs = make([]*protobuff.SyncEpochData, 0) + } + } + + err := ss.sendEpochInformationResponse(epochs, stream) + if err != nil { + return errors.Wrap(err, "sending epoch information") + } + + return nil +} + +func (ss *SyncService) sendTickInformationResponse(ticks []*protobuff.SyncTickData, stream protobuff.SyncService_SyncGetTickInformationServer) error { + response := &protobuff.SyncTickInfoResponse{ + Ticks: ticks, + } + if err := stream.Send(response); err != nil { + return errors.Wrapf(err, "streaming tick response") + } + return nil +} + +func (ss *SyncService) 
SyncGetTickInformation(req *protobuff.SyncTickInfoRequest, stream protobuff.SyncService_SyncGetTickInformationServer) error { + + tickDifference := int(req.LastTick - req.FirstTick) + + if tickDifference > ss.bootstrapConfiguration.MaximumRequestedItems || tickDifference < 0 { + return status.Errorf(codes.OutOfRange, "the number of requested ticks (%d) is not within the allowed range (0 - %d)", tickDifference, ss.bootstrapConfiguration.MaximumRequestedItems) + } + + var ticks []*protobuff.SyncTickData + + fmt.Printf("RANGE: [%d - %d]\n", req.FirstTick, req.LastTick) + + for tickNumber := req.FirstTick; tickNumber <= req.LastTick; tickNumber++ { + tickData, err := ss.store.GetTickData(context.Background(), tickNumber) + if err != nil { + return status.Errorf(codes.Internal, "getting tick data for tick %d: %v", tickNumber, err) + } + + quorumData, err := quorum.GetQuorumTickData(tickNumber, ss.store) + if err != nil { + return status.Errorf(codes.Internal, "getting quorum data for tick %d: %v", tickNumber, err) + } + + transactions, err := ss.store.GetTickTransactions(context.Background(), tickNumber) + if err != nil { + return status.Errorf(codes.Internal, "getting transactions for tick %d: %v", tickNumber, err) + } + + transactionStatuses, err := ss.store.GetTickTransactionsStatus(context.Background(), uint64(tickNumber)) + if err != nil { + return status.Errorf(codes.Internal, "getting transaction statuses for tick %d: %v", tickNumber, err) + } + + if tickNumber != quorumData.QuorumTickStructure.TickNumber || (!tick.CheckIfTickIsEmptyProto(tickData) && tickData.TickNumber != tickNumber) { + fmt.Printf("Asked: %d, Got Quorum: %d, Got TickNumber: %d\n", tickNumber, quorumData.QuorumTickStructure.TickNumber, tickData.TickNumber) + return errors.New("read tick from store does not match asked tick") + } + + syncTickData := &protobuff.SyncTickData{ + TickData: tickData, + QuorumData: quorumData, + Transactions: transactions, + TransactionsStatus: transactionStatuses.Transactions, + } + + ticks = append(ticks, syncTickData) + + if len(ticks) >= ss.bootstrapConfiguration.BatchSize { + err := ss.sendTickInformationResponse(ticks, stream) + if err != nil { + return errors.Wrap(err, "sending tick information") + } + ticks = make([]*protobuff.SyncTickData, 0) + } + + } + err := ss.sendTickInformationResponse(ticks, stream) + if err != nil { + return errors.Wrap(err, "sending tick information") + } + return nil +} diff --git a/store/keys.go b/store/keys.go index 5e4cf2a..56f3788 100644 --- a/store/keys.go +++ b/store/keys.go @@ -21,6 +21,7 @@ const ( EmptyTicksPerEpoch = 0x13 LastTickQuorumDataPerEpochInterval = 0x14 EmptyTickListPerEpoch = 0x15 + SyncLastSynchronizedTick = 0x20 ) func emptyTickListPerEpochKey(epoch uint32) []byte { @@ -139,3 +140,42 @@ func tickTxStatusKey(tickNumber uint64) []byte { return key } + +func syncLastSynchronizedTick() []byte { + return []byte{SyncLastSynchronizedTick} +} + +type IDType interface { + uint32 | uint64 | string +} + +func AssembleKey[T IDType](keyPrefix int, id T) []byte { + + prefix := byte(keyPrefix) + + key := []byte{prefix} + + switch any(id).(type) { + + case uint32: + asserted := any(id).(uint32) + + if keyPrefix == LastProcessedTickPerEpoch || keyPrefix == ProcessedTickIntervals { + key = binary.BigEndian.AppendUint32(key, asserted) + break + } + + key = binary.BigEndian.AppendUint64(key, uint64(asserted)) + break + + case uint64: + asserted := any(id).(uint64) + key = binary.BigEndian.AppendUint64(key, asserted) + break + + case string: + asserted := 
any(id).(string) + key = append(key, []byte(asserted)...) + } + return key +} diff --git a/store/store.go b/store/store.go index 87c521d..69ec015 100644 --- a/store/store.go +++ b/store/store.go @@ -269,7 +269,7 @@ func (s *PebbleStore) SetLastProcessedTick(ctx context.Context, lastProcessedTic return errors.Wrap(err, "committing batch") } - ptie, err := s.getProcessedTickIntervalsPerEpoch(ctx, lastProcessedTick.Epoch) + ptie, err := s.GetProcessedTickIntervalsPerEpoch(ctx, lastProcessedTick.Epoch) if err != nil { return errors.Wrap(err, "getting ptie") } @@ -560,7 +560,7 @@ func (s *PebbleStore) SetTickTransactionsStatus(ctx context.Context, tickNumber return nil } -func (s *PebbleStore) getProcessedTickIntervalsPerEpoch(ctx context.Context, epoch uint32) (*protobuff.ProcessedTickIntervalsPerEpoch, error) { +func (s *PebbleStore) GetProcessedTickIntervalsPerEpoch(ctx context.Context, epoch uint32) (*protobuff.ProcessedTickIntervalsPerEpoch, error) { key := processedTickIntervalsPerEpochKey(epoch) value, closer, err := s.db.Get(key) if err != nil { @@ -596,7 +596,7 @@ func (s *PebbleStore) SetProcessedTickIntervalPerEpoch(ctx context.Context, epoc } func (s *PebbleStore) AppendProcessedTickInterval(ctx context.Context, epoch uint32, pti *protobuff.ProcessedTickInterval) error { - existing, err := s.getProcessedTickIntervalsPerEpoch(ctx, epoch) + existing, err := s.GetProcessedTickIntervalsPerEpoch(ctx, epoch) if err != nil { return errors.Wrap(err, "getting existing processed tick intervals") } @@ -740,7 +740,7 @@ func (s *PebbleStore) GetLastTickQuorumDataListPerEpochInterval(epoch uint32) (* func (s *PebbleStore) SetQuorumDataForCurrentEpochInterval(epoch uint32, quorumData *protobuff.QuorumTickData) error { - processedIntervals, err := s.getProcessedTickIntervalsPerEpoch(nil, epoch) + processedIntervals, err := s.GetProcessedTickIntervalsPerEpoch(nil, epoch) if err != nil { return errors.Wrapf(err, "getting processed tick intervals for epoch %d", epoch) } @@ -834,3 +834,57 @@ func (s *PebbleStore) DeleteEmptyTickListKeyForEpoch(epoch uint32) error { } return nil } + +func (s *PebbleStore) GetDB() *pebble.DB { + return s.db +} + +func (s *PebbleStore) SetSyncLastSynchronizedTick(tick *protobuff.SyncLastSynchronizedTick) error { + key := syncLastSynchronizedTick() + serialized, err := proto.Marshal(tick) + if err != nil { + return errors.Wrap(err, "serializing last synchronized tick") + } + + err = s.db.Set(key, serialized, pebble.Sync) + if err != nil { + return errors.Wrap(err, "saving last synchronized tick to store") + } + return nil +} + +func (s *PebbleStore) GetSyncLastSynchronizedTick() (*protobuff.SyncLastSynchronizedTick, error) { + key := syncLastSynchronizedTick() + value, closer, err := s.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return &protobuff.SyncLastSynchronizedTick{ + TickNumber: 0, + Epoch: 0, + ChainHash: nil, + StoreHash: nil, + }, nil + } + + return nil, errors.Wrap(err, "getting last synchronized tick from store") + } + defer closer.Close() + + var tick protobuff.SyncLastSynchronizedTick + err = proto.Unmarshal(value, &tick) + if err != nil { + return nil, errors.Wrap(err, "de-serializing last synchronized tick") + } + + return &tick, nil +} + +func (s *PebbleStore) DeleteSyncLastSynchronizedTick() error { + + err := s.db.Delete(syncLastSynchronizedTick(), pebble.Sync) + if err != nil { + return errors.Wrap(err, "deleting last synchronized tick from store") + } + + return nil +} diff --git a/utils/utils.go b/utils/utils.go index 
7fc790b..072e930 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -8,6 +8,8 @@ import ( "github.com/pkg/errors" ) +const ArchiverVersion = "dev" + type SigVerifierFunc = func(ctx context.Context, pubkey [32]byte, digest [32]byte, sig [64]byte) error func K12Hash(data []byte) ([32]byte, error) { diff --git a/validator/chain/chain.go b/validator/chain/chain.go index 5723366..7534a2b 100644 --- a/validator/chain/chain.go +++ b/validator/chain/chain.go @@ -14,7 +14,7 @@ func ComputeAndSave(ctx context.Context, store *store.PebbleStore, initialEpochT return errors.Wrap(err, "getting prev chain digest") } - currentDigest, err := computeCurrentTickDigest(ctx, quorumVote, prevDigest) + currentDigest, err := ComputeCurrentTickDigest(ctx, quorumVote, prevDigest) if err != nil { return errors.Wrap(err, "computing current tick digest") } @@ -37,7 +37,7 @@ func ComputeStoreAndSave(ctx context.Context, store *store.PebbleStore, initialE return errors.Wrap(err, "getting prev chain digest") } - currentDigest, err := computeCurrentTickStoreDigest(ctx, validTxs, tickTxsStatus, prevDigest) + currentDigest, err := ComputeCurrentTickStoreDigest(ctx, validTxs, tickTxsStatus, prevDigest) if err != nil { return errors.Wrap(err, "computing current tick digest") } @@ -85,7 +85,7 @@ func getPrevChainDigest(ctx context.Context, store *store.PebbleStore, initialEp return previousTickChainDigest, nil } -func computeCurrentTickDigest(ctx context.Context, vote types.QuorumTickVote, previousTickChainDigest [32]byte) ([32]byte, error) { +func ComputeCurrentTickDigest(ctx context.Context, vote types.QuorumTickVote, previousTickChainDigest [32]byte) ([32]byte, error) { chain := Chain{ Epoch: vote.Epoch, Tick: vote.Tick, @@ -111,7 +111,7 @@ func computeCurrentTickDigest(ctx context.Context, vote types.QuorumTickVote, pr return digest, nil } -func computeCurrentTickStoreDigest(ctx context.Context, validTxs []types.Transaction, tickTxsStatus *protobuff.TickTransactionsStatus, previousTickChainDigest [32]byte) ([32]byte, error) { +func ComputeCurrentTickStoreDigest(ctx context.Context, validTxs []types.Transaction, tickTxsStatus *protobuff.TickTransactionsStatus, previousTickChainDigest [32]byte) ([32]byte, error) { s := Store{ PreviousTickStoreDigest: previousTickChainDigest, ValidTxs: validTxs, diff --git a/validator/computors/models.go b/validator/computors/models.go index e71bbb6..a69298c 100644 --- a/validator/computors/models.go +++ b/validator/computors/models.go @@ -36,7 +36,7 @@ func pubKeysToIdentities(pubKeys [types.NumberOfComputors][32]byte) ([]string, e return identities, nil } -func protoToQubic(computors *protobuff.Computors) (types.Computors, error) { +func ProtoToQubic(computors *protobuff.Computors) (types.Computors, error) { pubKeys, err := identitiesToPubKeys(computors.Identities) if err != nil { return types.Computors{}, errors.Wrap(err, "converting proto identities to qubic model") diff --git a/validator/computors/models_test.go b/validator/computors/models_test.go index 3aaac75..2f7278f 100644 --- a/validator/computors/models_test.go +++ b/validator/computors/models_test.go @@ -33,7 +33,7 @@ func TestQubicModelToProtoAndReverse(t *testing.T) { t.Fatalf("qubicToProto() mismatch (-got +want):\n%s", diff) } - converted, err := protoToQubic(got) + converted, err := ProtoToQubic(got) if err != nil { t.Fatalf("protoToQubic() error: %v", err) } diff --git a/validator/computors/validator.go b/validator/computors/validator.go index 1cd7fff..bde5733 100644 --- a/validator/computors/validator.go +++ 
b/validator/computors/validator.go @@ -2,6 +2,7 @@ package computors import ( "context" + "github.com/qubic/go-archiver/protobuff" "github.com/qubic/go-archiver/store" "github.com/pkg/errors" @@ -65,10 +66,20 @@ func Get(ctx context.Context, store *store.PebbleStore, epoch uint32) (types.Com return types.Computors{}, errors.Wrap(err, "get computors") } - model, err := protoToQubic(protoModel) + model, err := ProtoToQubic(protoModel) if err != nil { return types.Computors{}, errors.Wrap(err, "proto to qubic") } return model, nil } + +func ValidateProto(ctx context.Context, sigVerifierFunc utils.SigVerifierFunc, computors *protobuff.Computors) error { + + qubicComputors, err := ProtoToQubic(computors) + if err != nil { + return errors.Wrap(err, "converting computors to qubic format") + } + + return Validate(ctx, sigVerifierFunc, qubicComputors) +} diff --git a/validator/quorum/models.go b/validator/quorum/models.go index e7f5453..9eb2af3 100644 --- a/validator/quorum/models.go +++ b/validator/quorum/models.go @@ -1,16 +1,20 @@ package quorum import ( + "context" "encoding/binary" "encoding/hex" + "fmt" "github.com/pkg/errors" "github.com/qubic/go-archiver/protobuff" + "github.com/qubic/go-archiver/store" "github.com/qubic/go-archiver/utils" + "github.com/qubic/go-archiver/validator/tick" "github.com/qubic/go-node-connector/types" "time" ) -func qubicToProto(votes types.QuorumVotes) *protobuff.QuorumTickData { +func QubicToProto(votes types.QuorumVotes) *protobuff.QuorumTickData { firstQuorumTickData := votes[0] protoQuorumTickData := protobuff.QuorumTickData{ QuorumTickStructure: qubicTickStructureToProto(firstQuorumTickData), @@ -59,7 +63,7 @@ func convertUint64ToHex(value uint64) string { return hex.EncodeToString(b) } -func qubicToProtoStored(votes types.QuorumVotes) *protobuff.QuorumTickDataStored { +func QubicToProtoStored(votes types.QuorumVotes) *protobuff.QuorumTickDataStored { firstQuorumTickData := votes[0] protoQuorumTickData := protobuff.QuorumTickDataStored{ QuorumTickStructure: qubicTickStructureToProto(firstQuorumTickData), @@ -155,3 +159,219 @@ func ReconstructQuorumData(currentTickQuorumData, nextTickQuorumData *protobuff. 
return &reconstructedQuorumData, nil } + +func GetQuorumTickData(tickNumber uint32, pebbleStore *store.PebbleStore) (*protobuff.QuorumTickData, error) { + lastProcessedTick, err := pebbleStore.GetLastProcessedTick(context.Background()) + if err != nil { + return nil, errors.Wrap(err, "getting last processed tick") + } + if tickNumber > lastProcessedTick.TickNumber { + + return nil, errors.New(fmt.Sprintf("requested tick number %d is greater than last processed tick %d", tickNumber, lastProcessedTick.TickNumber)) + } + + processedTickIntervalsPerEpoch, err := pebbleStore.GetProcessedTickIntervals(context.Background()) + if err != nil { + return nil, errors.Wrap(err, "getting processed tick intervals per epoch") + } + + epoch, err := tick.GetTickEpoch(tickNumber, processedTickIntervalsPerEpoch) + if err != nil { + return nil, errors.Wrap(err, "getting tick epoch") + } + + lastTickFlag, index, err := tick.IsTickLastInAnyEpochInterval(tickNumber, epoch, processedTickIntervalsPerEpoch) + if err != nil { + return nil, errors.Wrap(err, "checking if tick is last tick in it's epoch") + } + + if lastTickFlag { + lastQuorumDataPerEpochInterval, err := pebbleStore.GetLastTickQuorumDataListPerEpochInterval(epoch) + if err != nil { + return nil, errors.Wrap(err, "getting quorum data for last processed tick") + } + + return lastQuorumDataPerEpochInterval.QuorumDataPerInterval[int32(index)], nil + } + + wasSkipped, nextAvailableTick := tick.WasSkippedByArchive(tickNumber, processedTickIntervalsPerEpoch) + if wasSkipped == true { + + return nil, errors.New(fmt.Sprintf("provided tick number %d was skipped by the system, next available tick is %d", tickNumber, nextAvailableTick)) + } + + if tickNumber == lastProcessedTick.TickNumber { + tickData, err := pebbleStore.GetQuorumTickData(context.Background(), tickNumber) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "quorum tick data not found") + } + return nil, errors.Wrap(err, "getting quorum tick data") + } + + quorumTickData := &protobuff.QuorumTickData{ + QuorumTickStructure: tickData.QuorumTickStructure, + QuorumDiffPerComputor: make(map[uint32]*protobuff.QuorumDiff), + } + + for id, diff := range tickData.QuorumDiffPerComputor { + quorumTickData.QuorumDiffPerComputor[id] = &protobuff.QuorumDiff{ + ExpectedNextTickTxDigestHex: diff.ExpectedNextTickTxDigestHex, + SignatureHex: diff.SignatureHex, + } + } + + return quorumTickData, nil + } + + nextTick := tickNumber + 1 + + nextTickQuorumData, err := pebbleStore.GetQuorumTickData(context.Background(), nextTick) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "quorum data for next tick was not found") + } + return nil, errors.Wrap(err, "getting tick data") + } + + currentTickQuorumData, err := pebbleStore.GetQuorumTickData(context.Background(), tickNumber) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, errors.Wrap(err, "quorum data for tick was not found") + } + return nil, errors.Wrap(err, "getting tick data") + } + + computors, err := pebbleStore.GetComputors(context.Background(), currentTickQuorumData.QuorumTickStructure.Epoch) + if err != nil { + return nil, errors.Wrap(err, "getting computor list") + } + + reconstructedQuorumData, err := ReconstructQuorumData(currentTickQuorumData, nextTickQuorumData, computors) + if err != nil { + return nil, errors.Wrap(err, "reconstructing quorum data") + } + + return reconstructedQuorumData, nil +} + +func convertHexToUint64(value string) (uint64, error) { + 
decoded, err := hex.DecodeString(value) + if err != nil { + return 0, errors.Wrap(err, "decoding uint64 hex string") + } + + return binary.LittleEndian.Uint64(decoded), nil +} + +func decode32ByteDigestFromString(value string) ([32]byte, error) { + decoded, err := hex.DecodeString(value) + if err != nil { + return [32]byte{}, errors.Wrap(err, "decoding 32 byte digest string") + } + + var returnedValue [32]byte + copy(returnedValue[:], decoded[:]) + + return returnedValue, nil +} + +func decode64ByteDigestFromString(value string) ([64]byte, error) { + decoded, err := hex.DecodeString(value) + if err != nil { + return [64]byte{}, errors.Wrap(err, "decoding 64 byte digest string") + } + + var returnedValue [64]byte + copy(returnedValue[:], decoded[:]) + + return returnedValue, nil +} + +func ProtoToQubic(quorumData *protobuff.QuorumTickData) (types.QuorumVotes, error) { + + votes := types.QuorumVotes{} + + tickStructure := quorumData.QuorumTickStructure + + tickTime := time.UnixMilli(int64(tickStructure.Timestamp)).UTC() + tickTimeNoMilli := time.Date(tickTime.Year(), tickTime.Month(), tickTime.Day(), tickTime.Hour(), tickTime.Minute(), tickTime.Second(), 0, time.UTC) + milli := tickTime.UnixMilli() - tickTimeNoMilli.UnixMilli() + + prevResourceTestingDigest, err := convertHexToUint64(tickStructure.PrevResourceTestingDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding previous resource testing digest") + } + prevSpectrumDigest, err := decode32ByteDigestFromString(tickStructure.PrevSpectrumDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding previous spectrum digest") + } + prevUniverseDigest, err := decode32ByteDigestFromString(tickStructure.PrevUniverseDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding previous universe digest") + } + prevComputerDigest, err := decode32ByteDigestFromString(tickStructure.PrevComputerDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding previous computer digest") + } + txDigest, err := decode32ByteDigestFromString(tickStructure.TxDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding tx digest") + } + + for index, diff := range quorumData.QuorumDiffPerComputor { + + saltedResourceTestingDigest, err := convertHexToUint64(diff.SaltedResourceTestingDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding salted resource testing digest") + } + saltedSpectrumDigest, err := decode32ByteDigestFromString(diff.SaltedSpectrumDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding salted spectrum digest") + } + saltedUniverseDigest, err := decode32ByteDigestFromString(diff.SaltedUniverseDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding salted universe digest") + } + saltedComputerDigest, err := decode32ByteDigestFromString(diff.SaltedComputerDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding salted computer digest") + } + expectedNextTickTxDigest, err := decode32ByteDigestFromString(diff.ExpectedNextTickTxDigestHex) + if err != nil { + return nil, errors.Wrap(err, "decoding expected next tick transaction digest") + } + signature, err := decode64ByteDigestFromString(diff.SignatureHex) + if err != nil { + return nil, errors.Wrap(err, "decoding signature") + } + + computorVote := types.QuorumTickVote{ + ComputorIndex: uint16(index), + Epoch: uint16(tickStructure.Epoch), + Tick: tickStructure.TickNumber, + Millisecond: uint16(milli), + Second: uint8(tickTime.Second()), + Minute: uint8(tickTime.Minute()), + Hour: 
uint8(tickTime.Hour()), + Day: uint8(tickTime.Day()), + Month: uint8(tickTime.Month()), + Year: uint8(tickTime.Year() - 2000), + PreviousResourceTestingDigest: prevResourceTestingDigest, + SaltedResourceTestingDigest: saltedResourceTestingDigest, + PreviousSpectrumDigest: prevSpectrumDigest, + PreviousUniverseDigest: prevUniverseDigest, + PreviousComputerDigest: prevComputerDigest, + SaltedSpectrumDigest: saltedSpectrumDigest, + SaltedUniverseDigest: saltedUniverseDigest, + SaltedComputerDigest: saltedComputerDigest, + TxDigest: txDigest, + ExpectedNextTickTxDigest: expectedNextTickTxDigest, + Signature: signature, + } + + votes = append(votes, computorVote) + + } + return votes, nil +} diff --git a/validator/quorum/models_test.go b/validator/quorum/models_test.go index 5ab2569..a5d550d 100644 --- a/validator/quorum/models_test.go +++ b/validator/quorum/models_test.go @@ -97,7 +97,7 @@ func TestQubicModelToProto(t *testing.T) { } // Invoke the function under test - result := qubicToProto(mockData) + result := QubicToProto(mockData) if diff := cmp.Diff(expectedProtoQuorumTickData, result, cmpopts.IgnoreUnexported(protobuff.QuorumTickData{}, protobuff.QuorumTickStructure{}, protobuff.QuorumDiff{})); diff != "" { t.Errorf("Unexpected result: %v", diff) diff --git a/validator/quorum/validator.go b/validator/quorum/validator.go index e721707..6e2e486 100644 --- a/validator/quorum/validator.go +++ b/validator/quorum/validator.go @@ -156,14 +156,14 @@ func getDigestFromQuorumTickData(data types.QuorumTickVote) ([32]byte, error) { } func Store(ctx context.Context, store *store.PebbleStore, tickNumber uint32, quorumVotes types.QuorumVotes) error { - protoModel := qubicToProtoStored(quorumVotes) + protoModel := QubicToProtoStored(quorumVotes) err := store.SetQuorumTickData(ctx, tickNumber, protoModel) if err != nil { return errors.Wrap(err, "set quorum votes") } - fullProtoModel := qubicToProto(quorumVotes) + fullProtoModel := QubicToProto(quorumVotes) err = store.SetQuorumDataForCurrentEpochInterval(fullProtoModel.QuorumTickStructure.Epoch, fullProtoModel) if err != nil { diff --git a/validator/sync.go b/validator/sync.go new file mode 100644 index 0000000..36ab876 --- /dev/null +++ b/validator/sync.go @@ -0,0 +1,235 @@ +package validator + +import ( + "cmp" + "fmt" + "github.com/pingcap/errors" + "github.com/qubic/go-archiver/protobuff" + "github.com/qubic/go-archiver/store" + "github.com/qubic/go-archiver/validator/chain" + "github.com/qubic/go-archiver/validator/quorum" + "github.com/qubic/go-archiver/validator/tick" + "github.com/qubic/go-archiver/validator/tx" + "github.com/qubic/go-node-connector/types" + "log" + "runtime" + "slices" + "sync" + "time" +) + +type ValidatedTick struct { + AlignedVotes *protobuff.QuorumTickDataStored + TickData *protobuff.TickData + ValidTransactions []*protobuff.Transaction + ApprovedTransactions *protobuff.TickTransactionsStatus + ChainHash [32]byte + StoreHash [32]byte + + firstVote types.QuorumTickVote + validTransactionsQubic []types.Transaction +} + +type ValidatedTicks []*ValidatedTick + +type SyncValidator struct { + initialIntervalTick uint32 + + computors types.Computors + ticks []*protobuff.SyncTickData + lastSynchronizedTick *protobuff.SyncLastSynchronizedTick + + pebbleStore *store.PebbleStore + processTickTimeout time.Duration +} + +func NewSyncValidator(initialIntervalTick uint32, computors types.Computors, ticks []*protobuff.SyncTickData, processTickTimeout time.Duration, pebbleStore *store.PebbleStore, lastSynchronizedTick 
*protobuff.SyncLastSynchronizedTick) *SyncValidator { + + return &SyncValidator{ + initialIntervalTick: initialIntervalTick, + computors: computors, + ticks: ticks, + + lastSynchronizedTick: lastSynchronizedTick, + + pebbleStore: pebbleStore, + processTickTimeout: processTickTimeout, + } +} + +func (sv *SyncValidator) Validate() (ValidatedTicks, error) { + + /*ctx, cancel := context.WithTimeout(context.Background(), sv.processTickTimeout) + defer cancel()*/ + + var validatedTicks ValidatedTicks + counter := 0 + mutex := sync.RWMutex{} + + routineCount := runtime.NumCPU() + batchSize := len(sv.ticks) / routineCount + errChannel := make(chan error, routineCount) + var waitGroup sync.WaitGroup + startTime := time.Now() + + for index := range routineCount { + waitGroup.Add(1) + + start := batchSize * index + end := start + batchSize + if end > (len(sv.ticks)) || index == (routineCount-1) { + end = len(sv.ticks) + } + + tickRange := sv.ticks[start:end] + + go func(errChanel chan<- error) { + defer waitGroup.Done() + log.Printf("[Routine %d] Validating tick range %d - %d\n", index, start, end) + + for _, tickInfo := range tickRange { + + log.Printf("[Routine %d] Validating tick %d \n", index, tickInfo.QuorumData.QuorumTickStructure.TickNumber) + + quorumVotes, err := quorum.ProtoToQubic(tickInfo.QuorumData) + if err != nil { + errChannel <- errors.Wrap(err, "converting quorum data to qubic format") + return + } + + alignedVotes, err := quorum.Validate(nil, GoSchnorrqVerify, quorumVotes, sv.computors) + if err != nil { + errChannel <- errors.Wrap(err, "validating quorum") + return + } + + log.Printf("Quorum validated. Aligned %d. Misaligned %d.\n", len(alignedVotes), len(quorumVotes)-len(alignedVotes)) + + tickData, err := tick.ProtoToQubic(tickInfo.TickData) + if err != nil { + errChannel <- errors.Wrapf(err, "converting tick data to qubic format") + return + } + + if tickInfo.QuorumData.QuorumTickStructure.Epoch < 124 { + + fullTickData, err := tick.ProtoToQubicFull(tickInfo.TickData) + if err != nil { + errChanel <- errors.Wrap(err, "converting tick data to qubic format") + return + } + + err = fullTickData.Validate(nil, GoSchnorrqVerify, alignedVotes[0], sv.computors) + if err != nil { + errChanel <- errors.Wrap(err, "validating full tick data") + return + } + } else { + err := tick.Validate(nil, GoSchnorrqVerify, tickData, alignedVotes[0], sv.computors) + if err != nil { + errChanel <- errors.Wrap(err, "validating tick data") + return + } + } + + log.Println("Tick data validated") + + transactions, err := tx.ProtoToQubic(tickInfo.Transactions) + if err != nil { + errChannel <- errors.Wrap(err, "converting transactions to qubic format") + return + } + + log.Printf("Validating %d transactions\n", len(transactions)) + + validTransactions, err := tx.Validate(nil, GoSchnorrqVerify, transactions, tickData) + if err != nil { + errChannel <- errors.Wrap(err, "validating transactions") + return + } + log.Printf("Validated %d transactions\n", len(validTransactions)) + + transactionsProto, err := tx.QubicToProto(validTransactions) + if err != nil { + errChannel <- errors.Wrap(err, "converting transactions to proto format") + return + } + + approvedTransactions := &protobuff.TickTransactionsStatus{ + Transactions: tickInfo.TransactionsStatus, + } + + mutex.Lock() + + validatedTick := ValidatedTick{ + AlignedVotes: quorum.QubicToProtoStored(alignedVotes), + TickData: tickInfo.TickData, + ValidTransactions: transactionsProto, + ApprovedTransactions: approvedTransactions, + + firstVote: alignedVotes[0], + 
validTransactionsQubic: validTransactions, + } + + validatedTicks = append(validatedTicks, &validatedTick) + counter += 1 + + mutex.Unlock() + } + + errChannel <- nil + + }(errChannel) + } + + waitGroup.Wait() + log.Printf("Done processing %d ticks. Took: %v\n", counter, time.Since(startTime)) + + for _ = range routineCount { + err := <-errChannel + if err != nil { + return nil, errors.Wrap(err, "processing ticks concurrently") + } + } + + slices.SortFunc(validatedTicks, func(a, b *ValidatedTick) int { + return cmp.Compare(a.AlignedVotes.QuorumTickStructure.TickNumber, b.AlignedVotes.QuorumTickStructure.TickNumber) + }) + + log.Printf("Computing chain and store digests...\n") + + var lastChainHash [32]byte + var lastStoreHash [32]byte + + if sv.initialIntervalTick <= sv.lastSynchronizedTick.TickNumber { + copy(lastChainHash[:], sv.lastSynchronizedTick.ChainHash) + copy(lastStoreHash[:], sv.lastSynchronizedTick.StoreHash) + } + + for _, validatedTick := range validatedTicks { + + if sv.lastSynchronizedTick.TickNumber == validatedTick.AlignedVotes.QuorumTickStructure.TickNumber { + continue + } + + fmt.Printf("Computing hashes for tick %d\r", validatedTick.AlignedVotes.QuorumTickStructure.TickNumber) + + chainHash, err := chain.ComputeCurrentTickDigest(nil, validatedTick.firstVote, lastChainHash) + if err != nil { + return nil, errors.Wrapf(err, "calculating chain digest for tick %d", validatedTick.AlignedVotes.QuorumTickStructure.TickNumber) + + } + storeHash, err := chain.ComputeCurrentTickStoreDigest(nil, validatedTick.validTransactionsQubic, validatedTick.ApprovedTransactions, lastStoreHash) + if err != nil { + return nil, errors.Wrapf(err, "calculating store digest for tich %d", validatedTick.AlignedVotes.QuorumTickStructure.TickNumber) + } + + validatedTick.ChainHash = chainHash + validatedTick.StoreHash = storeHash + + lastChainHash = chainHash + lastStoreHash = storeHash + } + + return validatedTicks, nil +} diff --git a/validator/tick/empty_tick.go b/validator/tick/empty_tick.go index 1114237..c71d1b8 100644 --- a/validator/tick/empty_tick.go +++ b/validator/tick/empty_tick.go @@ -60,7 +60,7 @@ func CheckIfTickIsEmptyProto(tickData *protobuff.TickData) bool { } func CheckIfTickIsEmpty(tickData types.TickData) (bool, error) { - data, err := qubicToProto(tickData) + data, err := QubicToProto(tickData) if err != nil { return false, errors.Wrap(err, "converting tick data to protobuf format") } @@ -68,7 +68,7 @@ func CheckIfTickIsEmpty(tickData types.TickData) (bool, error) { return CheckIfTickIsEmptyProto(data), nil } -func CalculateEmptyTicksForAllEpochs(ps *store.PebbleStore) error { +func CalculateEmptyTicksForAllEpochs(ps *store.PebbleStore, force bool) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() @@ -81,11 +81,11 @@ func CalculateEmptyTicksForAllEpochs(ps *store.PebbleStore) error { for epoch, _ := range epochs { _, err := ps.GetEmptyTicksForEpoch(epoch) - if err == nil { + if err == nil && !force { return nil // We have the empty ticks } if !errors.Is(err, pebble.ErrNotFound) { - return errors.Wrap(err, "checking if epoch has empty ticks") // Some other error occured + return errors.Wrap(err, "checking if epoch has empty ticks") // Some other error occurred } fmt.Printf("Calculating empty ticks for epoch %d\n", epoch) diff --git a/validator/tick/full_tick_data.go b/validator/tick/full_tick_data.go new file mode 100644 index 0000000..034d248 --- /dev/null +++ b/validator/tick/full_tick_data.go @@ -0,0 +1,102 @@ +package tick + 
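+
+// This file defines FullTickData, a variant of types.TickData that additionally
+// carries the raw varStruct bytes (UnionData). The sync validator uses it for
+// ticks from epochs before 124, where the quorum tx digest is computed over the
+// fully serialized tick data including the UnionData bytes (see ProtoToQubicFull
+// in models.go and getFullDigestFromTickData below).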
+import ( + "context" + "encoding/hex" + "github.com/pkg/errors" + "github.com/qubic/go-archiver/utils" + "github.com/qubic/go-node-connector/types" +) + +type FullTickData struct { + ComputorIndex uint16 + Epoch uint16 + Tick uint32 + Millisecond uint16 + Second uint8 + Minute uint8 + Hour uint8 + Day uint8 + Month uint8 + Year uint8 + UnionData [256]byte + Timelock [32]byte + TransactionDigests [types.NumberOfTransactionsPerTick][32]byte `json:",omitempty"` + ContractFees [1024]int64 `json:",omitempty"` + Signature [types.SignatureSize]byte +} + +func (ftd *FullTickData) IsEmpty() bool { + if ftd == nil { + return true + } + + return *ftd == FullTickData{} +} + +func (ftd *FullTickData) Validate(ctx context.Context, sigVerifierFunc utils.SigVerifierFunc, quorumTickVote types.QuorumTickVote, comps types.Computors) error { + //empty tick with empty quorum tx digest means other verification is not needed + if (ftd.IsEmpty()) && quorumTickVote.TxDigest == [32]byte{} { + return nil + } + + computorPubKey := comps.PubKeys[ftd.ComputorIndex] + + digest, err := ftd.getDigestFromTickData() + if err != nil { + return errors.Wrap(err, "getting partial tick data digest") + } + + // verify tick signature + err = sigVerifierFunc(ctx, computorPubKey, digest, ftd.Signature) + if err != nil { + return errors.Wrap(err, "verifying tick signature") + } + + fullDigest, err := ftd.getFullDigestFromTickData() + if err != nil { + return errors.Wrap(err, "getting full tick data digest") + } + + if fullDigest != quorumTickVote.TxDigest { + return errors.Wrapf(err, "quorum tx digest mismatch. full digest: %s. quorum tx digest: %s", hex.EncodeToString(fullDigest[:]), hex.EncodeToString(quorumTickVote.TxDigest[:])) + } + + return nil +} + +func (ftd *FullTickData) getDigestFromTickData() ([32]byte, error) { + // xor computor index with 8 + ftd.ComputorIndex ^= 8 + + sData, err := utils.BinarySerialize(ftd) + if err != nil { + return [32]byte{}, errors.Wrap(err, "serializing data") + } + + tickData := sData[:len(sData)-64] + digest, err := utils.K12Hash(tickData) + if err != nil { + return [32]byte{}, errors.Wrap(err, "hashing tick data") + } + + return digest, nil +} + +func (ftd *FullTickData) getFullDigestFromTickData() ([32]byte, error) { + // xor computor index with 8 + ftd.ComputorIndex ^= 8 + + sData, err := utils.BinarySerialize(ftd) + if err != nil { + return [32]byte{}, errors.Wrap(err, "serializing data") + } + + tickData := sData[:] + digest, err := utils.K12Hash(tickData) + if err != nil { + return [32]byte{}, errors.Wrap(err, "hashing tick data") + } + + return digest, nil +} diff --git a/validator/tick/models.go b/validator/tick/models.go index 558ae3e..f93a213 100644 --- a/validator/tick/models.go +++ b/validator/tick/models.go @@ -2,13 +2,14 @@ package tick import ( "encoding/hex" + "fmt" "github.com/pkg/errors" "github.com/qubic/go-archiver/protobuff" "github.com/qubic/go-node-connector/types" "time" ) -func qubicToProto(tickData types.TickData) (*protobuff.TickData, error) { +func QubicToProto(tickData types.TickData) (*protobuff.TickData, error) { if tickData.IsEmpty() { return nil, nil } @@ -117,3 +118,145 @@ func IsTickLastInAnyEpochInterval(tickNumber uint32, epoch uint32, intervals []* return false, -1, nil } + +func identitiesToDigests(identities []string) ([types.NumberOfTransactionsPerTick][32]byte, error) { + + var digests [types.NumberOfTransactionsPerTick][32]byte + + for index, identity := range identities { + + id := types.Identity(identity) + digest, err := id.ToPubKey(true) + if err 
!= nil { + return [1024][32]byte{}, errors.Wrapf(err, "obtaining digest from transaction id %s", identity) + } + digests[index] = digest + } + + return digests, nil +} + +func contractFeesFromProto(feesProto []int64) ([1024]int64, error) { + + if len(feesProto) > 1024 { + return [1024]int64{}, errors.New(fmt.Sprintf("fees array length larger than maximum allowed: %d > 1024 ", len(feesProto))) + } + + var contractFees [1024]int64 + for index, fee := range feesProto { + contractFees[index] = fee + } + return contractFees, nil + +} + +func ProtoToQubic(tickData *protobuff.TickData) (types.TickData, error) { + + if CheckIfTickIsEmptyProto(tickData) { + return types.TickData{}, nil + } + + tickTime := time.UnixMilli(int64(tickData.Timestamp)).UTC() + tickTimeNoMilli := time.Date(tickTime.Year(), tickTime.Month(), tickTime.Day(), tickTime.Hour(), tickTime.Minute(), tickTime.Second(), 0, time.UTC) + milli := tickTime.UnixMilli() - tickTimeNoMilli.UnixMilli() + + var timeLock [32]byte + copy(timeLock[:], tickData.TimeLock[:]) + + transactionDigests, err := identitiesToDigests(tickData.TransactionIds) + if err != nil { + return types.TickData{}, errors.Wrap(err, "decoding transaction ids to digests") + } + + contractFees, err := contractFeesFromProto(tickData.ContractFees) + if err != nil { + return types.TickData{}, errors.Wrap(err, "converting contract fees") + } + + decodedSignature, err := hex.DecodeString(tickData.SignatureHex) + if err != nil { + return types.TickData{}, errors.Wrap(err, "decoding signature") + } + + var signature [types.SignatureSize]byte + copy(signature[:], decodedSignature[:]) + + data := types.TickData{ + ComputorIndex: uint16(tickData.ComputorIndex), + Epoch: uint16(tickData.Epoch), + Tick: tickData.TickNumber, + Millisecond: uint16(milli), + Second: uint8(tickTime.Second()), + Minute: uint8(tickTime.Minute()), + Hour: uint8(tickTime.Hour()), + Day: uint8(tickTime.Day()), + Month: uint8(tickTime.Month()), + Year: uint8(tickTime.Year() - 2000), + Timelock: timeLock, + TransactionDigests: transactionDigests, + ContractFees: contractFees, + Signature: signature, + } + + return data, nil +} + +func ProtoToQubicFull(tickData *protobuff.TickData) (FullTickData, error) { + + qubicTickData, err := ProtoToQubic(tickData) + if err != nil { + return FullTickData{}, errors.Wrap(err, "converting tick data to qubic format") + } + + varStruct := tickData.VarStruct + var unionData [256]byte + copy(unionData[:], varStruct[:]) + + return FullTickData{ + ComputorIndex: qubicTickData.ComputorIndex, + Epoch: qubicTickData.Epoch, + Tick: qubicTickData.Tick, + Millisecond: qubicTickData.Millisecond, + Second: qubicTickData.Second, + Minute: qubicTickData.Minute, + Hour: qubicTickData.Hour, + Day: qubicTickData.Day, + Month: qubicTickData.Month, + Year: qubicTickData.Year, + Timelock: qubicTickData.Timelock, + UnionData: unionData, + TransactionDigests: qubicTickData.TransactionDigests, + ContractFees: qubicTickData.ContractFees, + Signature: qubicTickData.Signature, + }, nil +} + +func QubicFullToProto(tickData FullTickData) (*protobuff.TickData, error) { + + oldTickData := types.TickData{ + ComputorIndex: tickData.ComputorIndex, + Epoch: tickData.Epoch, + Tick: tickData.Tick, + Millisecond: tickData.Millisecond, + Second: tickData.Second, + Minute: tickData.Minute, + Hour: tickData.Hour, + Day: tickData.Day, + Month: tickData.Month, + Year: tickData.Year, + Timelock: tickData.Timelock, + TransactionDigests: tickData.TransactionDigests, + ContractFees: tickData.ContractFees, + Signature: 
tickData.Signature, + } + + proto, err := QubicToProto(oldTickData) + if err != nil { + return nil, errors.Wrapf(err, "qubic to proto") + } + + proto.VarStruct = tickData.UnionData[:] + + return proto, nil + +} diff --git a/validator/tick/models_test.go b/validator/tick/models_test.go index d025851..17ec9f3 100644 --- a/validator/tick/models_test.go +++ b/validator/tick/models_test.go @@ -85,7 +85,7 @@ func TestQubicToProto(t *testing.T) { SignatureHex: fillStringTo(128, "0102030405060708091001020304050607080910"), } - got, err := qubicToProto(qubicTickData) + got, err := QubicToProto(qubicTickData) if err != nil { t.Fatalf("qubicToProto() unexpected error: %v", err) } @@ -93,7 +93,7 @@ func TestQubicToProto(t *testing.T) { t.Fatalf("qubicToProto() mismatch (-got +want):\n%s", diff) } - got, err = qubicToProto(types.TickData{}) + got, err = QubicToProto(types.TickData{}) if err != nil { t.Fatalf("qubicToProto() unexpected error: %v", err) } diff --git a/validator/tick/validator.go b/validator/tick/validator.go index 23968b8..fa6c631 100644 --- a/validator/tick/validator.go +++ b/validator/tick/validator.go @@ -77,7 +77,7 @@ func getFullDigestFromTickData(data types.TickData) ([32]byte, error) { } func Store(ctx context.Context, store *store.PebbleStore, tickNumber uint32, tickData types.TickData) error { - protoTickData, err := qubicToProto(tickData) + protoTickData, err := QubicToProto(tickData) if err != nil { return errors.Wrap(err, "converting qubic tick data to proto") } diff --git a/validator/tx/models.go b/validator/tx/models.go index 22eb5f8..e7805d5 100644 --- a/validator/tx/models.go +++ b/validator/tx/models.go @@ -7,7 +7,7 @@ import ( "github.com/qubic/go-node-connector/types" ) -func qubicToProto(txs types.Transactions) ([]*protobuff.Transaction, error) { +func QubicToProto(txs types.Transactions) ([]*protobuff.Transaction, error) { protoTxs := make([]*protobuff.Transaction, len(txs)) for i, tx := range txs { txProto, err := txToProto(tx) @@ -55,3 +55,49 @@ func txToProto(tx types.Transaction) (*protobuff.Transaction, error) { TxId: txID.String(), }, nil } + +func ProtoToQubic(protoTransactions []*protobuff.Transaction) (types.Transactions, error) { + + transactions := types.Transactions{} + + for _, protoTransaction := range protoTransactions { + + sourceId := types.Identity(protoTransaction.SourceId) + sourcePubKey, err := sourceId.ToPubKey(false) + if err != nil { + return nil, errors.Wrap(err, "decoding source public key") + } + + destinationId := types.Identity(protoTransaction.DestId) + destinationPubKey, err := destinationId.ToPubKey(false) + if err != nil { + return nil, errors.Wrap(err, "decoding destination public key") + } + + input, err := hex.DecodeString(protoTransaction.InputHex) + if err != nil { + return nil, errors.Wrap(err, "decoding input hex") + } + + decodedSignature, err := hex.DecodeString(protoTransaction.SignatureHex) + if err != nil { + return nil, errors.Wrap(err, "decoding signature hex") + } + + var signature [types.SignatureSize]byte + copy(signature[:], decodedSignature[:]) + + transaction := types.Transaction{ + SourcePublicKey: sourcePubKey, + DestinationPublicKey: destinationPubKey, + Amount: protoTransaction.Amount, + Tick: protoTransaction.TickNumber, + InputType: uint16(protoTransaction.InputType), + InputSize: uint16(protoTransaction.InputSize), + Input: input, + Signature: signature, + } + transactions = append(transactions, transaction) + } + return transactions, nil +} diff --git a/validator/tx/models_test.go 
b/validator/tx/models_test.go index 49f438e..1c00853 100644 --- a/validator/tx/models_test.go +++ b/validator/tx/models_test.go @@ -80,7 +80,7 @@ func TestQubicToProto(t *testing.T) { }, } - got, err := qubicToProto(qubicTransactions) + got, err := QubicToProto(qubicTransactions) if err != nil { t.Fatalf("qubicToProto() unexpected error: %v", err) } diff --git a/validator/tx/validator.go b/validator/tx/validator.go index 4d832b4..58b2b7a 100644 --- a/validator/tx/validator.go +++ b/validator/tx/validator.go @@ -115,7 +115,7 @@ func Store(ctx context.Context, store *store.PebbleStore, tickNumber uint32, tra } func storeTickTransactions(ctx context.Context, store *store.PebbleStore, transactions types.Transactions) error { - protoModel, err := qubicToProto(transactions) + protoModel, err := QubicToProto(transactions) if err != nil { return errors.Wrap(err, "converting to proto") } diff --git a/validator/validator.go b/validator/validator.go index b99796c..6be02a2 100644 --- a/validator/validator.go +++ b/validator/validator.go @@ -164,33 +164,41 @@ func (v *Validator) ValidateTick(ctx context.Context, initialEpochTick, tickNumb } if isEmpty { - emptyTicks, err := v.store.GetEmptyTicksForEpoch(uint32(epoch)) + err = handleEmptyTick(v.store, tickNumber, uint32(epoch)) if err != nil { - if !errors.Is(err, pebble.ErrNotFound) { - return errors.Wrap(err, "getting empty ticks for current epoch") - } + return errors.Wrap(err, "handling empty tick") } + } + return nil +} - if emptyTicks == 0 { - fmt.Printf("Initializing empty ticks for epoch: %d\n", epoch) - err := v.store.SetEmptyTickListPerEpoch(uint32(epoch), make([]uint32, 0)) - if err != nil { - return errors.Wrapf(err, "initializing empty tick list for epoch %d", epoch) - } +func handleEmptyTick(pebbleStore *store.PebbleStore, tickNumber, epoch uint32) error { + emptyTicks, err := pebbleStore.GetEmptyTicksForEpoch(epoch) + if err != nil { + if !errors.Is(err, pebble.ErrNotFound) { + return errors.Wrap(err, "getting empty ticks for current epoch") } + } - emptyTicks += 1 - - err = v.store.SetEmptyTicksForEpoch(uint32(epoch), emptyTicks) + if emptyTicks == 0 { + fmt.Printf("Initializing empty ticks for epoch: %d\n", epoch) + err := pebbleStore.SetEmptyTickListPerEpoch(epoch, make([]uint32, 0)) if err != nil { - return errors.Wrap(err, "setting current ticks for current epoch") + return errors.Wrapf(err, "initializing empty tick list for epoch %d", epoch) } - fmt.Printf("Empty ticks for epoch %d: %d\n", epoch, emptyTicks) + } - err = v.store.AppendEmptyTickToEmptyTickListPerEpoch(uint32(epoch), tickNumber) - if err != nil { - return errors.Wrap(err, "appending tick to empty tick list") - } + emptyTicks += 1 + + err = pebbleStore.SetEmptyTicksForEpoch(epoch, emptyTicks) + if err != nil { + return errors.Wrap(err, "setting current ticks for current epoch") + } + fmt.Printf("Empty ticks for epoch %d: %d\n", epoch, emptyTicks) + + err = pebbleStore.AppendEmptyTickToEmptyTickListPerEpoch(epoch, tickNumber) + if err != nil { + return errors.Wrap(err, "appending tick to empty tick list") } return nil } From e2c15a71894c0b9c2b7e857a6fa5b2943681ab55 Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Thu, 5 Dec 2024 19:26:17 +0200 Subject: [PATCH 02/10] Fix store test --- store/store_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/store/store_test.go b/store/store_test.go index 09d575b..d04c03a 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -664,14 +664,14 @@ func 
TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.AppendProcessedTickInterval(ctx, firstEpochInitialTick.Epoch, expected.Intervals[0]) require.NoError(t, err) - got, err := store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochInitialTick.Epoch) + got, err := store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochInitialTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) err = store.SetLastProcessedTick(ctx, &firstEpochInitialTick) require.NoError(t, err) - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochInitialTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochInitialTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) @@ -688,7 +688,7 @@ func TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.SetLastProcessedTick(ctx, &firstEpochSecondTick) require.NoError(t, err, "setting last processed tick") - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) @@ -711,14 +711,14 @@ func TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.AppendProcessedTickInterval(ctx, firstEpochInitialTick.Epoch, expected.Intervals[1]) require.NoError(t, err) - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) err = store.SetLastProcessedTick(ctx, &firstEpochFourthTick) require.NoError(t, err, "setting last processed tick") - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) @@ -739,7 +739,7 @@ func TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.SetLastProcessedTick(ctx, &firstEpochFifthTick) require.NoError(t, err, "setting last processed tick") - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, firstEpochSecondTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) @@ -758,14 +758,14 @@ func TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.AppendProcessedTickInterval(ctx, secondEpochInitialTick.Epoch, expected.Intervals[0]) require.NoError(t, err) - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, secondEpochInitialTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, secondEpochInitialTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) err = store.SetLastProcessedTick(ctx, &secondEpochInitialTick) require.NoError(t, err) - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, secondEpochInitialTick.Epoch) + got, err = store.GetProcessedTickIntervalsPerEpoch(ctx, secondEpochInitialTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) @@ -782,7 +782,7 @@ func TestPebbleStore_LastProcessedTickIntervals(t *testing.T) { err = store.SetLastProcessedTick(ctx, &secondEpochSecondTick) require.NoError(t, err, "setting last processed tick") - got, err = store.getProcessedTickIntervalsPerEpoch(ctx, secondEpochSecondTick.Epoch) + got, err = 
store.GetProcessedTickIntervalsPerEpoch(ctx, secondEpochSecondTick.Epoch) require.NoError(t, err) require.True(t, proto.Equal(got, expected)) From c4fd2212027ada7ec22f96afbe896c0fbb3b56af Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Thu, 5 Dec 2024 20:06:35 +0200 Subject: [PATCH 03/10] Fix message --- processor/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processor/sync.go b/processor/sync.go index d7164c9..d1756b3 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -350,7 +350,7 @@ func (sp *SyncProcessor) synchronize() error { elapsed := time.Since(duration) - log.Printf("Done processing %d ticks. Took: %v | Average time / tick: %v\n", sp.maxObjectRequest, elapsed, elapsed.Seconds()/float64(sp.maxObjectRequest)) + log.Printf("Done processing %d ticks. Took: %v | Average time / tick: %v\n", len(processedTicks), elapsed, elapsed.Seconds()/float64(sp.maxObjectRequest)) } } From 7dcf6b5d97e869417cf22f69beb72ff3883138ca Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Thu, 5 Dec 2024 23:55:16 +0200 Subject: [PATCH 04/10] Skip fetching missing tick --- processor/sync.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/processor/sync.go b/processor/sync.go index d1756b3..75c884e 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -404,6 +404,10 @@ func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.Syn end = endTick } + if end == 15959704 { + end = 15959703 + } + go func(errChannel chan<- error) { defer waitGroup.Done() From ce2c5e2d01e0f036bead0b746736168a97437c53 Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Tue, 10 Dec 2024 21:35:27 +0200 Subject: [PATCH 05/10] Implement fetching from multiple sources. --- main.go | 4 +- processor/processor.go | 2 +- processor/sync.go | 101 +++++++++++++++++++++++++++++------------ 3 files changed, 74 insertions(+), 33 deletions(-) diff --git a/main.go b/main.go index 29e0e30..c1cf07e 100644 --- a/main.go +++ b/main.go @@ -55,7 +55,7 @@ func run() error { } Sync struct { Enable bool `conf:"default:false"` - Source string `conf:"default:localhost:8001"` + Sources []string `conf:"default:localhost:8001"` ResponseTimeout time.Duration `conf:"default:5s"` EnableCompression bool `conf:"default:true"` } @@ -191,7 +191,7 @@ func run() error { syncConfiguration := processor.SyncConfiguration{ Enable: cfg.Sync.Enable, - Source: cfg.Sync.Source, + Sources: cfg.Sync.Sources, ResponseTimeout: cfg.Sync.ResponseTimeout, EnableCompression: cfg.Sync.EnableCompression, } diff --git a/processor/processor.go b/processor/processor.go index e12e44b..9c76d51 100644 --- a/processor/processor.go +++ b/processor/processor.go @@ -27,7 +27,7 @@ func (e *TickInTheFutureError) Error() string { type SyncConfiguration struct { Enable bool - Source string + Sources []string ResponseTimeout time.Duration EnableCompression bool } diff --git a/processor/sync.go b/processor/sync.go index 75c884e..60b26fd 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -20,6 +20,7 @@ import ( "google.golang.org/protobuf/proto" "io" "log" + "math/rand" "runtime" "slices" "sync" @@ -28,7 +29,7 @@ import ( type SyncProcessor struct { syncConfiguration SyncConfiguration - syncServiceClient protobuff.SyncServiceClient + syncServiceClients []protobuff.SyncServiceClient pebbleStore *store.PebbleStore syncDelta SyncDelta processTickTimeout time.Duration @@ -41,24 +42,56 @@ func NewSyncProcessor(syncConfiguration SyncConfiguration, pebbleStore *store.Pe syncConfiguration: syncConfiguration, pebbleStore: pebbleStore, processTickTimeout: 
processTickTimeout, + syncServiceClients: make([]protobuff.SyncServiceClient, 0), } } +func (sp *SyncProcessor) getRandomClient() (protobuff.SyncServiceClient, error) { + + if len(sp.syncServiceClients) == 0 { + return nil, errors.New("no bootstrap connections available") + } + + r := rand.New(rand.NewSource(time.Now().Unix())) + index := r.Intn(len(sp.syncServiceClients)) + + return sp.syncServiceClients[index], nil +} + func (sp *SyncProcessor) Start() error { - log.Printf("Connecting to bootstrap node %s...", sp.syncConfiguration.Source) + grpcConnections := make([]*grpc.ClientConn, 0) + + fmt.Println("Sync sources:") + for _, source := range sp.syncConfiguration.Sources { + grpcConnection, err := grpc.NewClient(source, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return errors.Wrap(err, "creating grpc connection to bootstrap") + } + fmt.Println(source) + + grpcConnections = append(grpcConnections, grpcConnection) + + syncServiceClient := protobuff.NewSyncServiceClient(grpcConnection) + sp.syncServiceClients = append(sp.syncServiceClients, syncServiceClient) + + } + + defer func() { + for _, grpcConnection := range grpcConnections { + grpcConnection.Close() + } + }() - grpcConnection, err := grpc.NewClient(sp.syncConfiguration.Source, grpc.WithTransportCredentials(insecure.NewCredentials())) + metadataClient, err := sp.getRandomClient() if err != nil { - return errors.Wrap(err, "creating grpc connection to bootstrap") + return errors.Wrap(err, "getting random sync client") } - defer grpcConnection.Close() - syncServiceClient := protobuff.NewSyncServiceClient(grpcConnection) - sp.syncServiceClient = syncServiceClient + log.Printf("Connecting to random bootstrap node...\n") log.Println("Fetching bootstrap metadata...") - bootstrapMetadata, err := sp.getBootstrapMetadata() + bootstrapMetadata, err := sp.getBootstrapMetadata(metadataClient) if err != nil { return err } @@ -89,7 +122,7 @@ func (sp *SyncProcessor) Start() error { } log.Println("Synchronizing missing epoch information...") - err = sp.syncEpochInfo(syncDelta, bootstrapMetadata) + err = sp.syncEpochInfo(syncDelta, metadataClient) if err != nil { return errors.Wrap(err, "syncing epoch info") } @@ -105,11 +138,11 @@ func (sp *SyncProcessor) Start() error { return nil } -func (sp *SyncProcessor) getBootstrapMetadata() (*protobuff.SyncMetadataResponse, error) { +func (sp *SyncProcessor) getBootstrapMetadata(syncClient protobuff.SyncServiceClient) (*protobuff.SyncMetadataResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) defer cancel() - metadata, err := sp.syncServiceClient.SyncGetBootstrapMetadata(ctx, nil) + metadata, err := syncClient.SyncGetBootstrapMetadata(ctx, nil) if err != nil { return nil, errors.Wrap(err, "getting bootstrap metadata") } @@ -232,15 +265,7 @@ func (sp *SyncProcessor) storeEpochInfo(response *protobuff.SyncEpochInfoRespons return nil } -func (sp *SyncProcessor) syncEpochInfo(delta SyncDelta, metadata *protobuff.SyncMetadataResponse) error { - - // TODO: remove skipped tick intervals from proto file - /*err := sp.pebbleStore.SetSkippedTickIntervalList(&protobuff.SkippedTicksIntervalList{ - SkippedTicks: metadata.SkippedTickIntervals, - }) - if err != nil { - return errors.Wrap(err, "saving skipped tick intervals from bootstrap") - }*/ +func (sp *SyncProcessor) syncEpochInfo(delta SyncDelta, syncClient protobuff.SyncServiceClient) error { var epochs []uint32 @@ -252,7 +277,7 @@ func (sp *SyncProcessor) 
syncEpochInfo(delta SyncDelta, metadata *protobuff.Sync defer cancel() - stream, err := sp.syncServiceClient.SyncGetEpochInformation(ctx, &protobuff.SyncEpochInfoRequest{Epochs: epochs}) + stream, err := syncClient.SyncGetEpochInformation(ctx, &protobuff.SyncEpochInfoRequest{Epochs: epochs}) if err != nil { return errors.Wrap(err, "fetching epoch info") } @@ -278,6 +303,10 @@ func (sp *SyncProcessor) synchronize() error { for _, epochDelta := range sp.syncDelta { + /*if epochDelta.Epoch != 128 { + continue + }*/ + if sp.lastSynchronizedTick.Epoch > epochDelta.Epoch { continue } @@ -413,7 +442,12 @@ func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.Syn log.Printf("[Routine %d] Fetching tick range %d - %d", index, start, end) - stream, err := sp.syncServiceClient.SyncGetTickInformation(ctx, &protobuff.SyncTickInfoRequest{ + randomClient, err := sp.getRandomClient() + if err != nil { + errChannel <- errors.Wrap(err, "getting random sync client") + } + + stream, err := randomClient.SyncGetTickInformation(ctx, &protobuff.SyncTickInfoRequest{ FirstTick: start, LastTick: end, }, compression) @@ -488,7 +522,7 @@ func (sp *SyncProcessor) processTicks(tickInfoResponses []*protobuff.SyncTickDat return validatedTicks, nil } -func (sp *SyncProcessor) storeTicks(validatedTicks validator.ValidatedTicks, epoch uint32, processedTickIntervalsPerEpoch *protobuff.ProcessedTickIntervalsPerEpoch, initialTickInterval uint32) (*protobuff.SyncLastSynchronizedTick, error) { +func (sp *SyncProcessor) storeTicks(validatedTicks validator.ValidatedTicks, epoch uint32, processedTickIntervalsPerEpoch *protobuff.ProcessedTickIntervalsPerEpoch, initialIntervalTick uint32) (*protobuff.SyncLastSynchronizedTick, error) { if epoch == 0 { return nil, errors.Errorf("epoch is 0") @@ -622,15 +656,22 @@ func (sp *SyncProcessor) storeTicks(validatedTicks validator.ValidatedTicks, epo } if len(processedTickIntervalsPerEpoch.Intervals) == 0 { - processedTickIntervalsPerEpoch = &protobuff.ProcessedTickIntervalsPerEpoch{ - Epoch: epoch, - Intervals: []*protobuff.ProcessedTickInterval{ - { - InitialProcessedTick: initialTickInterval, - LastProcessedTick: lastSynchronizedTick.TickNumber, - }, + processedTickIntervalsPerEpoch.Intervals = []*protobuff.ProcessedTickInterval{ + { + InitialProcessedTick: initialIntervalTick, + LastProcessedTick: lastSynchronizedTick.TickNumber, }, } + + } + fmt.Printf("Initial: %d | Last: %d", initialIntervalTick, lastSynchronizedTick.TickNumber) + time.Sleep(5 * time.Second) + + if initialIntervalTick > lastSynchronizedTick.TickNumber { + processedTickIntervalsPerEpoch.Intervals = append(processedTickIntervalsPerEpoch.Intervals, &protobuff.ProcessedTickInterval{ + InitialProcessedTick: initialIntervalTick, + LastProcessedTick: lastSynchronizedTick.TickNumber, + }) } else { processedTickIntervalsPerEpoch.Intervals[len(processedTickIntervalsPerEpoch.Intervals)-1].LastProcessedTick = lastSynchronizedTick.TickNumber } From d9820728ecc10be0f51aba596b9b27e04db65ddd Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Tue, 10 Dec 2024 22:58:26 +0200 Subject: [PATCH 06/10] Properly handle new epoch interval. 
--- processor/sync.go | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/processor/sync.go b/processor/sync.go index 60b26fd..8421c2a 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -303,10 +303,6 @@ func (sp *SyncProcessor) synchronize() error { for _, epochDelta := range sp.syncDelta { - /*if epochDelta.Epoch != 128 { - continue - }*/ - if sp.lastSynchronizedTick.Epoch > epochDelta.Epoch { continue } @@ -340,6 +336,8 @@ func (sp *SyncProcessor) synchronize() error { for _, interval := range epochDelta.ProcessedIntervals { + fmt.Printf("Processing range [%d - %d]\n", interval.InitialProcessedTick, interval.LastProcessedTick) + initialIntervalTick := interval.InitialProcessedTick if initialIntervalTick > sp.lastSynchronizedTick.TickNumber { @@ -662,20 +660,17 @@ func (sp *SyncProcessor) storeTicks(validatedTicks validator.ValidatedTicks, epo LastProcessedTick: lastSynchronizedTick.TickNumber, }, } - } - fmt.Printf("Initial: %d | Last: %d", initialIntervalTick, lastSynchronizedTick.TickNumber) - time.Sleep(5 * time.Second) - if initialIntervalTick > lastSynchronizedTick.TickNumber { + if processedTickIntervalsPerEpoch.Intervals[len(processedTickIntervalsPerEpoch.Intervals)-1].InitialProcessedTick != initialIntervalTick { processedTickIntervalsPerEpoch.Intervals = append(processedTickIntervalsPerEpoch.Intervals, &protobuff.ProcessedTickInterval{ InitialProcessedTick: initialIntervalTick, LastProcessedTick: lastSynchronizedTick.TickNumber, }) - } else { - processedTickIntervalsPerEpoch.Intervals[len(processedTickIntervalsPerEpoch.Intervals)-1].LastProcessedTick = lastSynchronizedTick.TickNumber } + processedTickIntervalsPerEpoch.Intervals[len(processedTickIntervalsPerEpoch.Intervals)-1].LastProcessedTick = lastSynchronizedTick.TickNumber + processedTickIntervalsPerEpochKey := store.AssembleKey(store.ProcessedTickIntervals, epoch) serializedData, err = proto.Marshal(processedTickIntervalsPerEpoch) if err != nil { From 6963c42d05b4e46387fed1b35bb986bd74596b39 Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Fri, 13 Dec 2024 22:47:46 +0200 Subject: [PATCH 07/10] Implement retry mechanism, max number of connections, status endpoint. Improve version matching and add documentation. 
--- README.md | 86 ++++-- main.go | 32 ++- processor/processor.go | 3 +- processor/sync.go | 304 +++++++++++++++++---- processor/sync_test.go | 3 +- protobuff/sync.pb.go | 552 +++++++++++++++++++++++++++++--------- protobuff/sync.pb.gw.go | 123 +++++++++ protobuff/sync.proto | 32 ++- protobuff/sync_grpc.pb.go | 90 +++++++ rpc/rpc_server.go | 28 +- rpc/sync.go | 91 ++++++- store/event_listener.go | 34 ++- utils/utils.go | 2 + validator/sync.go | 12 +- 14 files changed, 1152 insertions(+), 240 deletions(-) diff --git a/README.md b/README.md index b59a42a..32591a3 100644 --- a/README.md +++ b/README.md @@ -23,26 +23,80 @@ This can be configured using the `QUBIC_NODES_QUBIC_PEER_LIST` environment varia ## Other optional configuration parameters for qubic-archiver can be specified as env variable by adding them to docker compose: ```bash - $QUBIC_ARCHIVER_SERVER_READ_TIMEOUT (default: 5s) - $QUBIC_ARCHIVER_SERVER_WRITE_TIMEOUT (default: 5s) - $QUBIC_ARCHIVER_SERVER_SHUTDOWN_TIMEOUT (default: 5s) - $QUBIC_ARCHIVER_SERVER_HTTP_HOST (default: 0.0.0.0:8000) - $QUBIC_ARCHIVER_SERVER_GRPC_HOST (default: 0.0.0.0:8001) - $QUBIC_ARCHIVER_SERVER_NODE_SYNC_THRESHOLD (default: 3) - $QUBIC_ARCHIVER_SERVER_CHAIN_TICK_FETCH_URL (default: http://127.0.0.1:8080/max-tick) + $QUBIC_ARCHIVER_SERVER_READ_TIMEOUT (default: 5s) + $QUBIC_ARCHIVER_SERVER_WRITE_TIMEOUT (default: 5s) + $QUBIC_ARCHIVER_SERVER_SHUTDOWN_TIMEOUT (default: 5s) + $QUBIC_ARCHIVER_SERVER_HTTP_HOST (default: 0.0.0.0:8000) + $QUBIC_ARCHIVER_SERVER_GRPC_HOST (default: 0.0.0.0:8001) + $QUBIC_ARCHIVER_SERVER_NODE_SYNC_THRESHOLD (default: 3) + $QUBIC_ARCHIVER_SERVER_CHAIN_TICK_FETCH_URL (default: http://127.0.0.1:8080/max-tick) + + $QUBIC_ARCHIVER_POOL_NODE_FETCHER_URL (default: http://127.0.0.1:8080/status) + $QUBIC_ARCHIVER_POOL_NODE_FETCHER_TIMEOUT (default: 2s) + $QUBIC_ARCHIVER_POOL_INITIAL_CAP (default: 5) + $QUBIC_ARCHIVER_POOL_MAX_IDLE (default: 20) + $QUBIC_ARCHIVER_POOL_MAX_CAP (default: 30) + $QUBIC_ARCHIVER_POOL_IDLE_TIMEOUT (default: 15s) + + $QUBIC_ARCHIVER_QUBIC_NODE_PORT (default: 21841) + $QUBIC_ARCHIVER_QUBIC_STORAGE_FOLDER (default: store) + $QUBIC_ARCHIVER_QUBIC_PROCESS_TICK_TIMEOUT (default: 5s) - $QUBIC_ARCHIVER_POOL_NODE_FETCHER_URL (default: http://127.0.0.1:8080/status) - $QUBIC_ARCHIVER_POOL_NODE_FETCHER_TIMEOUT (default: 2s) - $QUBIC_ARCHIVER_POOL_INITIAL_CAP (default: 5) - $QUBIC_ARCHIVER_POOL_MAX_IDLE (default: 20) - $QUBIC_ARCHIVER_POOL_MAX_CAP (default: 30) - $QUBIC_ARCHIVER_POOL_IDLE_TIMEOUT (default: 15s) + $QUBIC_ARCHIVER_STORE_RESET_EMPTY_TICK_KEYS (default: false) - $QUBIC_ARCHIVER_QUBIC_NODE_PORT (default: 21841) - $QUBIC_ARCHIVER_QUBIC_STORAGE_FOLDER (default: store) - $QUBIC_ARCHIVER_QUBIC_PROCESS_TICK_TIMEOUT (default: 5s) + $QUBIC_ARCHIVER_SYNC_ENABLE (default: false) + $QUBIC_ARCHIVER_SYNC_SOURCES ,[string...] (default: localhost:8001) // TODO: To be changed with official bootstrap node list + $QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT (default: 5s) // TODO: Review implementation + $QUBIC_ARCHIVER_SYNC_ENABLE_COMPRESSION (default: true) + $QUBIC_ARCHIVER_SYNC_RETRY_COUNT (default: 10) + + $QUBIC_ARCHIVER_BOOTSTRAP_ENABLE (default: true) + $QUBIC_ARCHIVER_BOOTSTRAP_MAX_REQUESTED_ITEMS (default: 1000) + $QUBIC_ARCHIVER_BOOTSTRAP_MAX_CONCURRENT_CONNECTIONS (default: 30) // TODO: Figure out the optimal count + $QUBIC_ARCHIVER_BOOTSTRAP_BATCH_SIZE (default: 10) ``` +## Peer to Peer data synchronization + +Archiver supports data synchronization between nodes. 
+Nodes can be configured to either synchronize from other nodes (client nodes) or provide information to other nodes once they are up to date (bootstrap nodes).
+
+### Overview
+Upon starting a node with the `QUBIC_ARCHIVER_SYNC_ENABLE` environment variable set to `true`, it will start synchronizing information from the bootstrap nodes specified using the `QUBIC_ARCHIVER_SYNC_SOURCES` variable.
+The synchronization works as follows:
+1. The client will attempt to establish a connection to the specified bootstrap nodes.
+2. The client will verify that its version is compatible with each node. Incompatible nodes will not be used to synchronize information.
+3. Metadata will be fetched from one of the nodes in order to calculate the synchronization delta (the difference in stored information between the client and the bootstrap), as well as how many ticks the bootstrap can provide per request.
+4. Missing epoch-related information, such as the computor list, will be fetched and saved.
+5. The client will fetch the missing tick ranges in batches. This is done concurrently in order to lower the synchronization duration.
+6. Upon fetching a batch of ticks, they are cryptographically verified to ensure data accuracy. This is also done concurrently to save time.
+7. After verification is finished, the ticks are saved to the database, and the last two steps are repeated until all the information has been synchronized.
+8. After the synchronization is finished, the client will resume normal operation and synchronize the current epoch directly from the Qubic network.
+
+Depending on the hardware and network conditions, the synchronization duration can vary.
+Our tests show that a machine with a 16-core, 5 GHz CPU can synchronize an epoch in about 20 - 30 minutes, at a rate of 3,000 - 4,000 ticks per minute.
+Storage speed is also a factor to consider, and in some cases it may become a bottleneck.
+
+> [!WARNING]
+> It is not recommended to synchronize a node from zero close to the epoch transition.
+> While synchronization of past epochs may finish before the transition, synchronization from the network itself is a couple of times slower, so the current epoch may not be synchronized in time.
+
+### Configuration
+
+#### Client
+
+- `QUBIC_ARCHIVER_SYNC_ENABLE`: Whether to enable the synchronization feature or not.
+- `QUBIC_ARCHIVER_SYNC_SOURCES`: The list of bootstrap nodes to fetch from.
+- `QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT`: **TODO**
+- `QUBIC_ARCHIVER_SYNC_RETRY_COUNT`: The number of times to retry fetching a tick range, in the event that the bootstrap has reached its maximum number of connections.
+
+#### Bootstrap
+
+- `QUBIC_ARCHIVER_BOOTSTRAP_ENABLE`: Whether to enable the bootstrap functionality or not.
+- `QUBIC_ARCHIVER_BOOTSTRAP_MAX_REQUESTED_ITEMS`: The maximum number of ticks per request.
+- `QUBIC_ARCHIVER_BOOTSTRAP_MAX_CONCURRENT_CONNECTIONS`: The maximum number of concurrent connections across all clients.
+- `QUBIC_ARCHIVER_BOOTSTRAP_BATCH_SIZE`: The number of ticks that are sent to the client at a time.
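+
+#### Example client configuration
+
+A minimal client-side setup might look like the following. The bootstrap addresses and values are only illustrative placeholders, not an official node list, and the comma-separated list syntax for `QUBIC_ARCHIVER_SYNC_SOURCES` is assumed from the parameter table above:
+
+```bash
+# Enable sync mode and point the client at example bootstrap archivers (comma-separated, assumed syntax).
+QUBIC_ARCHIVER_SYNC_ENABLE=true
+QUBIC_ARCHIVER_SYNC_SOURCES=bootstrap-1.example.org:8001,bootstrap-2.example.org:8001
+# Response timeout and compression for bootstrap requests (illustrative values).
+QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT=5s
+QUBIC_ARCHIVER_SYNC_ENABLE_COMPRESSION=true
+# Retry a tick range up to 10 times if the bootstrap is at its connection limit.
+QUBIC_ARCHIVER_SYNC_RETRY_COUNT=10
+```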
+ ## Run with docker-compose: ```bash diff --git a/main.go b/main.go index c1cf07e..956f7d2 100644 --- a/main.go +++ b/main.go @@ -58,11 +58,13 @@ func run() error { Sources []string `conf:"default:localhost:8001"` ResponseTimeout time.Duration `conf:"default:5s"` EnableCompression bool `conf:"default:true"` + RetryCount int `conf:"default:10"` } Bootstrap struct { - Enable bool `conf:"default:true"` - MaxRequestedItems int `conf:"default:100"` - BatchSize int `conf:"default:10"` + Enable bool `conf:"default:true"` + MaxRequestedItems int `conf:"default:1000"` + MaxConcurrentConnections int `conf:"default:30"` + BatchSize int `conf:"default:10"` } } @@ -175,12 +177,21 @@ func run() error { } bootstrapConfiguration := rpc.BootstrapConfiguration{ - Enable: cfg.Bootstrap.Enable, - MaximumRequestedItems: cfg.Bootstrap.MaxRequestedItems, - BatchSize: cfg.Bootstrap.BatchSize, + Enable: cfg.Bootstrap.Enable, + MaximumRequestedItems: cfg.Bootstrap.MaxRequestedItems, + BatchSize: cfg.Bootstrap.BatchSize, + MaxConcurrentConnections: cfg.Bootstrap.MaxConcurrentConnections, } - rpcServer := rpc.NewServer(cfg.Server.GrpcHost, cfg.Server.HttpHost, cfg.Server.NodeSyncThreshold, cfg.Server.ChainTickFetchUrl, ps, p, bootstrapConfiguration) + syncConfiguration := processor.SyncConfiguration{ + Enable: cfg.Sync.Enable, + Sources: cfg.Sync.Sources, + ResponseTimeout: cfg.Sync.ResponseTimeout, + EnableCompression: cfg.Sync.EnableCompression, + RetryCount: cfg.Sync.RetryCount, + } + + rpcServer := rpc.NewServer(cfg.Server.GrpcHost, cfg.Server.HttpHost, cfg.Server.NodeSyncThreshold, cfg.Server.ChainTickFetchUrl, ps, p, bootstrapConfiguration, syncConfiguration) err = rpcServer.Start() if err != nil { return errors.Wrap(err, "starting rpc server") @@ -189,13 +200,6 @@ func run() error { shutdown := make(chan os.Signal, 1) signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM) - syncConfiguration := processor.SyncConfiguration{ - Enable: cfg.Sync.Enable, - Sources: cfg.Sync.Sources, - ResponseTimeout: cfg.Sync.ResponseTimeout, - EnableCompression: cfg.Sync.EnableCompression, - } - proc := processor.NewProcessor(p, ps, cfg.Qubic.ProcessTickTimeout, syncConfiguration) procErrors := make(chan error, 1) diff --git a/processor/processor.go b/processor/processor.go index 9c76d51..70dda49 100644 --- a/processor/processor.go +++ b/processor/processor.go @@ -30,6 +30,7 @@ type SyncConfiguration struct { Sources []string ResponseTimeout time.Duration EnableCompression bool + RetryCount int } type Processor struct { @@ -51,7 +52,7 @@ func NewProcessor(p *qubic.Pool, ps *store.PebbleStore, processTickTimeout time. 
func (p *Processor) Start() error { if p.SyncConfiguration.Enable { - syncProcessor := NewSyncProcessor(p.SyncConfiguration, p.ps, p.processTickTimeout) + syncProcessor := NewSyncProcessor(p.SyncConfiguration, p.ps) err := syncProcessor.Start() if err != nil { return errors.Wrap(err, "performing synchronization") diff --git a/processor/sync.go b/processor/sync.go index 8421c2a..a58f1f1 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -8,6 +8,7 @@ import ( "github.com/cockroachdb/pebble" "github.com/pkg/errors" "github.com/qubic/go-archiver/protobuff" + "github.com/qubic/go-archiver/store" "github.com/qubic/go-archiver/utils" "github.com/qubic/go-archiver/validator" @@ -23,26 +24,128 @@ import ( "math/rand" "runtime" "slices" + "strings" "sync" "time" ) +type SyncStatus struct { + NodeVersion string + BootstrapAddresses []string + Delta SyncDelta + LastSynchronizedTick *protobuff.SyncLastSynchronizedTick + CurrentEpoch uint32 + CurrentTickRange *protobuff.ProcessedTickInterval + AverageTicksPerMinute int + LastFetchDuration float32 + LastValidationDuration float32 + LastStoreDuration float32 + LastTotalDuration float32 + ObjectRequestCount uint32 + FetchRoutineCount int + ValidationRoutineCount int +} + +type SyncStatusMutex struct { + mutex sync.RWMutex + Status *SyncStatus +} + +func (ssm *SyncStatusMutex) setLastSynchronizedTick(lastSynchronizedTick *protobuff.SyncLastSynchronizedTick) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.LastSynchronizedTick = lastSynchronizedTick +} + +func (ssm *SyncStatusMutex) setCurrentEpoch(epoch uint32) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.CurrentEpoch = epoch +} + +func (ssm *SyncStatusMutex) setCurrentTickRange(currentTickRange *protobuff.ProcessedTickInterval) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.CurrentTickRange = currentTickRange +} + +func (ssm *SyncStatusMutex) setAverageTicksPerMinute(tickCount int) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.AverageTicksPerMinute = tickCount +} + +func (ssm *SyncStatusMutex) setLastFetchDuration(seconds float32) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.LastFetchDuration = seconds +} + +func (ssm *SyncStatusMutex) setLastValidationDuration(seconds float32) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.LastValidationDuration = seconds +} + +func (ssm *SyncStatusMutex) setLastStoreDuration(seconds float32) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.LastStoreDuration = seconds +} + +func (ssm *SyncStatusMutex) setLastTotalDuration(seconds float32) { + ssm.mutex.Lock() + defer ssm.mutex.Unlock() + ssm.Status.LastTotalDuration = seconds +} + +func (ssm *SyncStatusMutex) Get() SyncStatus { + ssm.mutex.RLock() + defer ssm.mutex.RUnlock() + + return SyncStatus{ + NodeVersion: ssm.Status.NodeVersion, + BootstrapAddresses: ssm.Status.BootstrapAddresses, + Delta: ssm.Status.Delta, + LastSynchronizedTick: proto.Clone(ssm.Status.LastSynchronizedTick).(*protobuff.SyncLastSynchronizedTick), + CurrentEpoch: ssm.Status.CurrentEpoch, + CurrentTickRange: proto.Clone(ssm.Status.CurrentTickRange).(*protobuff.ProcessedTickInterval), + AverageTicksPerMinute: ssm.Status.AverageTicksPerMinute, + LastFetchDuration: ssm.Status.LastFetchDuration, + LastValidationDuration: ssm.Status.LastValidationDuration, + LastStoreDuration: ssm.Status.LastStoreDuration, + LastTotalDuration: ssm.Status.LastTotalDuration, + ObjectRequestCount: ssm.Status.ObjectRequestCount, + FetchRoutineCount: 
ssm.Status.FetchRoutineCount, + ValidationRoutineCount: ssm.Status.ValidationRoutineCount, + } + +} + +var SynchronizationStatus *SyncStatusMutex + type SyncProcessor struct { - syncConfiguration SyncConfiguration - syncServiceClients []protobuff.SyncServiceClient - pebbleStore *store.PebbleStore - syncDelta SyncDelta - processTickTimeout time.Duration - maxObjectRequest uint32 - lastSynchronizedTick *protobuff.SyncLastSynchronizedTick + syncConfiguration SyncConfiguration + syncServiceClients []protobuff.SyncServiceClient + pebbleStore *store.PebbleStore + syncDelta SyncDelta + processTickTimeout time.Duration + maxObjectRequest uint32 + lastSynchronizedTick *protobuff.SyncLastSynchronizedTick + fetchRoutineCount int + validationRoutineCount int } -func NewSyncProcessor(syncConfiguration SyncConfiguration, pebbleStore *store.PebbleStore, processTickTimeout time.Duration) *SyncProcessor { +func NewSyncProcessor(syncConfiguration SyncConfiguration, pebbleStore *store.PebbleStore) *SyncProcessor { + + fetchRoutineCount := min(6, runtime.NumCPU()) + return &SyncProcessor{ - syncConfiguration: syncConfiguration, - pebbleStore: pebbleStore, - processTickTimeout: processTickTimeout, - syncServiceClients: make([]protobuff.SyncServiceClient, 0), + syncConfiguration: syncConfiguration, + pebbleStore: pebbleStore, + syncServiceClients: make([]protobuff.SyncServiceClient, 0), + fetchRoutineCount: fetchRoutineCount, + validationRoutineCount: runtime.NumCPU(), } } @@ -62,6 +165,13 @@ func (sp *SyncProcessor) Start() error { grpcConnections := make([]*grpc.ClientConn, 0) + var bootstrapMetadata *protobuff.SyncMetadataResponse + + clientMetadata, err := sp.getClientMetadata() + if err != nil { + return errors.Wrap(err, "getting client metadata") + } + fmt.Println("Sync sources:") for _, source := range sp.syncConfiguration.Sources { grpcConnection, err := grpc.NewClient(source, grpc.WithTransportCredentials(insecure.NewCredentials())) @@ -73,8 +183,33 @@ func (sp *SyncProcessor) Start() error { grpcConnections = append(grpcConnections, grpcConnection) syncServiceClient := protobuff.NewSyncServiceClient(grpcConnection) + + metadata, err := sp.getBootstrapMetadata(syncServiceClient) + if err != nil { + log.Printf("Unable to get metadata for bootstrap node %s. It will not be used.\n", source) + continue + } + + versionCheck, err := sp.checkVersionSupport(clientMetadata.ArchiverVersion, metadata.ArchiverVersion) + if err != nil { + log.Printf("Bootstrap node %s does not match client version. It will not be used: %v\n", source, err) + continue + } + + if !versionCheck { + log.Printf("Bootstrap node %s does not match client version. It will not be used. 
Client:%s, Bootstrap: %v\n", source, clientMetadata.ArchiverVersion, metadata.ArchiverVersion) + } + sp.syncServiceClients = append(sp.syncServiceClients, syncServiceClient) + if bootstrapMetadata == nil { + bootstrapMetadata = metadata + } + + if sp.maxObjectRequest == 0 { + sp.maxObjectRequest = uint32(bootstrapMetadata.MaxObjectRequest) + } + sp.maxObjectRequest = min(uint32(bootstrapMetadata.MaxObjectRequest), sp.maxObjectRequest) } defer func() { @@ -83,24 +218,14 @@ func (sp *SyncProcessor) Start() error { } }() - metadataClient, err := sp.getRandomClient() - if err != nil { - return errors.Wrap(err, "getting random sync client") - } - - log.Printf("Connecting to random bootstrap node...\n") - - log.Println("Fetching bootstrap metadata...") - bootstrapMetadata, err := sp.getBootstrapMetadata(metadataClient) - if err != nil { - return err + if len(sp.syncServiceClients) == 0 || bootstrapMetadata == nil { + log.Println("No suitable sync sources found, resuming to synchronizing current epoch.") + return nil } - sp.maxObjectRequest = uint32(bootstrapMetadata.MaxObjectRequest) - - clientMetadata, err := sp.getClientMetadata() + metadataClient, err := sp.getRandomClient() if err != nil { - return errors.Wrap(err, "getting client metadata") + return errors.Wrap(err, "getting random sync client") } lastSynchronizedTick, err := sp.pebbleStore.GetSyncLastSynchronizedTick() @@ -130,6 +255,20 @@ func (sp *SyncProcessor) Start() error { sp.syncDelta = syncDelta log.Println("Starting tick synchronization") + + SynchronizationStatus = &SyncStatusMutex{ + mutex: sync.RWMutex{}, + Status: &SyncStatus{ + NodeVersion: utils.ArchiverVersion, + BootstrapAddresses: sp.syncConfiguration.Sources, + Delta: sp.syncDelta, + LastSynchronizedTick: sp.lastSynchronizedTick, + ObjectRequestCount: sp.maxObjectRequest, + FetchRoutineCount: sp.fetchRoutineCount, + ValidationRoutineCount: sp.validationRoutineCount, + }, + } + err = sp.synchronize() if err != nil { return errors.Wrap(err, "performing synchronization") @@ -186,10 +325,6 @@ func areIntervalsEqual(a, b []*protobuff.ProcessedTickInterval) bool { func (sp *SyncProcessor) CalculateSyncDelta(bootstrapMetadata, clientMetadata *protobuff.SyncMetadataResponse, lastSynchronizedTick *protobuff.SyncLastSynchronizedTick) (SyncDelta, error) { - if bootstrapMetadata.ArchiverVersion != clientMetadata.ArchiverVersion { - return nil, errors.New(fmt.Sprintf("client version (%s) does not match bootstrap version (%s)", clientMetadata.ArchiverVersion, bootstrapMetadata.ArchiverVersion)) - } - bootstrapProcessedTicks := make(map[uint32][]*protobuff.ProcessedTickInterval) clientProcessedTicks := make(map[uint32][]*protobuff.ProcessedTickInterval) @@ -248,6 +383,27 @@ func (sp *SyncProcessor) CalculateSyncDelta(bootstrapMetadata, clientMetadata *p return syncDelta, nil } +func (sp *SyncProcessor) checkVersionSupport(clientVersion, bootstrapVersion string) (bool, error) { + + if clientVersion[0] != 'v' && bootstrapVersion[0] != 'v' { + return clientVersion == bootstrapVersion, nil + } + + clientSplit := strings.Split(clientVersion, ".") + bootstrapSplit := strings.Split(bootstrapVersion, ".") + + if len(clientSplit) != len(bootstrapSplit) { + return false, errors.Errorf("mismatch between client and bootstrap version format: client: %s, bootstrap: %s", clientVersion, bootstrapVersion) + } + + for index := 0; index < len(clientSplit)-1; index++ { + if clientSplit[index] != bootstrapSplit[index] { + return false, errors.Errorf("mismatch between client and bootstrap versions: client: 
%s, bootstrap: %s", clientVersion, bootstrapVersion) + } + } + return true, nil +} + func (sp *SyncProcessor) storeEpochInfo(response *protobuff.SyncEpochInfoResponse) error { for _, epoch := range response.Epochs { @@ -307,6 +463,8 @@ func (sp *SyncProcessor) synchronize() error { continue } + SynchronizationStatus.setCurrentEpoch(epochDelta.Epoch) + log.Printf("Synchronizing ticks for epoch %d...\n", epochDelta.Epoch) processedTickIntervalsForEpoch, err := sp.pebbleStore.GetProcessedTickIntervalsPerEpoch(nil, epochDelta.Epoch) @@ -336,6 +494,8 @@ func (sp *SyncProcessor) synchronize() error { for _, interval := range epochDelta.ProcessedIntervals { + SynchronizationStatus.setCurrentTickRange(interval) + fmt.Printf("Processing range [%d - %d]\n", interval.InitialProcessedTick, interval.LastProcessedTick) initialIntervalTick := interval.InitialProcessedTick @@ -358,26 +518,41 @@ func (sp *SyncProcessor) synchronize() error { endTick = interval.LastProcessedTick } - duration := time.Now() + start := time.Now() + + secondStart := time.Now() fetchedTicks, err := sp.fetchTicks(startTick, endTick) if err != nil { return errors.Wrapf(err, "fetching tick range %d - %d", startTick, endTick) } + SynchronizationStatus.setLastFetchDuration(float32(time.Since(secondStart).Seconds())) + secondStart = time.Now() processedTicks, err := sp.processTicks(fetchedTicks, initialIntervalTick, qubicComputors) if err != nil { return errors.Wrapf(err, "processing tick range %d - %d", startTick, endTick) } + SynchronizationStatus.setLastValidationDuration(float32(time.Since(secondStart).Seconds())) + secondStart = time.Now() + lastSynchronizedTick, err := sp.storeTicks(processedTicks, epochDelta.Epoch, processedTickIntervalsForEpoch, initialIntervalTick) - sp.lastSynchronizedTick = lastSynchronizedTick if err != nil { return errors.Wrapf(err, "storing processed tick range %d - %d", startTick, endTick) } + SynchronizationStatus.setLastStoreDuration(float32(time.Since(secondStart).Seconds())) + secondStart = time.Now() - elapsed := time.Since(duration) + sp.lastSynchronizedTick = lastSynchronizedTick + SynchronizationStatus.setLastSynchronizedTick(lastSynchronizedTick) + + elapsed := time.Since(start) + SynchronizationStatus.setLastTotalDuration(float32(elapsed.Seconds())) - log.Printf("Done processing %d ticks. Took: %v | Average time / tick: %v\n", len(processedTicks), elapsed, elapsed.Seconds()/float64(sp.maxObjectRequest)) + ticksPerMinute := int(float64(len(processedTicks)) / elapsed.Seconds() * 60) + SynchronizationStatus.setAverageTicksPerMinute(ticksPerMinute) + + log.Printf("Done processing %d ticks. 
Took: %v | Average time / tick: %v\n", len(processedTicks), elapsed, elapsed.Seconds()/float64(len(processedTicks))) } } @@ -396,6 +571,18 @@ func (sp *SyncProcessor) synchronize() error { return nil } +func (sp *SyncProcessor) performTickInfoRequest(ctx context.Context, randomClient protobuff.SyncServiceClient, compression grpc.CallOption, start, end uint32) (protobuff.SyncService_SyncGetTickInformationClient, error) { + + stream, err := randomClient.SyncGetTickInformation(ctx, &protobuff.SyncTickInfoRequest{ + FirstTick: start, + LastTick: end, + }, compression) + if err != nil { + return nil, errors.Wrap(err, "fetching tick information") + } + return stream, nil +} + func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.SyncTickData, error) { //TODO: We are currently fetching a large process of ticks, and using the default will cause the method to error before we are finished @@ -411,7 +598,7 @@ func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.Syn var responses []*protobuff.SyncTickData mutex := sync.RWMutex{} - routineCount := runtime.NumCPU() / 2 + routineCount := sp.fetchRoutineCount tickDifference := endTick - startTick batchSize := tickDifference / uint32(routineCount) errChannel := make(chan error, routineCount) @@ -440,21 +627,36 @@ func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.Syn log.Printf("[Routine %d] Fetching tick range %d - %d", index, start, end) - randomClient, err := sp.getRandomClient() - if err != nil { - errChannel <- errors.Wrap(err, "getting random sync client") - } + lastTime := time.Now() - stream, err := randomClient.SyncGetTickInformation(ctx, &protobuff.SyncTickInfoRequest{ - FirstTick: start, - LastTick: end, - }, compression) - if err != nil { - errChannel <- errors.Wrap(err, "fetching tick information") - return - } + var stream protobuff.SyncService_SyncGetTickInformationClient - lastTime := time.Now() + for i := 0; i < sp.syncConfiguration.RetryCount; i++ { + + if i == sp.syncConfiguration.RetryCount-1 { + errChannel <- errors.Errorf("failed to fetch tick range [%d - %d] after retrying %d times", start, end, sp.syncConfiguration.RetryCount) + return + } + + randomClient, err := sp.getRandomClient() + if err != nil { + errChannel <- errors.Wrap(err, "getting random sync client") + return + } + + s, err := sp.performTickInfoRequest(ctx, randomClient, compression, start, end) + if err != nil { + if errors.Is(err, utils.SyncMaxConnReachedErr) { + log.Printf("Failed to fetch tick range [%d - %d]: %v\n", start, end, err) + continue + } + errChannel <- errors.Wrapf(err, "fetching tick range [%d - %d]", start, end) + return + } + + stream = s + break + } for { data, err := stream.Recv() @@ -511,8 +713,8 @@ func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.Syn func (sp *SyncProcessor) processTicks(tickInfoResponses []*protobuff.SyncTickData, initialIntervalTick uint32, computors types.Computors) (validator.ValidatedTicks, error) { - syncValidator := validator.NewSyncValidator(initialIntervalTick, computors, tickInfoResponses, sp.processTickTimeout, sp.pebbleStore, sp.lastSynchronizedTick) - validatedTicks, err := syncValidator.Validate() + syncValidator := validator.NewSyncValidator(initialIntervalTick, computors, tickInfoResponses, sp.pebbleStore, sp.lastSynchronizedTick) + validatedTicks, err := syncValidator.Validate(sp.validationRoutineCount) if err != nil { return nil, errors.Wrap(err, "validating ticks") } diff --git a/processor/sync_test.go 
b/processor/sync_test.go index 470b552..1b537a8 100644 --- a/processor/sync_test.go +++ b/processor/sync_test.go @@ -3,12 +3,11 @@ package processor import ( "github.com/qubic/go-archiver/protobuff" "testing" - "time" ) func TestSyncProcessor_CalculateSyncDelta(t *testing.T) { - mockSyncProcessor := NewSyncProcessor(SyncConfiguration{}, nil, time.Second) + mockSyncProcessor := NewSyncProcessor(SyncConfiguration{}, nil) testData := []struct { name string diff --git a/protobuff/sync.pb.go b/protobuff/sync.pb.go index 1a800ba..b2b1cf6 100644 --- a/protobuff/sync.pb.go +++ b/protobuff/sync.pb.go @@ -7,6 +7,7 @@ package protobuff import ( + _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" @@ -461,119 +462,393 @@ func (x *SyncLastSynchronizedTick) GetStoreHash() []byte { return nil } +type SyncDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DeltaPerEpoch []*ProcessedTickIntervalsPerEpoch `protobuf:"bytes,1,rep,name=delta_per_epoch,json=deltaPerEpoch,proto3" json:"delta_per_epoch,omitempty"` +} + +func (x *SyncDelta) Reset() { + *x = SyncDelta{} + mi := &file_sync_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDelta) ProtoMessage() {} + +func (x *SyncDelta) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDelta.ProtoReflect.Descriptor instead. 
+func (*SyncDelta) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{8} +} + +func (x *SyncDelta) GetDeltaPerEpoch() []*ProcessedTickIntervalsPerEpoch { + if x != nil { + return x.DeltaPerEpoch + } + return nil +} + +type SyncStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeVersion string `protobuf:"bytes,1,opt,name=node_version,json=nodeVersion,proto3" json:"node_version,omitempty"` + BootstrapAddresses []string `protobuf:"bytes,2,rep,name=bootstrap_addresses,json=bootstrapAddresses,proto3" json:"bootstrap_addresses,omitempty"` + LastSynchronizedTick *SyncLastSynchronizedTick `protobuf:"bytes,3,opt,name=last_synchronized_tick,json=lastSynchronizedTick,proto3" json:"last_synchronized_tick,omitempty"` + CurrentEpoch uint32 `protobuf:"varint,4,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` + CurrentTickRange *ProcessedTickInterval `protobuf:"bytes,5,opt,name=current_tick_range,json=currentTickRange,proto3" json:"current_tick_range,omitempty"` + AverageTicksPerMinute int32 `protobuf:"varint,6,opt,name=average_ticks_per_minute,json=averageTicksPerMinute,proto3" json:"average_ticks_per_minute,omitempty"` + LastFetchDuration float32 `protobuf:"fixed32,7,opt,name=last_fetch_duration,json=lastFetchDuration,proto3" json:"last_fetch_duration,omitempty"` + LastValidationDuration float32 `protobuf:"fixed32,8,opt,name=last_validation_duration,json=lastValidationDuration,proto3" json:"last_validation_duration,omitempty"` + LastStoreDuration float32 `protobuf:"fixed32,9,opt,name=last_store_duration,json=lastStoreDuration,proto3" json:"last_store_duration,omitempty"` + LastTotalDuration float32 `protobuf:"fixed32,10,opt,name=last_total_duration,json=lastTotalDuration,proto3" json:"last_total_duration,omitempty"` + ObjectRequestCount int32 `protobuf:"varint,11,opt,name=object_request_count,json=objectRequestCount,proto3" json:"object_request_count,omitempty"` + FetchRoutineCount int32 `protobuf:"varint,12,opt,name=fetch_routine_count,json=fetchRoutineCount,proto3" json:"fetch_routine_count,omitempty"` + ValidationRoutineCount int32 `protobuf:"varint,13,opt,name=validation_routine_count,json=validationRoutineCount,proto3" json:"validation_routine_count,omitempty"` + CurrentCompactionCount int32 `protobuf:"varint,14,opt,name=current_compaction_count,json=currentCompactionCount,proto3" json:"current_compaction_count,omitempty"` + Delta *SyncDelta `protobuf:"bytes,15,opt,name=delta,proto3" json:"delta,omitempty"` +} + +func (x *SyncStatus) Reset() { + *x = SyncStatus{} + mi := &file_sync_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncStatus) ProtoMessage() {} + +func (x *SyncStatus) ProtoReflect() protoreflect.Message { + mi := &file_sync_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncStatus.ProtoReflect.Descriptor instead. 
+func (*SyncStatus) Descriptor() ([]byte, []int) { + return file_sync_proto_rawDescGZIP(), []int{9} +} + +func (x *SyncStatus) GetNodeVersion() string { + if x != nil { + return x.NodeVersion + } + return "" +} + +func (x *SyncStatus) GetBootstrapAddresses() []string { + if x != nil { + return x.BootstrapAddresses + } + return nil +} + +func (x *SyncStatus) GetLastSynchronizedTick() *SyncLastSynchronizedTick { + if x != nil { + return x.LastSynchronizedTick + } + return nil +} + +func (x *SyncStatus) GetCurrentEpoch() uint32 { + if x != nil { + return x.CurrentEpoch + } + return 0 +} + +func (x *SyncStatus) GetCurrentTickRange() *ProcessedTickInterval { + if x != nil { + return x.CurrentTickRange + } + return nil +} + +func (x *SyncStatus) GetAverageTicksPerMinute() int32 { + if x != nil { + return x.AverageTicksPerMinute + } + return 0 +} + +func (x *SyncStatus) GetLastFetchDuration() float32 { + if x != nil { + return x.LastFetchDuration + } + return 0 +} + +func (x *SyncStatus) GetLastValidationDuration() float32 { + if x != nil { + return x.LastValidationDuration + } + return 0 +} + +func (x *SyncStatus) GetLastStoreDuration() float32 { + if x != nil { + return x.LastStoreDuration + } + return 0 +} + +func (x *SyncStatus) GetLastTotalDuration() float32 { + if x != nil { + return x.LastTotalDuration + } + return 0 +} + +func (x *SyncStatus) GetObjectRequestCount() int32 { + if x != nil { + return x.ObjectRequestCount + } + return 0 +} + +func (x *SyncStatus) GetFetchRoutineCount() int32 { + if x != nil { + return x.FetchRoutineCount + } + return 0 +} + +func (x *SyncStatus) GetValidationRoutineCount() int32 { + if x != nil { + return x.ValidationRoutineCount + } + return 0 +} + +func (x *SyncStatus) GetCurrentCompactionCount() int32 { + if x != nil { + return x.CurrentCompactionCount + } + return 0 +} + +func (x *SyncStatus) GetDelta() *SyncDelta { + if x != nil { + return x.Delta + } + return nil +} + var File_sync_proto protoreflect.FileDescriptor var file_sync_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, - 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, - 0x68, 0x44, 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, - 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, - 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, - 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, - 0x72, 0x73, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x8b, 0x01, 0x0a, 0x23, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x71, - 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, - 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 
0x2e, 0x4c, 0x61, 0x73, 0x74, 0x54, - 0x69, 0x63, 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x65, 0x72, - 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x52, 0x1e, - 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, - 0x74, 0x61, 0x50, 0x65, 0x72, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0xc7, - 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x40, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x54, - 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x4a, 0x0a, 0x0b, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, - 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, - 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, - 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, - 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x13, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, - 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, - 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x53, 0x79, 0x6e, - 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x72, 0x63, - 0x68, 0x69, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, - 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x73, 0x0a, 0x18, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x71, 0x75, 0x62, + 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x0d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, 0x72, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, - 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, - 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x50, 0x65, 0x72, - 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x16, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, - 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0x2e, 0x0a, - 0x14, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x59, 0x0a, - 0x15, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, - 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, - 0x52, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x51, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, - 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x12, 0x1b, - 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x53, - 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x74, 0x69, 0x63, - 0x6b, 0x73, 0x22, 0x8f, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x4c, 0x61, 0x73, 0x74, 0x53, - 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x12, - 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x69, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x68, 0x61, 
0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x48, 0x61, 0x73, 0x68, 0x32, 0xf6, 0x02, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x42, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, - 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x17, - 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, - 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, - 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x7d, - 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, 0x72, 0x73, + 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x8b, + 0x01, 0x0a, 0x23, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x71, 0x75, 0x6f, + 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x71, + 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, + 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x65, 0x72, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x52, 0x1e, 0x6c, 0x61, + 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, + 0x50, 0x65, 0x72, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0xc7, 0x02, 0x0a, + 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, + 0x09, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, + 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x69, 0x63, + 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 
0x08, 0x74, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x4a, 0x0a, 0x0b, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x0a, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x0c, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x53, 0x79, 0x6e, 0x63, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x72, 0x63, 0x68, 0x69, + 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x73, 0x0a, 0x18, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x54, 0x69, + 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x52, 0x16, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x54, 0x69, + 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x22, 0x2e, 0x0a, 0x14, 0x53, + 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x59, 0x0a, 0x15, 0x53, + 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 
0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, 0x52, 0x06, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x22, 0x51, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, + 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x53, 0x79, 0x6e, + 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, + 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, + 0x63, 0x54, 0x69, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, + 0x22, 0x8f, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x79, 0x6e, + 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x69, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x22, 0x6e, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, + 0x61, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x29, 0x5a, - 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x62, 0x69, - 0x63, 0x2f, 0x67, 0x6f, 0x2d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x66, 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x54, 0x69, + 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x50, 0x65, 0x72, 0x45, 
0x70, 0x6f, + 0x63, 0x68, 0x22, 0xe5, 0x06, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x12, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x79, + 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, + 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, + 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x5e, 0x0a, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x63, 0x6b, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, + 0x54, 0x69, 0x63, 0x6b, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x12, 0x2e, + 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6c, 0x61, 0x73, + 0x74, 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, + 0x0a, 0x18, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x02, + 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0a, 0x20, 
0x01, 0x28, 0x02, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x65, + 0x74, 0x63, 0x68, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x66, 0x65, 0x74, 0x63, 0x68, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3a, + 0x0a, 0x05, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, + 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x52, 0x05, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x32, 0xf6, 0x02, 0x0a, 0x0b, 0x53, + 0x79, 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x18, 0x53, 0x79, + 0x6e, 0x63, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, + 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x80, 0x01, 0x0a, 0x17, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x45, 0x70, 0x6f, + 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, + 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x61, + 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x45, + 0x70, 0x6f, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x30, 0x01, 0x12, 0x7d, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x54, + 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, + 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 
0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, + 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, + 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x54, + 0x69, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x30, 0x01, 0x32, 0x7c, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x25, 0x2e, 0x71, 0x75, 0x62, 0x69, 0x63, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, + 0x12, 0x0f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x2d, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x71, 0x75, 0x62, 0x69, 0x63, 0x2f, 0x67, 0x6f, 0x2d, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, + 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x66, 0x2f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -588,7 +863,7 @@ func file_sync_proto_rawDescGZIP() []byte { return file_sync_proto_rawDescData } -var file_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_sync_proto_goTypes = []any{ (*SyncEpochData)(nil), // 0: qubic.archiver.archive.pb.SyncEpochData (*SyncTickData)(nil), // 1: qubic.archiver.archive.pb.SyncTickData @@ -598,36 +873,45 @@ var file_sync_proto_goTypes = []any{ (*SyncTickInfoRequest)(nil), // 5: qubic.archiver.archive.pb.SyncTickInfoRequest (*SyncTickInfoResponse)(nil), // 6: qubic.archiver.archive.pb.SyncTickInfoResponse (*SyncLastSynchronizedTick)(nil), // 7: qubic.archiver.archive.pb.SyncLastSynchronizedTick - (*Computors)(nil), // 8: qubic.archiver.archive.pb.Computors - (*LastTickQuorumDataPerEpochIntervals)(nil), // 9: qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals - (*TickData)(nil), // 10: qubic.archiver.archive.pb.TickData - (*QuorumTickData)(nil), // 11: qubic.archiver.archive.pb.QuorumTickData - (*Transaction)(nil), // 12: qubic.archiver.archive.pb.Transaction - (*TransactionStatus)(nil), // 13: qubic.archiver.archive.pb.TransactionStatus - (*ProcessedTickIntervalsPerEpoch)(nil), // 14: qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch - (*emptypb.Empty)(nil), // 15: google.protobuf.Empty + (*SyncDelta)(nil), // 8: qubic.archiver.archive.pb.SyncDelta + (*SyncStatus)(nil), // 9: qubic.archiver.archive.pb.SyncStatus + (*Computors)(nil), // 10: qubic.archiver.archive.pb.Computors + (*LastTickQuorumDataPerEpochIntervals)(nil), // 11: qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals + (*TickData)(nil), // 12: qubic.archiver.archive.pb.TickData + (*QuorumTickData)(nil), // 13: qubic.archiver.archive.pb.QuorumTickData + (*Transaction)(nil), // 14: qubic.archiver.archive.pb.Transaction + (*TransactionStatus)(nil), // 15: 
qubic.archiver.archive.pb.TransactionStatus + (*ProcessedTickIntervalsPerEpoch)(nil), // 16: qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch + (*ProcessedTickInterval)(nil), // 17: qubic.archiver.archive.pb.ProcessedTickInterval + (*emptypb.Empty)(nil), // 18: google.protobuf.Empty } var file_sync_proto_depIdxs = []int32{ - 8, // 0: qubic.archiver.archive.pb.SyncEpochData.computor_list:type_name -> qubic.archiver.archive.pb.Computors - 9, // 1: qubic.archiver.archive.pb.SyncEpochData.last_tick_quorum_data_per_intervals:type_name -> qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals - 10, // 2: qubic.archiver.archive.pb.SyncTickData.tick_data:type_name -> qubic.archiver.archive.pb.TickData - 11, // 3: qubic.archiver.archive.pb.SyncTickData.quorum_data:type_name -> qubic.archiver.archive.pb.QuorumTickData - 12, // 4: qubic.archiver.archive.pb.SyncTickData.transactions:type_name -> qubic.archiver.archive.pb.Transaction - 13, // 5: qubic.archiver.archive.pb.SyncTickData.transactions_status:type_name -> qubic.archiver.archive.pb.TransactionStatus - 14, // 6: qubic.archiver.archive.pb.SyncMetadataResponse.processed_tick_intervals:type_name -> qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch + 10, // 0: qubic.archiver.archive.pb.SyncEpochData.computor_list:type_name -> qubic.archiver.archive.pb.Computors + 11, // 1: qubic.archiver.archive.pb.SyncEpochData.last_tick_quorum_data_per_intervals:type_name -> qubic.archiver.archive.pb.LastTickQuorumDataPerEpochIntervals + 12, // 2: qubic.archiver.archive.pb.SyncTickData.tick_data:type_name -> qubic.archiver.archive.pb.TickData + 13, // 3: qubic.archiver.archive.pb.SyncTickData.quorum_data:type_name -> qubic.archiver.archive.pb.QuorumTickData + 14, // 4: qubic.archiver.archive.pb.SyncTickData.transactions:type_name -> qubic.archiver.archive.pb.Transaction + 15, // 5: qubic.archiver.archive.pb.SyncTickData.transactions_status:type_name -> qubic.archiver.archive.pb.TransactionStatus + 16, // 6: qubic.archiver.archive.pb.SyncMetadataResponse.processed_tick_intervals:type_name -> qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch 0, // 7: qubic.archiver.archive.pb.SyncEpochInfoResponse.epochs:type_name -> qubic.archiver.archive.pb.SyncEpochData 1, // 8: qubic.archiver.archive.pb.SyncTickInfoResponse.ticks:type_name -> qubic.archiver.archive.pb.SyncTickData - 15, // 9: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:input_type -> google.protobuf.Empty - 3, // 10: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:input_type -> qubic.archiver.archive.pb.SyncEpochInfoRequest - 5, // 11: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:input_type -> qubic.archiver.archive.pb.SyncTickInfoRequest - 2, // 12: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:output_type -> qubic.archiver.archive.pb.SyncMetadataResponse - 4, // 13: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:output_type -> qubic.archiver.archive.pb.SyncEpochInfoResponse - 6, // 14: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:output_type -> qubic.archiver.archive.pb.SyncTickInfoResponse - 12, // [12:15] is the sub-list for method output_type - 9, // [9:12] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 16, // 9: qubic.archiver.archive.pb.SyncDelta.delta_per_epoch:type_name -> qubic.archiver.archive.pb.ProcessedTickIntervalsPerEpoch + 
7, // 10: qubic.archiver.archive.pb.SyncStatus.last_synchronized_tick:type_name -> qubic.archiver.archive.pb.SyncLastSynchronizedTick + 17, // 11: qubic.archiver.archive.pb.SyncStatus.current_tick_range:type_name -> qubic.archiver.archive.pb.ProcessedTickInterval + 8, // 12: qubic.archiver.archive.pb.SyncStatus.delta:type_name -> qubic.archiver.archive.pb.SyncDelta + 18, // 13: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:input_type -> google.protobuf.Empty + 3, // 14: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:input_type -> qubic.archiver.archive.pb.SyncEpochInfoRequest + 5, // 15: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:input_type -> qubic.archiver.archive.pb.SyncTickInfoRequest + 18, // 16: qubic.archiver.archive.pb.SyncClientService.SyncGetStatus:input_type -> google.protobuf.Empty + 2, // 17: qubic.archiver.archive.pb.SyncService.SyncGetBootstrapMetadata:output_type -> qubic.archiver.archive.pb.SyncMetadataResponse + 4, // 18: qubic.archiver.archive.pb.SyncService.SyncGetEpochInformation:output_type -> qubic.archiver.archive.pb.SyncEpochInfoResponse + 6, // 19: qubic.archiver.archive.pb.SyncService.SyncGetTickInformation:output_type -> qubic.archiver.archive.pb.SyncTickInfoResponse + 9, // 20: qubic.archiver.archive.pb.SyncClientService.SyncGetStatus:output_type -> qubic.archiver.archive.pb.SyncStatus + 17, // [17:21] is the sub-list for method output_type + 13, // [13:17] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_sync_proto_init() } @@ -642,9 +926,9 @@ func file_sync_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sync_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 10, NumExtensions: 0, - NumServices: 1, + NumServices: 2, }, GoTypes: file_sync_proto_goTypes, DependencyIndexes: file_sync_proto_depIdxs, diff --git a/protobuff/sync.pb.gw.go b/protobuff/sync.pb.gw.go index 4828016..cd8fdac 100644 --- a/protobuff/sync.pb.gw.go +++ b/protobuff/sync.pb.gw.go @@ -100,6 +100,24 @@ func request_SyncService_SyncGetTickInformation_0(ctx context.Context, marshaler } +func request_SyncClientService_SyncGetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SyncClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := client.SyncGetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SyncClientService_SyncGetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SyncClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := server.SyncGetStatus(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterSyncServiceHandlerServer registers the http handlers for service SyncService to "mux". // UnaryRPC :call SyncServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
@@ -148,6 +166,40 @@ func RegisterSyncServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux return nil } +// RegisterSyncClientServiceHandlerServer registers the http handlers for service SyncClientService to "mux". +// UnaryRPC :call SyncClientServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSyncClientServiceHandlerFromEndpoint instead. +func RegisterSyncClientServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SyncClientServiceServer) error { + + mux.Handle("GET", pattern_SyncClientService_SyncGetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncClientService/SyncGetStatus", runtime.WithHTTPPathPattern("/v2/sync-status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncClientService_SyncGetStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncClientService_SyncGetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + // RegisterSyncServiceHandlerFromEndpoint is same as RegisterSyncServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterSyncServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { @@ -270,3 +322,74 @@ var ( forward_SyncService_SyncGetTickInformation_0 = runtime.ForwardResponseStream ) + +// RegisterSyncClientServiceHandlerFromEndpoint is same as RegisterSyncClientServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterSyncClientServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterSyncClientServiceHandler(ctx, mux, conn) +} + +// RegisterSyncClientServiceHandler registers the http handlers for service SyncClientService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterSyncClientServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterSyncClientServiceHandlerClient(ctx, mux, NewSyncClientServiceClient(conn)) +} + +// RegisterSyncClientServiceHandlerClient registers the http handlers for service SyncClientService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SyncClientServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SyncClientServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "SyncClientServiceClient" to call the correct interceptors. +func RegisterSyncClientServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SyncClientServiceClient) error { + + mux.Handle("GET", pattern_SyncClientService_SyncGetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qubic.archiver.archive.pb.SyncClientService/SyncGetStatus", runtime.WithHTTPPathPattern("/v2/sync-status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncClientService_SyncGetStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncClientService_SyncGetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_SyncClientService_SyncGetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v2", "sync-status"}, "")) +) + +var ( + forward_SyncClientService_SyncGetStatus_0 = runtime.ForwardResponseMessage +) diff --git a/protobuff/sync.proto b/protobuff/sync.proto index 7a01bb0..485a872 100644 --- a/protobuff/sync.proto +++ b/protobuff/sync.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package qubic.archiver.archive.pb; option go_package = "github.com/qubic/go-archiver/protobuff/"; +import "google/api/annotations.proto"; import "google/protobuf/empty.proto"; import "archive.proto"; @@ -48,7 +49,28 @@ message SyncLastSynchronizedTick { uint32 epoch = 2; bytes chain_hash = 3; bytes store_hash = 4; +} + +message SyncDelta { + repeated ProcessedTickIntervalsPerEpoch delta_per_epoch = 1; +} +message SyncStatus { + string node_version = 1; + repeated string bootstrap_addresses = 2; + SyncLastSynchronizedTick last_synchronized_tick = 3; + uint32 current_epoch = 4; + ProcessedTickInterval current_tick_range = 5; + int32 average_ticks_per_minute = 6; + float last_fetch_duration = 7; + float last_validation_duration = 8; + float last_store_duration = 9; + float last_total_duration = 10; + int32 object_request_count = 11; + int32 fetch_routine_count = 12; + int32 validation_routine_count = 13; + int32 current_compaction_count = 14; + SyncDelta delta = 15; } service SyncService { @@ -57,4 +79,12 @@ service SyncService { rpc SyncGetEpochInformation(SyncEpochInfoRequest) returns (stream SyncEpochInfoResponse) {}; rpc SyncGetTickInformation(SyncTickInfoRequest) returns (stream SyncTickInfoResponse) {}; -} \ No newline at end of file +} + +service SyncClientService { + rpc SyncGetStatus(google.protobuf.Empty) returns (SyncStatus) { + option (google.api.http) = { + get: "/v2/sync-status" + }; + }; +} diff --git a/protobuff/sync_grpc.pb.go b/protobuff/sync_grpc.pb.go index d428c57..01d2f76 100644 --- a/protobuff/sync_grpc.pb.go +++ b/protobuff/sync_grpc.pb.go @@ -237,3 +237,93 @@ var SyncService_ServiceDesc = grpc.ServiceDesc{ }, Metadata: "sync.proto", } + +const ( + SyncClientService_SyncGetStatus_FullMethodName = "/qubic.archiver.archive.pb.SyncClientService/SyncGetStatus" +) + +// SyncClientServiceClient is the client API for SyncClientService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SyncClientServiceClient interface { + SyncGetStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SyncStatus, error) +} + +type syncClientServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSyncClientServiceClient(cc grpc.ClientConnInterface) SyncClientServiceClient { + return &syncClientServiceClient{cc} +} + +func (c *syncClientServiceClient) SyncGetStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SyncStatus, error) { + out := new(SyncStatus) + err := c.cc.Invoke(ctx, SyncClientService_SyncGetStatus_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SyncClientServiceServer is the server API for SyncClientService service. 
+// All implementations must embed UnimplementedSyncClientServiceServer +// for forward compatibility +type SyncClientServiceServer interface { + SyncGetStatus(context.Context, *emptypb.Empty) (*SyncStatus, error) + mustEmbedUnimplementedSyncClientServiceServer() +} + +// UnimplementedSyncClientServiceServer must be embedded to have forward compatible implementations. +type UnimplementedSyncClientServiceServer struct { +} + +func (UnimplementedSyncClientServiceServer) SyncGetStatus(context.Context, *emptypb.Empty) (*SyncStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncGetStatus not implemented") +} +func (UnimplementedSyncClientServiceServer) mustEmbedUnimplementedSyncClientServiceServer() {} + +// UnsafeSyncClientServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SyncClientServiceServer will +// result in compilation errors. +type UnsafeSyncClientServiceServer interface { + mustEmbedUnimplementedSyncClientServiceServer() +} + +func RegisterSyncClientServiceServer(s grpc.ServiceRegistrar, srv SyncClientServiceServer) { + s.RegisterService(&SyncClientService_ServiceDesc, srv) +} + +func _SyncClientService_SyncGetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SyncClientServiceServer).SyncGetStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SyncClientService_SyncGetStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncClientServiceServer).SyncGetStatus(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// SyncClientService_ServiceDesc is the grpc.ServiceDesc for SyncClientService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SyncClientService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "qubic.archiver.archive.pb.SyncClientService", + HandlerType: (*SyncClientServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SyncGetStatus", + Handler: _SyncClientService_SyncGetStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sync.proto", +} diff --git a/rpc/rpc_server.go b/rpc/rpc_server.go index f6df899..6e5b1b4 100644 --- a/rpc/rpc_server.go +++ b/rpc/rpc_server.go @@ -6,6 +6,7 @@ import ( "encoding/json" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/pkg/errors" + "github.com/qubic/go-archiver/processor" "github.com/qubic/go-archiver/protobuff" "github.com/qubic/go-archiver/store" "github.com/qubic/go-archiver/validator/quorum" @@ -26,9 +27,10 @@ import ( ) type BootstrapConfiguration struct { - Enable bool - MaximumRequestedItems int - BatchSize int + Enable bool + MaximumRequestedItems int + BatchSize int + MaxConcurrentConnections int } var _ protobuff.ArchiveServiceServer = &Server{} @@ -49,9 +51,10 @@ type Server struct { store *store.PebbleStore pool *qubic.Pool bootstrapConfiguration BootstrapConfiguration + syncConfiguration processor.SyncConfiguration } -func NewServer(listenAddrGRPC, listenAddrHTTP string, syncThreshold int, chainTickUrl string, store *store.PebbleStore, pool *qubic.Pool, bootstrapConfiguration BootstrapConfiguration) *Server { +func NewServer(listenAddrGRPC, listenAddrHTTP string, syncThreshold int, chainTickUrl string, store *store.PebbleStore, pool *qubic.Pool, bootstrapConfiguration BootstrapConfiguration, syncConfiguration processor.SyncConfiguration) *Server { return &Server{ listenAddrGRPC: listenAddrGRPC, listenAddrHTTP: listenAddrHTTP, @@ -60,6 +63,7 @@ func NewServer(listenAddrGRPC, listenAddrHTTP string, syncThreshold int, chainTi store: store, pool: pool, bootstrapConfiguration: bootstrapConfiguration, + syncConfiguration: syncConfiguration, } } @@ -649,9 +653,12 @@ func (s *Server) Start() error { protobuff.RegisterArchiveServiceServer(srv, s) if s.bootstrapConfiguration.Enable { syncService := NewSyncService(s.store, s.bootstrapConfiguration) - protobuff.RegisterSyncServiceServer(srv, syncService) } + if s.syncConfiguration.Enable { + syncClientService := NewSyncClientService() + protobuff.RegisterSyncClientServiceServer(srv, syncClientService) + } reflection.Register(srv) lis, err := net.Listen("tcp", s.listenAddrGRPC) @@ -678,6 +685,17 @@ func (s *Server) Start() error { ), } + if s.syncConfiguration.Enable { + if err := protobuff.RegisterSyncClientServiceHandlerFromEndpoint( + context.Background(), + mux, + s.listenAddrGRPC, + opts, + ); err != nil { + panic(err) + } + } + if err := protobuff.RegisterArchiveServiceHandlerFromEndpoint( context.Background(), mux, diff --git a/rpc/sync.go b/rpc/sync.go index c92796d..490e471 100644 --- a/rpc/sync.go +++ b/rpc/sync.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "github.com/pkg/errors" + "github.com/qubic/go-archiver/processor" "github.com/qubic/go-archiver/protobuff" "github.com/qubic/go-archiver/store" "github.com/qubic/go-archiver/utils" @@ -13,20 +14,57 @@ import ( _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" + "sync" ) var _ protobuff.SyncServiceServer = &SyncService{} +type currentConnectionCount struct { + mutex sync.RWMutex + value int +} + +func (c 
*currentConnectionCount) getValue() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.value +} + +func (c *currentConnectionCount) increment() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.value += 1 +} + +func (c *currentConnectionCount) decrement() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.value -= 1 +} + +type SyncClientService struct { + protobuff.UnimplementedSyncClientServiceServer +} + +func NewSyncClientService() *SyncClientService { + return &SyncClientService{} +} + type SyncService struct { protobuff.UnimplementedSyncServiceServer store *store.PebbleStore bootstrapConfiguration BootstrapConfiguration + connectionCount currentConnectionCount } func NewSyncService(pebbleStore *store.PebbleStore, bootstrapConfiguration BootstrapConfiguration) *SyncService { return &SyncService{ bootstrapConfiguration: bootstrapConfiguration, store: pebbleStore, + connectionCount: currentConnectionCount{ + mutex: sync.RWMutex{}, + value: 0, + }, } } @@ -37,11 +75,6 @@ func (ss *SyncService) SyncGetBootstrapMetadata(ctx context.Context, _ *emptypb. return nil, status.Errorf(codes.Internal, "cannot get processed tick intervals: %v", err) } - /*skippedIntervals, err := ss.store.GetSkippedTicksInterval(ctx) - if err != nil { - return nil, status.Errorf(codes.Internal, "cannot get skipped tick intervals: %v", err) - }*/ - return &protobuff.SyncMetadataResponse{ ArchiverVersion: utils.ArchiverVersion, MaxObjectRequest: int32(ss.bootstrapConfiguration.MaximumRequestedItems), @@ -115,6 +148,13 @@ func (ss *SyncService) sendTickInformationResponse(ticks []*protobuff.SyncTickDa func (ss *SyncService) SyncGetTickInformation(req *protobuff.SyncTickInfoRequest, stream protobuff.SyncService_SyncGetTickInformationServer) error { + available := ss.bootstrapConfiguration.MaxConcurrentConnections - ss.connectionCount.getValue() + if available < 1 { + return utils.SyncMaxConnReachedErr + } + ss.connectionCount.increment() + defer ss.connectionCount.decrement() + tickDifference := int(req.LastTick - req.FirstTick) if tickDifference > ss.bootstrapConfiguration.MaximumRequestedItems || tickDifference < 0 { @@ -175,3 +215,44 @@ func (ss *SyncService) SyncGetTickInformation(req *protobuff.SyncTickInfoRequest } return nil } + +func (scs *SyncClientService) SyncGetStatus(ctx context.Context, _ *emptypb.Empty) (*protobuff.SyncStatus, error) { + + if processor.SynchronizationStatus == nil { + return nil, status.Errorf(codes.Unavailable, "synchronization status not available yet, please try again in a couple of minutes") + } + + syncStatus := processor.SynchronizationStatus.Get() + + delta := make([]*protobuff.ProcessedTickIntervalsPerEpoch, 0) + + for _, epochDelta := range syncStatus.Delta { + delta = append(delta, &protobuff.ProcessedTickIntervalsPerEpoch{ + Epoch: epochDelta.Epoch, + Intervals: epochDelta.ProcessedIntervals, + }) + } + + currentCompactions := store.CompactionCount.Get() + + return &protobuff.SyncStatus{ + NodeVersion: syncStatus.NodeVersion, + BootstrapAddresses: syncStatus.BootstrapAddresses, + LastSynchronizedTick: syncStatus.LastSynchronizedTick, + CurrentEpoch: syncStatus.CurrentEpoch, + CurrentTickRange: syncStatus.CurrentTickRange, + AverageTicksPerMinute: int32(syncStatus.AverageTicksPerMinute), + LastFetchDuration: syncStatus.LastFetchDuration, + LastValidationDuration: syncStatus.LastValidationDuration, + LastStoreDuration: syncStatus.LastStoreDuration, + LastTotalDuration: syncStatus.LastTotalDuration, + ObjectRequestCount: int32(syncStatus.ObjectRequestCount), + 
FetchRoutineCount: int32(syncStatus.FetchRoutineCount), + ValidationRoutineCount: int32(syncStatus.ValidationRoutineCount), + CurrentCompactionCount: int32(currentCompactions), + Delta: &protobuff.SyncDelta{ + DeltaPerEpoch: delta, + }, + }, nil + +} diff --git a/store/event_listener.go b/store/event_listener.go index 284d038..a111d54 100644 --- a/store/event_listener.go +++ b/store/event_listener.go @@ -3,8 +3,37 @@ package store import ( "github.com/cockroachdb/pebble" "log" + "sync" ) +type CompactionCountMutex struct { + mutex sync.RWMutex + value int +} + +func (ccm *CompactionCountMutex) increment() { + ccm.mutex.Lock() + defer ccm.mutex.Unlock() + ccm.value++ +} + +func (ccm *CompactionCountMutex) decrement() { + ccm.mutex.Lock() + defer ccm.mutex.Unlock() + ccm.value-- +} + +func (ccm *CompactionCountMutex) Get() int { + ccm.mutex.RLock() + defer ccm.mutex.RUnlock() + return ccm.value +} + +var CompactionCount = &CompactionCountMutex{ + mutex: sync.RWMutex{}, + value: 0, +} + type PebbleEventListener struct { pebble.EventListener } @@ -37,12 +66,13 @@ func compactionBegin(info pebble.CompactionInfo) { for _, level := range info.Input { log.Printf(" From Level %d - %s\n", level.Level, level.String()) } - log.Printf(" To level %d %ss\n", info.Output.Level, info.Output.String()) - + log.Printf(" To level %d %s\n", info.Output.Level, info.Output.String()) + CompactionCount.increment() } func compactionEnd(info pebble.CompactionInfo) { log.Printf("[PEBBLE]: Compaction with JobID %d ended. Took %v\n", info.JobID, info.TotalDuration) + CompactionCount.decrement() } func flushBegin(info pebble.FlushInfo) { diff --git a/utils/utils.go b/utils/utils.go index 072e930..ebbe2a8 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -10,6 +10,8 @@ import ( const ArchiverVersion = "dev" +var SyncMaxConnReachedErr = errors.New("max connection count reached, please try again later") + type SigVerifierFunc = func(ctx context.Context, pubkey [32]byte, digest [32]byte, sig [64]byte) error func K12Hash(data []byte) ([32]byte, error) { diff --git a/validator/sync.go b/validator/sync.go index 36ab876..2de63f1 100644 --- a/validator/sync.go +++ b/validator/sync.go @@ -12,7 +12,6 @@ import ( "github.com/qubic/go-archiver/validator/tx" "github.com/qubic/go-node-connector/types" "log" - "runtime" "slices" "sync" "time" @@ -43,7 +42,7 @@ type SyncValidator struct { processTickTimeout time.Duration } -func NewSyncValidator(initialIntervalTick uint32, computors types.Computors, ticks []*protobuff.SyncTickData, processTickTimeout time.Duration, pebbleStore *store.PebbleStore, lastSynchronizedTick *protobuff.SyncLastSynchronizedTick) *SyncValidator { +func NewSyncValidator(initialIntervalTick uint32, computors types.Computors, ticks []*protobuff.SyncTickData, pebbleStore *store.PebbleStore, lastSynchronizedTick *protobuff.SyncLastSynchronizedTick) *SyncValidator { return &SyncValidator{ initialIntervalTick: initialIntervalTick, @@ -52,21 +51,16 @@ func NewSyncValidator(initialIntervalTick uint32, computors types.Computors, tic lastSynchronizedTick: lastSynchronizedTick, - pebbleStore: pebbleStore, - processTickTimeout: processTickTimeout, + pebbleStore: pebbleStore, } } -func (sv *SyncValidator) Validate() (ValidatedTicks, error) { - - /*ctx, cancel := context.WithTimeout(context.Background(), sv.processTickTimeout) - defer cancel()*/ +func (sv *SyncValidator) Validate(routineCount int) (ValidatedTicks, error) { var validatedTicks ValidatedTicks counter := 0 mutex := sync.RWMutex{} - routineCount := runtime.NumCPU() 
 	batchSize := len(sv.ticks) / routineCount
 
 	errChannel := make(chan error, routineCount)
 	var waitGroup sync.WaitGroup
 

From 251b0d968c99424945f0f5700e54e08fa4d03375 Mon Sep 17 00:00:00 2001
From: LINCKODE
Date: Mon, 16 Dec 2024 18:11:37 +0200
Subject: [PATCH 08/10] Update README.md and update default config values.

---
 README.md | 6 +++---
 main.go | 6 +++---
 processor/sync.go | 6 ++----
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 32591a3..ca71f08 100644
--- a/README.md
+++ b/README.md
@@ -46,13 +46,13 @@ This can be configured using the `QUBIC_NODES_QUBIC_PEER_LIST` environment varia
 $QUBIC_ARCHIVER_SYNC_ENABLE (default: false)
 $QUBIC_ARCHIVER_SYNC_SOURCES ,[string...] (default: localhost:8001) // TODO: To be changed with official bootstrap node list
-$QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT (default: 5s) // TODO: Review implementation
+$QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT (default: 1m)
 $QUBIC_ARCHIVER_SYNC_ENABLE_COMPRESSION (default: true)
 $QUBIC_ARCHIVER_SYNC_RETRY_COUNT (default: 10)
 
 $QUBIC_ARCHIVER_BOOTSTRAP_ENABLE (default: true)
 $QUBIC_ARCHIVER_BOOTSTRAP_MAX_REQUESTED_ITEMS (default: 1000)
-$QUBIC_ARCHIVER_BOOTSTRAP_MAX_CONCURRENT_CONNECTIONS (default: 30) // TODO: Figure out the optimal count
+$QUBIC_ARCHIVER_BOOTSTRAP_MAX_CONCURRENT_CONNECTIONS (default: 20)
 $QUBIC_ARCHIVER_BOOTSTRAP_BATCH_SIZE (default: 10)
 ```
 
@@ -87,7 +87,7 @@ Storage speed is also a factor to consider, and in some cases may become a bottl
 
 - `QUBIC_ARCHIVER_SYNC_ENABLE`: Whether to enable the synchronization feature or not.
 - `QUBIC_ARCHIVER_SYNC_SOURCES`: The list of bootstrap nodes to fetch from.
-- `QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT`: **TODO**
+- `QUBIC_ARCHIVER_SYNC_RESPONSE_TIMEOUT`: The maximum amount of time a single tick batch fetch is allowed to take before the request times out.
 - `QUBIC_ARCHIVER_SYNC_RETRY_COUNT`: The number of times to retry fetching a tick range, in the event that the bootstrap has reached the maximum number of connections.
#### Bootstrap diff --git a/main.go b/main.go index 956f7d2..c0929de 100644 --- a/main.go +++ b/main.go @@ -47,7 +47,7 @@ func run() error { } Qubic struct { NodePort string `conf:"default:21841"` - StorageFolder string `conf:"default:storage"` + StorageFolder string `conf:"default:store"` ProcessTickTimeout time.Duration `conf:"default:5s"` } Store struct { @@ -56,14 +56,14 @@ func run() error { Sync struct { Enable bool `conf:"default:false"` Sources []string `conf:"default:localhost:8001"` - ResponseTimeout time.Duration `conf:"default:5s"` + ResponseTimeout time.Duration `conf:"default:1m"` EnableCompression bool `conf:"default:true"` RetryCount int `conf:"default:10"` } Bootstrap struct { Enable bool `conf:"default:true"` MaxRequestedItems int `conf:"default:1000"` - MaxConcurrentConnections int `conf:"default:30"` + MaxConcurrentConnections int `conf:"default:20"` BatchSize int `conf:"default:10"` } } diff --git a/processor/sync.go b/processor/sync.go index a58f1f1..fe1fbc3 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -585,10 +585,8 @@ func (sp *SyncProcessor) performTickInfoRequest(ctx context.Context, randomClien func (sp *SyncProcessor) fetchTicks(startTick, endTick uint32) ([]*protobuff.SyncTickData, error) { - //TODO: We are currently fetching a large process of ticks, and using the default will cause the method to error before we are finished - //ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) - //defer cancel() - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), sp.syncConfiguration.ResponseTimeout) + defer cancel() var compression grpc.CallOption = grpc.EmptyCallOption{} if sp.syncConfiguration.EnableCompression { From ff77bb71b89781e2b7c37530b69124cc28f5dd25 Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Tue, 17 Dec 2024 20:23:13 +0200 Subject: [PATCH 09/10] Skip epoch 105 --- processor/sync.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/processor/sync.go b/processor/sync.go index fe1fbc3..984d9ff 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -459,6 +459,10 @@ func (sp *SyncProcessor) synchronize() error { for _, epochDelta := range sp.syncDelta { + if epochDelta.Epoch == 105 { + continue + } + if sp.lastSynchronizedTick.Epoch > epochDelta.Epoch { continue } From 1c9901f2e55a162c99367524f8e07f0f65fb99cf Mon Sep 17 00:00:00 2001 From: LINCKODE Date: Tue, 17 Dec 2024 21:25:08 +0200 Subject: [PATCH 10/10] Skip epoch 106 --- processor/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processor/sync.go b/processor/sync.go index 984d9ff..baf690d 100644 --- a/processor/sync.go +++ b/processor/sync.go @@ -459,7 +459,7 @@ func (sp *SyncProcessor) synchronize() error { for _, epochDelta := range sp.syncDelta { - if epochDelta.Epoch == 105 { + if epochDelta.Epoch == 105 || epochDelta.Epoch == 106 { continue }
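
For reference, below is a minimal sketch of how a downstream client could query the sync-status endpoint introduced by this series. It uses the generated `SyncClientServiceClient` from `protobuff/sync_grpc.pb.go`; the target address (`localhost:8001`), the plaintext transport credentials, and the 5-second timeout are illustrative assumptions, not values taken from the patches. When sync mode is enabled, the same data is also served through the HTTP gateway at `GET /v2/sync-status`.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/qubic/go-archiver/protobuff"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Assumed gRPC address of a sync-enabled archiver instance; adjust to your deployment.
	conn, err := grpc.Dial("localhost:8001", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dialing archiver: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// SyncGetStatus takes no arguments and returns the current synchronization snapshot.
	client := protobuff.NewSyncClientServiceClient(conn)
	status, err := client.SyncGetStatus(ctx, &emptypb.Empty{})
	if err != nil {
		// The server answers with codes.Unavailable until the sync processor
		// has published its first status, so callers should expect to retry.
		log.Fatalf("fetching sync status: %v", err)
	}

	log.Printf("node %s, epoch %d, ~%d ticks/min, %d running compactions",
		status.NodeVersion, status.CurrentEpoch, status.AverageTicksPerMinute, status.CurrentCompactionCount)
}
```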