From 08a72827f5bb6fcd8040221e1233e11c41e5e759 Mon Sep 17 00:00:00 2001
From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com>
Date: Tue, 11 Jun 2024 13:43:14 -0400
Subject: [PATCH] migrate x/sync to p2p

Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com>
---
 network/p2p/p2ptest/client.go      |  80 +++
 network/p2p/p2ptest/client_test.go | 110 ++++
 proto/pb/sync/sync.pb.go           | 847 +++++++++++++----------
 proto/sync/sync.proto              |   8 -
 scripts/mocks.mockgen.source.txt   |   1 -
 scripts/mocks.mockgen.txt          |   1 -
 x/merkledb/mock_db.go              | 491 -----------------
 x/sync/client.go                   | 392 -------------
 x/sync/client_test.go              | 785 +++-----------------------
 x/sync/manager.go                  | 500 +++++++++++++----
 x/sync/mock_client.go              |  72 ---
 x/sync/network_client.go           | 368 -------------
 x/sync/network_server.go           | 314 ++++------
 x/sync/network_server_test.go      | 371 +++++--------
 x/sync/response_handler.go         |  41 --
 x/sync/sync_test.go                | 559 +++++++++++--------
 x/sync/syncmock/network_client.go  | 129 -----
 17 files changed, 1618 insertions(+), 3451 deletions(-)
 create mode 100644 network/p2p/p2ptest/client.go
 create mode 100644 network/p2p/p2ptest/client_test.go
 delete mode 100644 x/merkledb/mock_db.go
 delete mode 100644 x/sync/client.go
 delete mode 100644 x/sync/mock_client.go
 delete mode 100644 x/sync/network_client.go
 delete mode 100644 x/sync/response_handler.go
 delete mode 100644 x/sync/syncmock/network_client.go

diff --git a/network/p2p/p2ptest/client.go b/network/p2p/p2ptest/client.go
new file mode 100644
index 00000000000..747904b40ff
--- /dev/null
+++ b/network/p2p/p2ptest/client.go
@@ -0,0 +1,80 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package p2ptest
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/stretchr/testify/require"
+
+    "github.com/ava-labs/avalanchego/ids"
+    "github.com/ava-labs/avalanchego/network/p2p"
+    "github.com/ava-labs/avalanchego/snow/engine/common"
+    "github.com/ava-labs/avalanchego/snow/engine/enginetest"
+    "github.com/ava-labs/avalanchego/utils/logging"
+    "github.com/ava-labs/avalanchego/utils/set"
+)
+
+// NewClient generates a client-server pair and returns the client used to
+// communicate with a server with the specified handler
+func NewClient(t *testing.T, rootCtx context.Context, handler p2p.Handler) *p2p.Client {
+    clientSender := &enginetest.Sender{}
+    serverSender := &enginetest.Sender{}
+
+    clientNodeID := ids.GenerateTestNodeID()
+    clientNetwork, err := p2p.NewNetwork(logging.NoLog{}, clientSender, prometheus.NewRegistry(), "")
+    require.NoError(t, err)
+
+    serverNodeID := ids.GenerateTestNodeID()
+    serverNetwork, err := p2p.NewNetwork(logging.NoLog{}, serverSender, prometheus.NewRegistry(), "")
+    require.NoError(t, err)
+
+    clientSender.SendAppGossipF = func(ctx context.Context, _ common.SendConfig, gossipBytes []byte) error {
+        go func() {
+            require.NoError(t, serverNetwork.AppGossip(ctx, clientNodeID, gossipBytes))
+        }()
+
+        return nil
+    }
+
+    clientSender.SendAppRequestF = func(ctx context.Context, _ set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error {
+        // Send the request asynchronously to avoid deadlock when the server
+        // sends the response back to the client
+        go func() {
+            require.NoError(t, serverNetwork.AppRequest(ctx, clientNodeID, requestID, time.Time{}, requestBytes))
+        }()
+
+        return nil
+    }
+
+    serverSender.SendAppResponseF = func(ctx context.Context, _ ids.NodeID, requestID uint32, responseBytes []byte) error {
+        go func() {
+            require.NoError(t, clientNetwork.AppResponse(ctx, serverNodeID, requestID, responseBytes))
+        }()
+
+        return nil
+    }
+
+    serverSender.SendAppErrorF = func(ctx context.Context, _ ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error {
+        go func() {
+            require.NoError(t, clientNetwork.AppRequestFailed(ctx, serverNodeID, requestID, &common.AppError{
+                Code:    errorCode,
+                Message: errorMessage,
+            }))
+        }()
+
+        return nil
+    }
+
+    require.NoError(t, clientNetwork.Connected(rootCtx, clientNodeID, nil))
+    require.NoError(t, clientNetwork.Connected(rootCtx, serverNodeID, nil))
+    require.NoError(t, serverNetwork.Connected(rootCtx, clientNodeID, nil))
+    require.NoError(t, serverNetwork.Connected(rootCtx, serverNodeID, nil))
+
+    require.NoError(t, serverNetwork.AddHandler(0, handler))
+    return clientNetwork.NewClient(0)
+}
diff --git a/network/p2p/p2ptest/client_test.go b/network/p2p/p2ptest/client_test.go
new file mode 100644
index 00000000000..cef624aaccb
--- /dev/null
+++ b/network/p2p/p2ptest/client_test.go
@@ -0,0 +1,110 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package p2ptest
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/ava-labs/avalanchego/ids"
+    "github.com/ava-labs/avalanchego/network/p2p"
+    "github.com/ava-labs/avalanchego/snow/engine/common"
+    "github.com/ava-labs/avalanchego/utils/set"
+)
+
+func TestNewClient_AppGossip(t *testing.T) {
+    require := require.New(t)
+    ctx := context.Background()
+
+    appGossipChan := make(chan struct{})
+    testHandler := p2p.TestHandler{
+        AppGossipF: func(context.Context, ids.NodeID, []byte) {
+            close(appGossipChan)
+        },
+    }
+
+    client := NewClient(t, ctx, testHandler)
+    require.NoError(client.AppGossip(ctx, common.SendConfig{}, []byte("foobar")))
+    <-appGossipChan
+}
+
+func TestNewClient_AppRequest(t *testing.T) {
+    tests := []struct {
+        name        string
+        appResponse []byte
+        appErr      error
+        appRequestF func(ctx context.Context, client *p2p.Client, onResponse p2p.AppResponseCallback) error
+    }{
+        {
+            name:        "AppRequest - response",
+            appResponse: []byte("foobar"),
+            appRequestF: func(ctx context.Context, client *p2p.Client, onResponse p2p.AppResponseCallback) error {
+                return client.AppRequest(ctx, set.Of(ids.GenerateTestNodeID()), []byte("foo"), onResponse)
+            },
+        },
+        {
+            name: "AppRequest - error",
+            appErr: &common.AppError{
+                Code:    123,
+                Message: "foobar",
+            },
+            appRequestF: func(ctx context.Context, client *p2p.Client, onResponse p2p.AppResponseCallback) error {
+                return client.AppRequest(ctx, set.Of(ids.GenerateTestNodeID()), []byte("foo"), onResponse)
+            },
+        },
+        {
+            name:        "AppRequestAny - response",
+            appResponse: []byte("foobar"),
+            appRequestF: func(ctx context.Context, client *p2p.Client, onResponse p2p.AppResponseCallback) error {
+                return client.AppRequestAny(ctx, []byte("foo"), onResponse)
+            },
+        },
+        {
+            name: "AppRequestAny - error",
+            appErr: &common.AppError{
+                Code:    123,
+                Message: "foobar",
+            },
+            appRequestF: func(ctx context.Context, client *p2p.Client, onResponse p2p.AppResponseCallback) error {
+                return client.AppRequestAny(ctx, []byte("foo"), onResponse)
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            require := require.New(t)
+            ctx := context.Background()
+
+            appRequestChan := make(chan struct{})
+            testHandler := p2p.TestHandler{
+                AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) {
+                    if tt.appErr != nil {
+                        return nil, &common.AppError{
+                            Code:    123,
+                            Message: tt.appErr.Error(),
+                        }
+                    }
+
+                    return tt.appResponse, nil
+                },
+            }
+
+            client := NewClient(t, ctx, testHandler)
+            require.NoError(tt.appRequestF(
+                ctx,
+                client,
+                func(_ context.Context, _ ids.NodeID, responseBytes []byte, err error) {
+                    require.ErrorIs(err, tt.appErr)
+                    require.Equal(tt.appResponse, responseBytes)
+                    close(appRequestChan)
+                },
+            ))
+            <-appRequestChan
+        })
+    }
+}
diff --git a/proto/pb/sync/sync.pb.go b/proto/pb/sync/sync.pb.go
index 3a80cc22830..22807caaa91 100644
--- a/proto/pb/sync/sync.pb.go
+++ b/proto/pb/sync/sync.pb.go
@@ -21,88 +21,6 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
-// Request represents a request for information during syncing.
-type Request struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Types that are assignable to Message:
-	//
-	//	*Request_RangeProofRequest
-	//	*Request_ChangeProofRequest
-	Message isRequest_Message `protobuf_oneof:"message"`
-}
-
-func (x *Request) Reset() {
-	*x = Request{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_sync_sync_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Request) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Request) ProtoMessage() {}
-
-func (x *Request) ProtoReflect() protoreflect.Message {
-	mi := &file_sync_sync_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Request.ProtoReflect.Descriptor instead.
-func (*Request) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{0} -} - -func (m *Request) GetMessage() isRequest_Message { - if m != nil { - return m.Message - } - return nil -} - -func (x *Request) GetRangeProofRequest() *SyncGetRangeProofRequest { - if x, ok := x.GetMessage().(*Request_RangeProofRequest); ok { - return x.RangeProofRequest - } - return nil -} - -func (x *Request) GetChangeProofRequest() *SyncGetChangeProofRequest { - if x, ok := x.GetMessage().(*Request_ChangeProofRequest); ok { - return x.ChangeProofRequest - } - return nil -} - -type isRequest_Message interface { - isRequest_Message() -} - -type Request_RangeProofRequest struct { - RangeProofRequest *SyncGetRangeProofRequest `protobuf:"bytes,1,opt,name=range_proof_request,json=rangeProofRequest,proto3,oneof"` -} - -type Request_ChangeProofRequest struct { - ChangeProofRequest *SyncGetChangeProofRequest `protobuf:"bytes,2,opt,name=change_proof_request,json=changeProofRequest,proto3,oneof"` -} - -func (*Request_RangeProofRequest) isRequest_Message() {} - -func (*Request_ChangeProofRequest) isRequest_Message() {} - type GetMerkleRootResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -114,7 +32,7 @@ type GetMerkleRootResponse struct { func (x *GetMerkleRootResponse) Reset() { *x = GetMerkleRootResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[1] + mi := &file_sync_sync_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -127,7 +45,7 @@ func (x *GetMerkleRootResponse) String() string { func (*GetMerkleRootResponse) ProtoMessage() {} func (x *GetMerkleRootResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[1] + mi := &file_sync_sync_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -140,7 +58,7 @@ func (x *GetMerkleRootResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMerkleRootResponse.ProtoReflect.Descriptor instead. func (*GetMerkleRootResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{1} + return file_sync_sync_proto_rawDescGZIP(), []int{0} } func (x *GetMerkleRootResponse) GetRootHash() []byte { @@ -161,7 +79,7 @@ type GetProofRequest struct { func (x *GetProofRequest) Reset() { *x = GetProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[2] + mi := &file_sync_sync_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -174,7 +92,7 @@ func (x *GetProofRequest) String() string { func (*GetProofRequest) ProtoMessage() {} func (x *GetProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[2] + mi := &file_sync_sync_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -187,7 +105,7 @@ func (x *GetProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProofRequest.ProtoReflect.Descriptor instead. 
func (*GetProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{2} + return file_sync_sync_proto_rawDescGZIP(), []int{1} } func (x *GetProofRequest) GetKey() []byte { @@ -208,7 +126,7 @@ type GetProofResponse struct { func (x *GetProofResponse) Reset() { *x = GetProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[3] + mi := &file_sync_sync_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -221,7 +139,7 @@ func (x *GetProofResponse) String() string { func (*GetProofResponse) ProtoMessage() {} func (x *GetProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[3] + mi := &file_sync_sync_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -234,7 +152,7 @@ func (x *GetProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProofResponse.ProtoReflect.Descriptor instead. func (*GetProofResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{3} + return file_sync_sync_proto_rawDescGZIP(), []int{2} } func (x *GetProofResponse) GetProof() *Proof { @@ -257,7 +175,7 @@ type Proof struct { func (x *Proof) Reset() { *x = Proof{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[4] + mi := &file_sync_sync_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -270,7 +188,7 @@ func (x *Proof) String() string { func (*Proof) ProtoMessage() {} func (x *Proof) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[4] + mi := &file_sync_sync_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -283,7 +201,7 @@ func (x *Proof) ProtoReflect() protoreflect.Message { // Deprecated: Use Proof.ProtoReflect.Descriptor instead. func (*Proof) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{4} + return file_sync_sync_proto_rawDescGZIP(), []int{3} } func (x *Proof) GetKey() []byte { @@ -325,7 +243,7 @@ type SyncGetChangeProofRequest struct { func (x *SyncGetChangeProofRequest) Reset() { *x = SyncGetChangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[5] + mi := &file_sync_sync_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -338,7 +256,7 @@ func (x *SyncGetChangeProofRequest) String() string { func (*SyncGetChangeProofRequest) ProtoMessage() {} func (x *SyncGetChangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[5] + mi := &file_sync_sync_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -351,7 +269,7 @@ func (x *SyncGetChangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncGetChangeProofRequest.ProtoReflect.Descriptor instead. 
func (*SyncGetChangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{5} + return file_sync_sync_proto_rawDescGZIP(), []int{4} } func (x *SyncGetChangeProofRequest) GetStartRootHash() []byte { @@ -411,7 +329,7 @@ type SyncGetChangeProofResponse struct { func (x *SyncGetChangeProofResponse) Reset() { *x = SyncGetChangeProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[6] + mi := &file_sync_sync_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -424,7 +342,7 @@ func (x *SyncGetChangeProofResponse) String() string { func (*SyncGetChangeProofResponse) ProtoMessage() {} func (x *SyncGetChangeProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[6] + mi := &file_sync_sync_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -437,7 +355,7 @@ func (x *SyncGetChangeProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncGetChangeProofResponse.ProtoReflect.Descriptor instead. func (*SyncGetChangeProofResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{6} + return file_sync_sync_proto_rawDescGZIP(), []int{5} } func (m *SyncGetChangeProofResponse) GetResponse() isSyncGetChangeProofResponse_Response { @@ -492,7 +410,7 @@ type GetChangeProofRequest struct { func (x *GetChangeProofRequest) Reset() { *x = GetChangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[7] + mi := &file_sync_sync_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -505,7 +423,7 @@ func (x *GetChangeProofRequest) String() string { func (*GetChangeProofRequest) ProtoMessage() {} func (x *GetChangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[7] + mi := &file_sync_sync_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -518,7 +436,7 @@ func (x *GetChangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetChangeProofRequest.ProtoReflect.Descriptor instead. func (*GetChangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{7} + return file_sync_sync_proto_rawDescGZIP(), []int{6} } func (x *GetChangeProofRequest) GetStartRootHash() []byte { @@ -571,7 +489,7 @@ type GetChangeProofResponse struct { func (x *GetChangeProofResponse) Reset() { *x = GetChangeProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[8] + mi := &file_sync_sync_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -584,7 +502,7 @@ func (x *GetChangeProofResponse) String() string { func (*GetChangeProofResponse) ProtoMessage() {} func (x *GetChangeProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[8] + mi := &file_sync_sync_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -597,7 +515,7 @@ func (x *GetChangeProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetChangeProofResponse.ProtoReflect.Descriptor instead. 
func (*GetChangeProofResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{8} + return file_sync_sync_proto_rawDescGZIP(), []int{7} } func (m *GetChangeProofResponse) GetResponse() isGetChangeProofResponse_Response { @@ -652,7 +570,7 @@ type VerifyChangeProofRequest struct { func (x *VerifyChangeProofRequest) Reset() { *x = VerifyChangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[9] + mi := &file_sync_sync_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -665,7 +583,7 @@ func (x *VerifyChangeProofRequest) String() string { func (*VerifyChangeProofRequest) ProtoMessage() {} func (x *VerifyChangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[9] + mi := &file_sync_sync_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -678,7 +596,7 @@ func (x *VerifyChangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyChangeProofRequest.ProtoReflect.Descriptor instead. func (*VerifyChangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{9} + return file_sync_sync_proto_rawDescGZIP(), []int{8} } func (x *VerifyChangeProofRequest) GetProof() *ChangeProof { @@ -721,7 +639,7 @@ type VerifyChangeProofResponse struct { func (x *VerifyChangeProofResponse) Reset() { *x = VerifyChangeProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[10] + mi := &file_sync_sync_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -734,7 +652,7 @@ func (x *VerifyChangeProofResponse) String() string { func (*VerifyChangeProofResponse) ProtoMessage() {} func (x *VerifyChangeProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[10] + mi := &file_sync_sync_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -747,7 +665,7 @@ func (x *VerifyChangeProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyChangeProofResponse.ProtoReflect.Descriptor instead. func (*VerifyChangeProofResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{10} + return file_sync_sync_proto_rawDescGZIP(), []int{9} } func (x *VerifyChangeProofResponse) GetError() string { @@ -768,7 +686,7 @@ type CommitChangeProofRequest struct { func (x *CommitChangeProofRequest) Reset() { *x = CommitChangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[11] + mi := &file_sync_sync_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -781,7 +699,7 @@ func (x *CommitChangeProofRequest) String() string { func (*CommitChangeProofRequest) ProtoMessage() {} func (x *CommitChangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[11] + mi := &file_sync_sync_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -794,7 +712,7 @@ func (x *CommitChangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CommitChangeProofRequest.ProtoReflect.Descriptor instead. 
func (*CommitChangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{11} + return file_sync_sync_proto_rawDescGZIP(), []int{10} } func (x *CommitChangeProofRequest) GetProof() *ChangeProof { @@ -821,7 +739,7 @@ type SyncGetRangeProofRequest struct { func (x *SyncGetRangeProofRequest) Reset() { *x = SyncGetRangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[12] + mi := &file_sync_sync_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -834,7 +752,7 @@ func (x *SyncGetRangeProofRequest) String() string { func (*SyncGetRangeProofRequest) ProtoMessage() {} func (x *SyncGetRangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[12] + mi := &file_sync_sync_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -847,7 +765,7 @@ func (x *SyncGetRangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncGetRangeProofRequest.ProtoReflect.Descriptor instead. func (*SyncGetRangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{12} + return file_sync_sync_proto_rawDescGZIP(), []int{11} } func (x *SyncGetRangeProofRequest) GetRootHash() []byte { @@ -899,7 +817,7 @@ type GetRangeProofRequest struct { func (x *GetRangeProofRequest) Reset() { *x = GetRangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[13] + mi := &file_sync_sync_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -912,7 +830,7 @@ func (x *GetRangeProofRequest) String() string { func (*GetRangeProofRequest) ProtoMessage() {} func (x *GetRangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[13] + mi := &file_sync_sync_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -925,7 +843,7 @@ func (x *GetRangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRangeProofRequest.ProtoReflect.Descriptor instead. func (*GetRangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{13} + return file_sync_sync_proto_rawDescGZIP(), []int{12} } func (x *GetRangeProofRequest) GetRootHash() []byte { @@ -967,7 +885,7 @@ type GetRangeProofResponse struct { func (x *GetRangeProofResponse) Reset() { *x = GetRangeProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[14] + mi := &file_sync_sync_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -980,7 +898,7 @@ func (x *GetRangeProofResponse) String() string { func (*GetRangeProofResponse) ProtoMessage() {} func (x *GetRangeProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[14] + mi := &file_sync_sync_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -993,7 +911,7 @@ func (x *GetRangeProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRangeProofResponse.ProtoReflect.Descriptor instead. 
func (*GetRangeProofResponse) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{14} + return file_sync_sync_proto_rawDescGZIP(), []int{13} } func (x *GetRangeProofResponse) GetProof() *RangeProof { @@ -1016,7 +934,7 @@ type CommitRangeProofRequest struct { func (x *CommitRangeProofRequest) Reset() { *x = CommitRangeProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[15] + mi := &file_sync_sync_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1029,7 +947,7 @@ func (x *CommitRangeProofRequest) String() string { func (*CommitRangeProofRequest) ProtoMessage() {} func (x *CommitRangeProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[15] + mi := &file_sync_sync_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1042,7 +960,7 @@ func (x *CommitRangeProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CommitRangeProofRequest.ProtoReflect.Descriptor instead. func (*CommitRangeProofRequest) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{15} + return file_sync_sync_proto_rawDescGZIP(), []int{14} } func (x *CommitRangeProofRequest) GetStartKey() *MaybeBytes { @@ -1079,7 +997,7 @@ type ChangeProof struct { func (x *ChangeProof) Reset() { *x = ChangeProof{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[16] + mi := &file_sync_sync_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1092,7 +1010,7 @@ func (x *ChangeProof) String() string { func (*ChangeProof) ProtoMessage() {} func (x *ChangeProof) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[16] + mi := &file_sync_sync_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1105,7 +1023,7 @@ func (x *ChangeProof) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeProof.ProtoReflect.Descriptor instead. func (*ChangeProof) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{16} + return file_sync_sync_proto_rawDescGZIP(), []int{15} } func (x *ChangeProof) GetStartProof() []*ProofNode { @@ -1142,7 +1060,7 @@ type RangeProof struct { func (x *RangeProof) Reset() { *x = RangeProof{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[17] + mi := &file_sync_sync_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1073,7 @@ func (x *RangeProof) String() string { func (*RangeProof) ProtoMessage() {} func (x *RangeProof) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[17] + mi := &file_sync_sync_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1168,7 +1086,7 @@ func (x *RangeProof) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeProof.ProtoReflect.Descriptor instead. 
func (*RangeProof) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{17} + return file_sync_sync_proto_rawDescGZIP(), []int{16} } func (x *RangeProof) GetStartProof() []*ProofNode { @@ -1205,7 +1123,7 @@ type ProofNode struct { func (x *ProofNode) Reset() { *x = ProofNode{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[18] + mi := &file_sync_sync_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1218,7 +1136,7 @@ func (x *ProofNode) String() string { func (*ProofNode) ProtoMessage() {} func (x *ProofNode) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[18] + mi := &file_sync_sync_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1231,7 +1149,7 @@ func (x *ProofNode) ProtoReflect() protoreflect.Message { // Deprecated: Use ProofNode.ProtoReflect.Descriptor instead. func (*ProofNode) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{18} + return file_sync_sync_proto_rawDescGZIP(), []int{17} } func (x *ProofNode) GetKey() *Key { @@ -1267,7 +1185,7 @@ type KeyChange struct { func (x *KeyChange) Reset() { *x = KeyChange{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[19] + mi := &file_sync_sync_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1280,7 +1198,7 @@ func (x *KeyChange) String() string { func (*KeyChange) ProtoMessage() {} func (x *KeyChange) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[19] + mi := &file_sync_sync_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1293,7 +1211,7 @@ func (x *KeyChange) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyChange.ProtoReflect.Descriptor instead. func (*KeyChange) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{19} + return file_sync_sync_proto_rawDescGZIP(), []int{18} } func (x *KeyChange) GetKey() []byte { @@ -1322,7 +1240,7 @@ type Key struct { func (x *Key) Reset() { *x = Key{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[20] + mi := &file_sync_sync_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1335,7 +1253,7 @@ func (x *Key) String() string { func (*Key) ProtoMessage() {} func (x *Key) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[20] + mi := &file_sync_sync_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1348,7 +1266,7 @@ func (x *Key) ProtoReflect() protoreflect.Message { // Deprecated: Use Key.ProtoReflect.Descriptor instead. 
func (*Key) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{20} + return file_sync_sync_proto_rawDescGZIP(), []int{19} } func (x *Key) GetLength() uint64 { @@ -1379,7 +1297,7 @@ type MaybeBytes struct { func (x *MaybeBytes) Reset() { *x = MaybeBytes{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[21] + mi := &file_sync_sync_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1392,7 +1310,7 @@ func (x *MaybeBytes) String() string { func (*MaybeBytes) ProtoMessage() {} func (x *MaybeBytes) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[21] + mi := &file_sync_sync_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1405,7 +1323,7 @@ func (x *MaybeBytes) ProtoReflect() protoreflect.Message { // Deprecated: Use MaybeBytes.ProtoReflect.Descriptor instead. func (*MaybeBytes) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{21} + return file_sync_sync_proto_rawDescGZIP(), []int{20} } func (x *MaybeBytes) GetValue() []byte { @@ -1434,7 +1352,7 @@ type KeyValue struct { func (x *KeyValue) Reset() { *x = KeyValue{} if protoimpl.UnsafeEnabled { - mi := &file_sync_sync_proto_msgTypes[22] + mi := &file_sync_sync_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1447,7 +1365,7 @@ func (x *KeyValue) String() string { func (*KeyValue) ProtoMessage() {} func (x *KeyValue) ProtoReflect() protoreflect.Message { - mi := &file_sync_sync_proto_msgTypes[22] + mi := &file_sync_sync_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,7 +1378,7 @@ func (x *KeyValue) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. 
func (*KeyValue) Descriptor() ([]byte, []int) { - return file_sync_sync_proto_rawDescGZIP(), []int{22} + return file_sync_sync_proto_rawDescGZIP(), []int{21} } func (x *KeyValue) GetKey() []byte { @@ -1483,103 +1401,104 @@ var file_sync_sync_proto_rawDesc = []byte{ 0x0a, 0x0f, 0x73, 0x79, 0x6e, 0x63, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x50, 0x0a, 0x13, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, - 0x11, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x53, 0x0a, 0x14, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, - 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x34, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, - 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, - 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x35, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, - 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xff, - 0x01, 0x0a, 0x19, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 
0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x6f, 0x6f, 0x74, - 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x65, 0x6e, 0x64, - 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x34, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, + 0x35, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x22, 0xff, 0x01, 0x0a, 0x19, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x6f, + 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x65, + 0x6e, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, + 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, + 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 
0x6d, 0x69, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x22, 0x95, 0x01, 0x0a, 0x1a, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x33, 0x0a, 0x0b, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x48, 0x00, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x47, + 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0d, + 0x65, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, + 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, + 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x6f, 0x6f, 0x74, 0x4e, 0x6f, 0x74, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x18, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x27, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 
0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, + 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, - 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, - 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x22, 0x95, 0x01, 0x0a, 0x1a, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x33, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, - 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, - 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x6e, - 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, - 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 
0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x6f, 0x74, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x6f, 0x6f, 0x74, 0x4e, 0x6f, 0x74, 0x50, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xcb, 0x01, 0x0a, 0x18, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, - 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, - 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, - 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, - 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, - 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, - 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x65, 0x78, - 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x31, - 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x43, 0x0a, 0x18, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, - 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, - 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x47, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, + 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x31, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x43, 0x0a, 0x18, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x27, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, + 0x63, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, + 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, @@ -1590,123 +1509,110 @@ var file_sync_sync_proto_rawDesc = []byte{ 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, - 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 
0x22, 0x3f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, - 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, - 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xa6, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, - 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, - 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x0b, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, - 0x9f, 0x01, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, - 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, - 0x30, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x12, 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x12, 0x2d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, - 0xd6, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x09, 0x2e, 0x73, 0x79, 0x6e, - 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x0d, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x72, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x39, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, - 0x6f, 0x64, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x3b, 0x0a, 0x0d, 0x43, - 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x45, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, - 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x33, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x41, 0x0a, 0x0a, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x6e, - 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, - 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xc3, 0x04, 0x0a, 0x02, - 0x44, 0x42, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, - 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x79, - 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, - 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 
0x12, 0x15, 0x2e, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, - 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, - 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, - 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, - 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, - 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, - 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, - 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x3f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xa6, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 
0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, + 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, + 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x31, + 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x22, 0x9f, 0x01, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x12, 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x12, 0x30, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, + 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x12, 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x22, 0xd6, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x0d, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x72, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x39, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 
0x72, 0x65, 0x6e, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x3b, 0x0a, + 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x45, 0x0a, 0x09, 0x4b, 0x65, + 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, + 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x33, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x41, 0x0a, 0x0a, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, + 0x5f, 0x6e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x69, 0x73, 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xc3, 0x04, + 0x0a, 0x02, 0x44, 0x42, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, + 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, + 0x65, 0x61, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x15, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, + 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 
0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x1a, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, + 0x73, 0x79, 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1721,90 +1627,87 @@ func file_sync_sync_proto_rawDescGZIP() []byte { return file_sync_sync_proto_rawDescData } -var file_sync_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_sync_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_sync_sync_proto_goTypes = []interface{}{ - (*Request)(nil), // 0: sync.Request - (*GetMerkleRootResponse)(nil), // 1: sync.GetMerkleRootResponse - (*GetProofRequest)(nil), // 2: sync.GetProofRequest - (*GetProofResponse)(nil), // 3: sync.GetProofResponse - (*Proof)(nil), // 4: sync.Proof - (*SyncGetChangeProofRequest)(nil), // 5: sync.SyncGetChangeProofRequest - (*SyncGetChangeProofResponse)(nil), // 6: sync.SyncGetChangeProofResponse - (*GetChangeProofRequest)(nil), // 7: sync.GetChangeProofRequest - (*GetChangeProofResponse)(nil), // 8: sync.GetChangeProofResponse - (*VerifyChangeProofRequest)(nil), // 9: sync.VerifyChangeProofRequest - (*VerifyChangeProofResponse)(nil), // 10: sync.VerifyChangeProofResponse - 
(*CommitChangeProofRequest)(nil), // 11: sync.CommitChangeProofRequest - (*SyncGetRangeProofRequest)(nil), // 12: sync.SyncGetRangeProofRequest - (*GetRangeProofRequest)(nil), // 13: sync.GetRangeProofRequest - (*GetRangeProofResponse)(nil), // 14: sync.GetRangeProofResponse - (*CommitRangeProofRequest)(nil), // 15: sync.CommitRangeProofRequest - (*ChangeProof)(nil), // 16: sync.ChangeProof - (*RangeProof)(nil), // 17: sync.RangeProof - (*ProofNode)(nil), // 18: sync.ProofNode - (*KeyChange)(nil), // 19: sync.KeyChange - (*Key)(nil), // 20: sync.Key - (*MaybeBytes)(nil), // 21: sync.MaybeBytes - (*KeyValue)(nil), // 22: sync.KeyValue - nil, // 23: sync.ProofNode.ChildrenEntry - (*emptypb.Empty)(nil), // 24: google.protobuf.Empty + (*GetMerkleRootResponse)(nil), // 0: sync.GetMerkleRootResponse + (*GetProofRequest)(nil), // 1: sync.GetProofRequest + (*GetProofResponse)(nil), // 2: sync.GetProofResponse + (*Proof)(nil), // 3: sync.Proof + (*SyncGetChangeProofRequest)(nil), // 4: sync.SyncGetChangeProofRequest + (*SyncGetChangeProofResponse)(nil), // 5: sync.SyncGetChangeProofResponse + (*GetChangeProofRequest)(nil), // 6: sync.GetChangeProofRequest + (*GetChangeProofResponse)(nil), // 7: sync.GetChangeProofResponse + (*VerifyChangeProofRequest)(nil), // 8: sync.VerifyChangeProofRequest + (*VerifyChangeProofResponse)(nil), // 9: sync.VerifyChangeProofResponse + (*CommitChangeProofRequest)(nil), // 10: sync.CommitChangeProofRequest + (*SyncGetRangeProofRequest)(nil), // 11: sync.SyncGetRangeProofRequest + (*GetRangeProofRequest)(nil), // 12: sync.GetRangeProofRequest + (*GetRangeProofResponse)(nil), // 13: sync.GetRangeProofResponse + (*CommitRangeProofRequest)(nil), // 14: sync.CommitRangeProofRequest + (*ChangeProof)(nil), // 15: sync.ChangeProof + (*RangeProof)(nil), // 16: sync.RangeProof + (*ProofNode)(nil), // 17: sync.ProofNode + (*KeyChange)(nil), // 18: sync.KeyChange + (*Key)(nil), // 19: sync.Key + (*MaybeBytes)(nil), // 20: sync.MaybeBytes + (*KeyValue)(nil), // 21: sync.KeyValue + nil, // 22: sync.ProofNode.ChildrenEntry + (*emptypb.Empty)(nil), // 23: google.protobuf.Empty } var file_sync_sync_proto_depIdxs = []int32{ - 12, // 0: sync.Request.range_proof_request:type_name -> sync.SyncGetRangeProofRequest - 5, // 1: sync.Request.change_proof_request:type_name -> sync.SyncGetChangeProofRequest - 4, // 2: sync.GetProofResponse.proof:type_name -> sync.Proof - 21, // 3: sync.Proof.value:type_name -> sync.MaybeBytes - 18, // 4: sync.Proof.proof:type_name -> sync.ProofNode - 21, // 5: sync.SyncGetChangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 6: sync.SyncGetChangeProofRequest.end_key:type_name -> sync.MaybeBytes - 16, // 7: sync.SyncGetChangeProofResponse.change_proof:type_name -> sync.ChangeProof - 17, // 8: sync.SyncGetChangeProofResponse.range_proof:type_name -> sync.RangeProof - 21, // 9: sync.GetChangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 10: sync.GetChangeProofRequest.end_key:type_name -> sync.MaybeBytes - 16, // 11: sync.GetChangeProofResponse.change_proof:type_name -> sync.ChangeProof - 16, // 12: sync.VerifyChangeProofRequest.proof:type_name -> sync.ChangeProof - 21, // 13: sync.VerifyChangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 14: sync.VerifyChangeProofRequest.end_key:type_name -> sync.MaybeBytes - 16, // 15: sync.CommitChangeProofRequest.proof:type_name -> sync.ChangeProof - 21, // 16: sync.SyncGetRangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 17: 
sync.SyncGetRangeProofRequest.end_key:type_name -> sync.MaybeBytes - 21, // 18: sync.GetRangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 19: sync.GetRangeProofRequest.end_key:type_name -> sync.MaybeBytes - 17, // 20: sync.GetRangeProofResponse.proof:type_name -> sync.RangeProof - 21, // 21: sync.CommitRangeProofRequest.start_key:type_name -> sync.MaybeBytes - 21, // 22: sync.CommitRangeProofRequest.end_key:type_name -> sync.MaybeBytes - 17, // 23: sync.CommitRangeProofRequest.range_proof:type_name -> sync.RangeProof - 18, // 24: sync.ChangeProof.start_proof:type_name -> sync.ProofNode - 18, // 25: sync.ChangeProof.end_proof:type_name -> sync.ProofNode - 19, // 26: sync.ChangeProof.key_changes:type_name -> sync.KeyChange - 18, // 27: sync.RangeProof.start_proof:type_name -> sync.ProofNode - 18, // 28: sync.RangeProof.end_proof:type_name -> sync.ProofNode - 22, // 29: sync.RangeProof.key_values:type_name -> sync.KeyValue - 20, // 30: sync.ProofNode.key:type_name -> sync.Key - 21, // 31: sync.ProofNode.value_or_hash:type_name -> sync.MaybeBytes - 23, // 32: sync.ProofNode.children:type_name -> sync.ProofNode.ChildrenEntry - 21, // 33: sync.KeyChange.value:type_name -> sync.MaybeBytes - 24, // 34: sync.DB.GetMerkleRoot:input_type -> google.protobuf.Empty - 24, // 35: sync.DB.Clear:input_type -> google.protobuf.Empty - 2, // 36: sync.DB.GetProof:input_type -> sync.GetProofRequest - 7, // 37: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest - 9, // 38: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest - 11, // 39: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest - 13, // 40: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest - 15, // 41: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest - 1, // 42: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse - 24, // 43: sync.DB.Clear:output_type -> google.protobuf.Empty - 3, // 44: sync.DB.GetProof:output_type -> sync.GetProofResponse - 8, // 45: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse - 10, // 46: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse - 24, // 47: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty - 14, // 48: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse - 24, // 49: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty - 42, // [42:50] is the sub-list for method output_type - 34, // [34:42] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 3, // 0: sync.GetProofResponse.proof:type_name -> sync.Proof + 20, // 1: sync.Proof.value:type_name -> sync.MaybeBytes + 17, // 2: sync.Proof.proof:type_name -> sync.ProofNode + 20, // 3: sync.SyncGetChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 4: sync.SyncGetChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 15, // 5: sync.SyncGetChangeProofResponse.change_proof:type_name -> sync.ChangeProof + 16, // 6: sync.SyncGetChangeProofResponse.range_proof:type_name -> sync.RangeProof + 20, // 7: sync.GetChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 8: sync.GetChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 15, // 9: sync.GetChangeProofResponse.change_proof:type_name -> sync.ChangeProof + 15, // 10: sync.VerifyChangeProofRequest.proof:type_name -> sync.ChangeProof + 20, // 11: 
sync.VerifyChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 12: sync.VerifyChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 15, // 13: sync.CommitChangeProofRequest.proof:type_name -> sync.ChangeProof + 20, // 14: sync.SyncGetRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 15: sync.SyncGetRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 20, // 16: sync.GetRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 17: sync.GetRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 16, // 18: sync.GetRangeProofResponse.proof:type_name -> sync.RangeProof + 20, // 19: sync.CommitRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 20, // 20: sync.CommitRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 16, // 21: sync.CommitRangeProofRequest.range_proof:type_name -> sync.RangeProof + 17, // 22: sync.ChangeProof.start_proof:type_name -> sync.ProofNode + 17, // 23: sync.ChangeProof.end_proof:type_name -> sync.ProofNode + 18, // 24: sync.ChangeProof.key_changes:type_name -> sync.KeyChange + 17, // 25: sync.RangeProof.start_proof:type_name -> sync.ProofNode + 17, // 26: sync.RangeProof.end_proof:type_name -> sync.ProofNode + 21, // 27: sync.RangeProof.key_values:type_name -> sync.KeyValue + 19, // 28: sync.ProofNode.key:type_name -> sync.Key + 20, // 29: sync.ProofNode.value_or_hash:type_name -> sync.MaybeBytes + 22, // 30: sync.ProofNode.children:type_name -> sync.ProofNode.ChildrenEntry + 20, // 31: sync.KeyChange.value:type_name -> sync.MaybeBytes + 23, // 32: sync.DB.GetMerkleRoot:input_type -> google.protobuf.Empty + 23, // 33: sync.DB.Clear:input_type -> google.protobuf.Empty + 1, // 34: sync.DB.GetProof:input_type -> sync.GetProofRequest + 6, // 35: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest + 8, // 36: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest + 10, // 37: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest + 12, // 38: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest + 14, // 39: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest + 0, // 40: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse + 23, // 41: sync.DB.Clear:output_type -> google.protobuf.Empty + 2, // 42: sync.DB.GetProof:output_type -> sync.GetProofResponse + 7, // 43: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse + 9, // 44: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse + 23, // 45: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty + 13, // 46: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse + 23, // 47: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty + 40, // [40:48] is the sub-list for method output_type + 32, // [32:40] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { file_sync_sync_proto_init() } @@ -1814,18 +1717,6 @@ func file_sync_sync_proto_init() { } if !protoimpl.UnsafeEnabled { file_sync_sync_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sync_sync_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMerkleRootResponse); i { case 0: return &v.state @@ -1837,7 
+1728,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProofRequest); i { case 0: return &v.state @@ -1849,7 +1740,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProofResponse); i { case 0: return &v.state @@ -1861,7 +1752,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Proof); i { case 0: return &v.state @@ -1873,7 +1764,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SyncGetChangeProofRequest); i { case 0: return &v.state @@ -1885,7 +1776,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SyncGetChangeProofResponse); i { case 0: return &v.state @@ -1897,7 +1788,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetChangeProofRequest); i { case 0: return &v.state @@ -1909,7 +1800,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetChangeProofResponse); i { case 0: return &v.state @@ -1921,7 +1812,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VerifyChangeProofRequest); i { case 0: return &v.state @@ -1933,7 +1824,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VerifyChangeProofResponse); i { case 0: return &v.state @@ -1945,7 +1836,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CommitChangeProofRequest); i { case 0: return &v.state @@ -1957,7 +1848,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SyncGetRangeProofRequest); i { case 0: return &v.state @@ -1969,7 +1860,7 @@ func file_sync_sync_proto_init() { return nil } } - 
file_sync_sync_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetRangeProofRequest); i { case 0: return &v.state @@ -1981,7 +1872,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetRangeProofResponse); i { case 0: return &v.state @@ -1993,7 +1884,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CommitRangeProofRequest); i { case 0: return &v.state @@ -2005,7 +1896,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChangeProof); i { case 0: return &v.state @@ -2017,7 +1908,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RangeProof); i { case 0: return &v.state @@ -2029,7 +1920,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProofNode); i { case 0: return &v.state @@ -2041,7 +1932,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*KeyChange); i { case 0: return &v.state @@ -2053,7 +1944,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Key); i { case 0: return &v.state @@ -2065,7 +1956,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MaybeBytes); i { case 0: return &v.state @@ -2077,7 +1968,7 @@ func file_sync_sync_proto_init() { return nil } } - file_sync_sync_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_sync_sync_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*KeyValue); i { case 0: return &v.state @@ -2090,15 +1981,11 @@ func file_sync_sync_proto_init() { } } } - file_sync_sync_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Request_RangeProofRequest)(nil), - (*Request_ChangeProofRequest)(nil), - } - file_sync_sync_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_sync_sync_proto_msgTypes[5].OneofWrappers = []interface{}{ (*SyncGetChangeProofResponse_ChangeProof)(nil), (*SyncGetChangeProofResponse_RangeProof)(nil), } - file_sync_sync_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_sync_sync_proto_msgTypes[7].OneofWrappers 
= []interface{}{ (*GetChangeProofResponse_ChangeProof)(nil), (*GetChangeProofResponse_RootNotPresent)(nil), } @@ -2108,7 +1995,7 @@ func file_sync_sync_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sync_sync_proto_rawDesc, NumEnums: 0, - NumMessages: 24, + NumMessages: 23, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/sync/sync.proto b/proto/sync/sync.proto index 1a799433d7e..13962e39e79 100644 --- a/proto/sync/sync.proto +++ b/proto/sync/sync.proto @@ -6,14 +6,6 @@ import "google/protobuf/empty.proto"; option go_package = "github.com/ava-labs/avalanchego/proto/pb/sync"; -// Request represents a request for information during syncing. -message Request { - oneof message { - SyncGetRangeProofRequest range_proof_request = 1; - SyncGetChangeProofRequest change_proof_request = 2; - } -} - // The interface required by an x/sync/SyncManager for syncing. // Note this service definition only exists for use in tests. // A database shouldn't expose this over the internet, as it diff --git a/scripts/mocks.mockgen.source.txt b/scripts/mocks.mockgen.source.txt index 1862ac01586..b1112e45650 100644 --- a/scripts/mocks.mockgen.source.txt +++ b/scripts/mocks.mockgen.source.txt @@ -14,4 +14,3 @@ vms/platformvm/state/state.go=State=MockChain=vms/platformvm/state/mock_chain.go vms/platformvm/txs/unsigned_tx.go==UnsignedTx=vms/platformvm/txs/txsmock/unsigned_tx.go vms/proposervm/block.go=Block=MockPostForkBlock=vms/proposervm/mock_post_fork_block.go x/merkledb/db.go=ChangeProofer,RangeProofer,Clearer,Prefetcher=MockMerkleDB=x/merkledb/mock_db.go -x/sync/client.go==MockClient=x/sync/mock_client.go diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 256fcf1a11d..2f568e1a372 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -39,4 +39,3 @@ github.com/ava-labs/avalanchego/vms/registry=VMGetter=vms/registry/registrymock/ github.com/ava-labs/avalanchego/vms/registry=VMRegistry=vms/registry/registrymock/vm_registry.go github.com/ava-labs/avalanchego/vms=Factory=vms/vmsmock/factory.go github.com/ava-labs/avalanchego/vms=Manager=vms/vmsmock/manager.go -github.com/ava-labs/avalanchego/x/sync=NetworkClient=x/sync/syncmock/network_client.go diff --git a/x/merkledb/mock_db.go b/x/merkledb/mock_db.go deleted file mode 100644 index c3bf69cf22f..00000000000 --- a/x/merkledb/mock_db.go +++ /dev/null @@ -1,491 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: x/merkledb/db.go -// -// Generated by this command: -// -// mockgen -source=x/merkledb/db.go -destination=x/merkledb/mock_db.go -package=merkledb -exclude_interfaces=ChangeProofer,RangeProofer,Clearer,Prefetcher -mock_names=MockMerkleDB=MockMerkleDB -// - -// Package merkledb is a generated GoMock package. -package merkledb - -import ( - context "context" - reflect "reflect" - - database "github.com/ava-labs/avalanchego/database" - ids "github.com/ava-labs/avalanchego/ids" - maybe "github.com/ava-labs/avalanchego/utils/maybe" - gomock "go.uber.org/mock/gomock" -) - -// MockMerkleDB is a mock of MerkleDB interface. -type MockMerkleDB struct { - ctrl *gomock.Controller - recorder *MockMerkleDBMockRecorder -} - -// MockMerkleDBMockRecorder is the mock recorder for MockMerkleDB. -type MockMerkleDBMockRecorder struct { - mock *MockMerkleDB -} - -// NewMockMerkleDB creates a new mock instance. 
-func NewMockMerkleDB(ctrl *gomock.Controller) *MockMerkleDB { - mock := &MockMerkleDB{ctrl: ctrl} - mock.recorder = &MockMerkleDBMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { - return m.recorder -} - -// Clear mocks base method. -func (m *MockMerkleDB) Clear() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Clear") - ret0, _ := ret[0].(error) - return ret0 -} - -// Clear indicates an expected call of Clear. -func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) -} - -// Close mocks base method. -func (m *MockMerkleDB) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockMerkleDBMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMerkleDB)(nil).Close)) -} - -// CommitChangeProof mocks base method. -func (m *MockMerkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitChangeProof", ctx, proof) - ret0, _ := ret[0].(error) - return ret0 -} - -// CommitChangeProof indicates an expected call of CommitChangeProof. -func (mr *MockMerkleDBMockRecorder) CommitChangeProof(ctx, proof any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), ctx, proof) -} - -// CommitRangeProof mocks base method. -func (m *MockMerkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitRangeProof", ctx, start, end, proof) - ret0, _ := ret[0].(error) - return ret0 -} - -// CommitRangeProof indicates an expected call of CommitRangeProof. -func (mr *MockMerkleDBMockRecorder) CommitRangeProof(ctx, start, end, proof any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), ctx, start, end, proof) -} - -// Compact mocks base method. -func (m *MockMerkleDB) Compact(start, limit []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Compact", start, limit) - ret0, _ := ret[0].(error) - return ret0 -} - -// Compact indicates an expected call of Compact. -func (mr *MockMerkleDBMockRecorder) Compact(start, limit any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), start, limit) -} - -// Delete mocks base method. -func (m *MockMerkleDB) Delete(key []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", key) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockMerkleDBMockRecorder) Delete(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), key) -} - -// Get mocks base method. 
-func (m *MockMerkleDB) Get(key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockMerkleDBMockRecorder) Get(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), key) -} - -// GetChangeProof mocks base method. -func (m *MockMerkleDB) GetChangeProof(ctx context.Context, startRootID, endRootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*ChangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChangeProof", ctx, startRootID, endRootID, start, end, maxLength) - ret0, _ := ret[0].(*ChangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChangeProof indicates an expected call of GetChangeProof. -func (mr *MockMerkleDBMockRecorder) GetChangeProof(ctx, startRootID, endRootID, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), ctx, startRootID, endRootID, start, end, maxLength) -} - -// GetMerkleRoot mocks base method. -func (m *MockMerkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMerkleRoot", ctx) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMerkleRoot indicates an expected call of GetMerkleRoot. -func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), ctx) -} - -// GetProof mocks base method. -func (m *MockMerkleDB) GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProof", ctx, keyBytes) - ret0, _ := ret[0].(*Proof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProof indicates an expected call of GetProof. -func (mr *MockMerkleDBMockRecorder) GetProof(ctx, keyBytes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), ctx, keyBytes) -} - -// GetRangeProof mocks base method. -func (m *MockMerkleDB) GetRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProof", ctx, start, end, maxLength) - ret0, _ := ret[0].(*RangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRangeProof indicates an expected call of GetRangeProof. -func (mr *MockMerkleDBMockRecorder) GetRangeProof(ctx, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), ctx, start, end, maxLength) -} - -// GetRangeProofAtRoot mocks base method. -func (m *MockMerkleDB) GetRangeProofAtRoot(ctx context.Context, rootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProofAtRoot", ctx, rootID, start, end, maxLength) - ret0, _ := ret[0].(*RangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRangeProofAtRoot indicates an expected call of GetRangeProofAtRoot. 
-func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(ctx, rootID, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), ctx, rootID, start, end, maxLength) -} - -// GetValue mocks base method. -func (m *MockMerkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValue", ctx, key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetValue indicates an expected call of GetValue. -func (mr *MockMerkleDBMockRecorder) GetValue(ctx, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), ctx, key) -} - -// GetValues mocks base method. -func (m *MockMerkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValues", ctx, keys) - ret0, _ := ret[0].([][]byte) - ret1, _ := ret[1].([]error) - return ret0, ret1 -} - -// GetValues indicates an expected call of GetValues. -func (mr *MockMerkleDBMockRecorder) GetValues(ctx, keys any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), ctx, keys) -} - -// Has mocks base method. -func (m *MockMerkleDB) Has(key []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", key) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockMerkleDBMockRecorder) Has(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), key) -} - -// HealthCheck mocks base method. -func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (any, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(any) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HealthCheck indicates an expected call of HealthCheck. -func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockMerkleDB)(nil).HealthCheck), arg0) -} - -// NewBatch mocks base method. -func (m *MockMerkleDB) NewBatch() database.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatch") - ret0, _ := ret[0].(database.Batch) - return ret0 -} - -// NewBatch indicates an expected call of NewBatch. -func (mr *MockMerkleDBMockRecorder) NewBatch() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockMerkleDB)(nil).NewBatch)) -} - -// NewIterator mocks base method. -func (m *MockMerkleDB) NewIterator() database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIterator") - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIterator indicates an expected call of NewIterator. -func (mr *MockMerkleDBMockRecorder) NewIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIterator", reflect.TypeOf((*MockMerkleDB)(nil).NewIterator)) -} - -// NewIteratorWithPrefix mocks base method. 
-func (m *MockMerkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithPrefix", prefix) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(prefix any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), prefix) -} - -// NewIteratorWithStart mocks base method. -func (m *MockMerkleDB) NewIteratorWithStart(start []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStart", start) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithStart indicates an expected call of NewIteratorWithStart. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(start any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), start) -} - -// NewIteratorWithStartAndPrefix mocks base method. -func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", start, prefix) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(start, prefix any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), start, prefix) -} - -// NewView mocks base method. -func (m *MockMerkleDB) NewView(ctx context.Context, changes ViewChanges) (View, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewView", ctx, changes) - ret0, _ := ret[0].(View) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewView indicates an expected call of NewView. -func (mr *MockMerkleDBMockRecorder) NewView(ctx, changes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), ctx, changes) -} - -// PrefetchPath mocks base method. -func (m *MockMerkleDB) PrefetchPath(key []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPath", key) - ret0, _ := ret[0].(error) - return ret0 -} - -// PrefetchPath indicates an expected call of PrefetchPath. -func (mr *MockMerkleDBMockRecorder) PrefetchPath(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), key) -} - -// PrefetchPaths mocks base method. -func (m *MockMerkleDB) PrefetchPaths(keys [][]byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPaths", keys) - ret0, _ := ret[0].(error) - return ret0 -} - -// PrefetchPaths indicates an expected call of PrefetchPaths. -func (mr *MockMerkleDBMockRecorder) PrefetchPaths(keys any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), keys) -} - -// Put mocks base method. 
-func (m *MockMerkleDB) Put(key, value []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Put", key, value) - ret0, _ := ret[0].(error) - return ret0 -} - -// Put indicates an expected call of Put. -func (mr *MockMerkleDBMockRecorder) Put(key, value any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), key, value) -} - -// VerifyChangeProof mocks base method. -func (m *MockMerkleDB) VerifyChangeProof(ctx context.Context, proof *ChangeProof, start, end maybe.Maybe[[]byte], expectedEndRootID ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyChangeProof", ctx, proof, start, end, expectedEndRootID) - ret0, _ := ret[0].(error) - return ret0 -} - -// VerifyChangeProof indicates an expected call of VerifyChangeProof. -func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(ctx, proof, start, end, expectedEndRootID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), ctx, proof, start, end, expectedEndRootID) -} - -// getEditableNode mocks base method. -func (m *MockMerkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getEditableNode", key, hasValue) - ret0, _ := ret[0].(*node) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getEditableNode indicates an expected call of getEditableNode. -func (mr *MockMerkleDBMockRecorder) getEditableNode(key, hasValue any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), key, hasValue) -} - -// getNode mocks base method. -func (m *MockMerkleDB) getNode(key Key, hasValue bool) (*node, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getNode", key, hasValue) - ret0, _ := ret[0].(*node) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getNode indicates an expected call of getNode. -func (mr *MockMerkleDBMockRecorder) getNode(key, hasValue any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getNode", reflect.TypeOf((*MockMerkleDB)(nil).getNode), key, hasValue) -} - -// getRoot mocks base method. -func (m *MockMerkleDB) getRoot() maybe.Maybe[*node] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getRoot") - ret0, _ := ret[0].(maybe.Maybe[*node]) - return ret0 -} - -// getRoot indicates an expected call of getRoot. -func (mr *MockMerkleDBMockRecorder) getRoot() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getRoot", reflect.TypeOf((*MockMerkleDB)(nil).getRoot)) -} - -// getTokenSize mocks base method. -func (m *MockMerkleDB) getTokenSize() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getTokenSize") - ret0, _ := ret[0].(int) - return ret0 -} - -// getTokenSize indicates an expected call of getTokenSize. -func (mr *MockMerkleDBMockRecorder) getTokenSize() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTokenSize", reflect.TypeOf((*MockMerkleDB)(nil).getTokenSize)) -} - -// getValue mocks base method. -func (m *MockMerkleDB) getValue(key Key) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getValue", key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getValue indicates an expected call of getValue. 
-func (mr *MockMerkleDBMockRecorder) getValue(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), key) -} diff --git a/x/sync/client.go b/x/sync/client.go deleted file mode 100644 index cc983db3fcb..00000000000 --- a/x/sync/client.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "context" - "errors" - "fmt" - "math" - "sync/atomic" - "time" - - "go.uber.org/zap" - "google.golang.org/protobuf/proto" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/maybe" - "github.com/ava-labs/avalanchego/x/merkledb" - - pb "github.com/ava-labs/avalanchego/proto/pb/sync" -) - -const ( - initialRetryWait = 10 * time.Millisecond - maxRetryWait = time.Second - retryWaitFactor = 1.5 // Larger --> timeout grows more quickly - - epsilon = 1e-6 // small amount to add to time to avoid division by 0 -) - -var ( - _ Client = (*client)(nil) - - errInvalidRangeProof = errors.New("failed to verify range proof") - errInvalidChangeProof = errors.New("failed to verify change proof") - errTooManyKeys = errors.New("response contains more than requested keys") - errTooManyBytes = errors.New("response contains more than requested bytes") - errUnexpectedChangeProofResponse = errors.New("unexpected response type") -) - -// Client synchronously fetches data from the network -// to fulfill state sync requests. -// Repeatedly retries failed requests until the context is canceled. -type Client interface { - // GetRangeProof synchronously sends the given request - // and returns the parsed response. - // This method verifies the range proof before returning it. - GetRangeProof( - ctx context.Context, - request *pb.SyncGetRangeProofRequest, - ) (*merkledb.RangeProof, error) - - // GetChangeProof synchronously sends the given request - // and returns the parsed response. - // This method verifies the change proof / range proof - // before returning it. - // If the server responds with a change proof, - // it's verified using [verificationDB]. - GetChangeProof( - ctx context.Context, - request *pb.SyncGetChangeProofRequest, - verificationDB DB, - ) (*merkledb.ChangeOrRangeProof, error) -} - -type client struct { - networkClient NetworkClient - stateSyncNodes []ids.NodeID - stateSyncNodeIdx uint32 - log logging.Logger - metrics SyncMetrics - tokenSize int - hasher merkledb.Hasher -} - -type ClientConfig struct { - NetworkClient NetworkClient - StateSyncNodeIDs []ids.NodeID - Log logging.Logger - Metrics SyncMetrics - BranchFactor merkledb.BranchFactor - // If not specified, [merkledb.DefaultHasher] will be used. - Hasher merkledb.Hasher -} - -func NewClient(config *ClientConfig) (Client, error) { - if err := config.BranchFactor.Valid(); err != nil { - return nil, err - } - hasher := config.Hasher - if hasher == nil { - hasher = merkledb.DefaultHasher - } - return &client{ - networkClient: config.NetworkClient, - stateSyncNodes: config.StateSyncNodeIDs, - log: config.Log, - metrics: config.Metrics, - tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], - hasher: hasher, - }, nil -} - -// GetChangeProof synchronously retrieves the change proof given by [req]. -// Upon failure, retries until the context is expired. -// The returned change proof is verified. 
-func (c *client) GetChangeProof( - ctx context.Context, - req *pb.SyncGetChangeProofRequest, - db DB, -) (*merkledb.ChangeOrRangeProof, error) { - parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.ChangeOrRangeProof, error) { - if len(responseBytes) > int(req.BytesLimit) { - return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyBytes, len(responseBytes), req.BytesLimit) - } - - var changeProofResp pb.SyncGetChangeProofResponse - if err := proto.Unmarshal(responseBytes, &changeProofResp); err != nil { - return nil, err - } - - startKey := maybeBytesToMaybe(req.StartKey) - endKey := maybeBytesToMaybe(req.EndKey) - - switch changeProofResp := changeProofResp.Response.(type) { - case *pb.SyncGetChangeProofResponse_ChangeProof: - // The server had enough history to send us a change proof - var changeProof merkledb.ChangeProof - if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof); err != nil { - return nil, err - } - - // Ensure the response does not contain more than the requested number of leaves - // and the start and end roots match the requested roots. - if len(changeProof.KeyChanges) > int(req.KeyLimit) { - return nil, fmt.Errorf( - "%w: (%d) > %d)", - errTooManyKeys, len(changeProof.KeyChanges), req.KeyLimit, - ) - } - - endRoot, err := ids.ToID(req.EndRootHash) - if err != nil { - return nil, err - } - - if err := db.VerifyChangeProof( - ctx, - &changeProof, - startKey, - endKey, - endRoot, - ); err != nil { - return nil, fmt.Errorf("%w due to %w", errInvalidChangeProof, err) - } - - return &merkledb.ChangeOrRangeProof{ - ChangeProof: &changeProof, - }, nil - case *pb.SyncGetChangeProofResponse_RangeProof: - - var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { - return nil, err - } - - // The server did not have enough history to send us a change proof - // so they sent a range proof instead. - err := verifyRangeProof( - ctx, - &rangeProof, - int(req.KeyLimit), - startKey, - endKey, - req.EndRootHash, - c.tokenSize, - c.hasher, - ) - if err != nil { - return nil, err - } - - return &merkledb.ChangeOrRangeProof{ - RangeProof: &rangeProof, - }, nil - default: - return nil, fmt.Errorf( - "%w: %T", - errUnexpectedChangeProofResponse, changeProofResp, - ) - } - } - - reqBytes, err := proto.Marshal(&pb.Request{ - Message: &pb.Request_ChangeProofRequest{ - ChangeProofRequest: req, - }, - }) - if err != nil { - return nil, err - } - return getAndParse(ctx, c, reqBytes, parseFn) -} - -// Verify [rangeProof] is a valid range proof for keys in [start, end] for -// root [rootBytes]. Returns [errTooManyKeys] if the response contains more -// than [keyLimit] keys. -func verifyRangeProof( - ctx context.Context, - rangeProof *merkledb.RangeProof, - keyLimit int, - start maybe.Maybe[[]byte], - end maybe.Maybe[[]byte], - rootBytes []byte, - tokenSize int, - hasher merkledb.Hasher, -) error { - root, err := ids.ToID(rootBytes) - if err != nil { - return err - } - - // Ensure the response does not contain more than the maximum requested number of leaves. - if len(rangeProof.KeyValues) > keyLimit { - return fmt.Errorf( - "%w: (%d) > %d)", - errTooManyKeys, len(rangeProof.KeyValues), keyLimit, - ) - } - - if err := rangeProof.Verify( - ctx, - start, - end, - root, - tokenSize, - hasher, - ); err != nil { - return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) - } - return nil -} - -// GetRangeProof synchronously retrieves the range proof given by [req]. -// Upon failure, retries until the context is expired. 
-// The returned range proof is verified. -func (c *client) GetRangeProof( - ctx context.Context, - req *pb.SyncGetRangeProofRequest, -) (*merkledb.RangeProof, error) { - parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.RangeProof, error) { - if len(responseBytes) > int(req.BytesLimit) { - return nil, fmt.Errorf( - "%w: (%d) > %d)", - errTooManyBytes, len(responseBytes), req.BytesLimit, - ) - } - - var rangeProofProto pb.RangeProof - if err := proto.Unmarshal(responseBytes, &rangeProofProto); err != nil { - return nil, err - } - - var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil { - return nil, err - } - - if err := verifyRangeProof( - ctx, - &rangeProof, - int(req.KeyLimit), - maybeBytesToMaybe(req.StartKey), - maybeBytesToMaybe(req.EndKey), - req.RootHash, - c.tokenSize, - c.hasher, - ); err != nil { - return nil, err - } - return &rangeProof, nil - } - - reqBytes, err := proto.Marshal(&pb.Request{ - Message: &pb.Request_RangeProofRequest{ - RangeProofRequest: req, - }, - }) - if err != nil { - return nil, err - } - - return getAndParse(ctx, c, reqBytes, parseFn) -} - -// getAndParse uses [client] to send [request] to an arbitrary peer. -// Returns the response to the request. -// [parseFn] parses the raw response. -// If the request is unsuccessful or the response can't be parsed, -// retries the request to a different peer until [ctx] expires. -// Returns [errAppSendFailed] if we fail to send an AppRequest/AppResponse. -// This should be treated as a fatal error. -func getAndParse[T any]( - ctx context.Context, - client *client, - request []byte, - parseFn func(context.Context, []byte) (*T, error), -) (*T, error) { - var ( - lastErr error - response *T - ) - // Loop until the context is cancelled or we get a valid response. - for attempt := 1; ; attempt++ { - nodeID, responseBytes, err := client.get(ctx, request) - if err == nil { - if response, err = parseFn(ctx, responseBytes); err == nil { - return response, nil - } - } - - if errors.Is(err, errAppSendFailed) { - // Failing to send an AppRequest is a fatal error. - return nil, err - } - - client.log.Debug("request failed, retrying", - zap.Stringer("nodeID", nodeID), - zap.Int("attempt", attempt), - zap.Error(err), - ) - // if [err] is being propagated from [ctx], avoid overwriting [lastErr]. - if err != ctx.Err() { - lastErr = err - } - - retryWait := initialRetryWait * time.Duration(math.Pow(retryWaitFactor, float64(attempt))) - if retryWait > maxRetryWait || retryWait < 0 { // Handle overflows with negative check. - retryWait = maxRetryWait - } - - select { - case <-ctx.Done(): - if lastErr != nil { - // prefer reporting [lastErr] if it's not nil. - return nil, fmt.Errorf( - "request failed after %d attempts with last error %w and ctx error %w", - attempt, lastErr, ctx.Err(), - ) - } - return nil, ctx.Err() - case <-time.After(retryWait): - } - } -} - -// get sends [request] to an arbitrary peer and blocks -// until the node receives a response, failure notification -// or [ctx] is canceled. -// Returns the peer's NodeID and response. -// Returns [errAppSendFailed] if we failed to send an AppRequest/AppResponse. -// This should be treated as fatal. -// It's safe to call this method multiple times concurrently. 
-func (c *client) get(ctx context.Context, request []byte) (ids.NodeID, []byte, error) { - var ( - response []byte - nodeID ids.NodeID - err error - ) - - c.metrics.RequestMade() - - if len(c.stateSyncNodes) == 0 { - nodeID, response, err = c.networkClient.RequestAny(ctx, request) - } else { - // Get the next nodeID to query using the [nodeIdx] offset. - // If we're out of nodes, loop back to 0. - // We do this try to query a different node each time if possible. - nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) - nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] - response, err = c.networkClient.Request(ctx, nodeID, request) - } - if err != nil { - c.metrics.RequestFailed() - return nodeID, response, err - } - - c.metrics.RequestSucceeded() - return nodeID, response, nil -} diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 5c5304a2e64..2633071439d 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -5,28 +5,26 @@ package sync import ( "context" - "math/rand" + "sync" "testing" "time" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "google.golang.org/protobuf/proto" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common/commonmock" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" - "github.com/ava-labs/avalanchego/x/sync/syncmock" pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) +var _ p2p.Handler = (*flakyHandler)(nil) + func newDefaultDBConfig() merkledb.Config { return merkledb.Config{ IntermediateWriteBatchSize: 100, @@ -40,739 +38,110 @@ func newDefaultDBConfig() merkledb.Config { } } -// Create a client and send a range proof request to a server -// whose underlying database is [serverDB]. -// The server's response is modified with [modifyResponse] before -// being returned to the server. -// The client makes at most [maxAttempts] attempts to fulfill -// the request before returning an error. -func sendRangeProofRequest( +func newModifiedRangeProofHandler( t *testing.T, - serverDB DB, - request *pb.SyncGetRangeProofRequest, - maxAttempts int, - modifyResponse func(*merkledb.RangeProof), -) (*merkledb.RangeProof, error) { - t.Helper() - - require := require.New(t) - ctrl := gomock.NewController(t) - - var ( - // Number of calls from the client to the server so far. - numAttempts int - - // Sends messages from server to client. - sender = commonmock.NewSender(ctrl) - - // Serves the range proof. - server = NewNetworkServer(sender, serverDB, logging.NoLog{}) - - clientNodeID, serverNodeID = ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - - // "Sends" the request from the client to the server and - // "receives" the response from the server. In reality, - // it just invokes the server's method and receives - // the response on [serverResponseChan]. - networkClient = syncmock.NewNetworkClient(ctrl) - - serverResponseChan = make(chan []byte, 1) - - // The context used in client.GetRangeProof. - // Canceled after the first response is received because - // the client will keep sending requests until its context - // expires or it succeeds. 
- ctx, cancel = context.WithCancel(context.Background()) - ) - - defer cancel() - - // The client fetching a range proof. - client, err := NewClient(&ClientConfig{ - NetworkClient: networkClient, - Metrics: &mockMetrics{}, - Log: logging.NoLog{}, - BranchFactor: merkledb.BranchFactor16, - }) - require.NoError(err) - - networkClient.EXPECT().RequestAny( - gomock.Any(), // ctx - gomock.Any(), // request - ).DoAndReturn( - func(_ context.Context, request []byte) (ids.NodeID, []byte, error) { - go func() { - // Get response from server - require.NoError(server.AppRequest(context.Background(), clientNodeID, 0, time.Now().Add(time.Hour), request)) - }() - - // Wait for response from server - serverResponse := <-serverResponseChan - - numAttempts++ - - if numAttempts >= maxAttempts { - defer cancel() + db merkledb.MerkleDB, + modifyResponse func(response *merkledb.RangeProof), +) p2p.Handler { + handler := NewSyncGetRangeProofHandler(logging.NoLog{}, db) + + c := counter{m: 2} + return &p2p.TestHandler{ + AppRequestF: func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + responseBytes, appErr := handler.AppRequest(ctx, nodeID, deadline, requestBytes) + if appErr != nil { + return nil, appErr } - return serverNodeID, serverResponse, nil - }, - ).AnyTimes() - - // The server should expect to "send" a response to the client. - sender.EXPECT().SendAppResponse( - gomock.Any(), // ctx - clientNodeID, - gomock.Any(), // requestID - gomock.Any(), // responseBytes - ).DoAndReturn( - func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { - // deserialize the response so we can modify it if needed. - var responseProto pb.RangeProof - require.NoError(proto.Unmarshal(responseBytes, &responseProto)) + response := &pb.RangeProof{} + require.NoError(t, proto.Unmarshal(responseBytes, response)) - var response merkledb.RangeProof - require.NoError(response.UnmarshalProto(&responseProto)) + proof := &merkledb.RangeProof{} + require.NoError(t, proof.UnmarshalProto(response)) - // modify if needed - if modifyResponse != nil { - modifyResponse(&response) + // Half of requests are modified + if c.Inc() == 0 { + modifyResponse(proof) } - // reserialize the response and pass it to the client to complete the handling. 
- responseBytes, err := proto.Marshal(response.ToProto()) - require.NoError(err) - - serverResponseChan <- responseBytes - - return nil - }, - ).AnyTimes() - - return client.GetRangeProof(ctx, request) -} - -func TestGetRangeProof(t *testing.T) { - now := time.Now().UnixNano() - t.Logf("seed: %d", now) - r := rand.New(rand.NewSource(now)) // #nosec G404 - - smallTrieKeyCount := defaultRequestKeyLimit - smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, smallTrieKeyCount, 1) - require.NoError(t, err) - smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - largeTrieKeyCount := 3 * defaultRequestKeyLimit - largeTrieDB, largeTrieKeys, err := generateTrieWithMinKeyLen(t, r, largeTrieKeyCount, 1) - require.NoError(t, err) - largeTrieRoot, err := largeTrieDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) + responseBytes, err := proto.Marshal(proof.ToProto()) + if err != nil { + return nil, &common.AppError{Code: 123, Message: err.Error()} + } - tests := map[string]struct { - db DB - request *pb.SyncGetRangeProofRequest - modifyResponse func(*merkledb.RangeProof) - expectedErr error - expectedResponseLen int - }{ - "proof restricted by BytesLimit": { - db: smallTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: smallTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: 10000, - }, - }, - "full response for small (single request) trie": { - db: smallTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: smallTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: defaultRequestKeyLimit, - }, - "too many leaves in response": { - db: smallTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: smallTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) - }, - expectedErr: errTooManyKeys, - }, - "partial response to request for entire trie (full leaf limit)": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: defaultRequestKeyLimit, - }, - "full response from near end of trie to end of trie (less than leaf limit)": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - StartKey: &pb.MaybeBytes{ - Value: largeTrieKeys[len(largeTrieKeys)-30], // Set start 30 keys from the end of the large trie - IsNothing: false, - }, - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: 30, - }, - "full response for intermediate range of trie (less than leaf limit)": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - StartKey: &pb.MaybeBytes{ - Value: largeTrieKeys[1000], // Set the range for 1000 leafs in an intermediate range of the trie - IsNothing: false, - }, - EndKey: &pb.MaybeBytes{Value: largeTrieKeys[1099]}, // (inclusive range) - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: 100, - }, - "removed first key in response": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - 
response.KeyValues = response.KeyValues[1:] - }, - expectedErr: merkledb.ErrInvalidProof, - }, - "removed first key in response and replaced proof": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - start := maybe.Some(response.KeyValues[1].Key) - rootID, err := largeTrieDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - proof, err := largeTrieDB.GetRangeProofAtRoot(context.Background(), rootID, start, maybe.Nothing[[]byte](), defaultRequestKeyLimit) - require.NoError(t, err) - response.KeyValues = proof.KeyValues - response.StartProof = proof.StartProof - response.EndProof = proof.EndProof - }, - expectedErr: errInvalidRangeProof, - }, - "removed key from middle of response": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = append(response.KeyValues[:100], response.KeyValues[101:]...) - }, - expectedErr: merkledb.ErrInvalidProof, - }, - "start and end proof nodes removed": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.StartProof = nil - response.EndProof = nil - }, - expectedErr: merkledb.ErrNoEndProof, - }, - "end proof removed": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.EndProof = nil - }, - expectedErr: merkledb.ErrNoEndProof, - }, - "empty proof": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.StartProof = nil - response.EndProof = nil - response.KeyValues = nil - }, - expectedErr: merkledb.ErrEmptyProof, + return responseBytes, nil }, } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require := require.New(t) - proof, err := sendRangeProofRequest(t, test.db, test.request, 1, test.modifyResponse) - require.ErrorIs(err, test.expectedErr) - if test.expectedErr != nil { - return - } - if test.expectedResponseLen > 0 { - require.Len(proof.KeyValues, test.expectedResponseLen) - } - bytes, err := proto.Marshal(proof.ToProto()) - require.NoError(err) - require.Less(len(bytes), int(test.request.BytesLimit)) - }) - } } -func sendChangeProofRequest( +func newModifiedChangeProofHandler( t *testing.T, - serverDB DB, - clientDB DB, - request *pb.SyncGetChangeProofRequest, - maxAttempts int, - modifyChangeProof func(*merkledb.ChangeProof), - modifyRangeProof func(*merkledb.RangeProof), -) (*merkledb.ChangeOrRangeProof, error) { - t.Helper() - - require := require.New(t) - ctrl := gomock.NewController(t) - - var ( - // Number of calls from the client to the server so far. - numAttempts int - - // Sends messages from server to client. - sender = commonmock.NewSender(ctrl) - - // Serves the change proof. 
- server = NewNetworkServer(sender, serverDB, logging.NoLog{}) - - clientNodeID, serverNodeID = ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - - // "Sends" the request from the client to the server and - // "receives" the response from the server. In reality, - // it just invokes the server's method and receives - // the response on [serverResponseChan]. - networkClient = syncmock.NewNetworkClient(ctrl) - - serverResponseChan = make(chan []byte, 1) - - // The context used in client.GetChangeProof. - // Canceled after the first response is received because - // the client will keep sending requests until its context - // expires or it succeeds. - ctx, cancel = context.WithCancel(context.Background()) - ) - - // The client fetching a change proof. - client, err := NewClient(&ClientConfig{ - NetworkClient: networkClient, - Metrics: &mockMetrics{}, - Log: logging.NoLog{}, - BranchFactor: merkledb.BranchFactor16, - }) - require.NoError(err) - - defer cancel() // avoid leaking a goroutine - - networkClient.EXPECT().RequestAny( - gomock.Any(), // ctx - gomock.Any(), // request - ).DoAndReturn( - func(_ context.Context, request []byte) (ids.NodeID, []byte, error) { - go func() { - // Get response from server - require.NoError(server.AppRequest(context.Background(), clientNodeID, 0, time.Now().Add(time.Hour), request)) - }() - - // Wait for response from server - serverResponse := <-serverResponseChan - - numAttempts++ - - if numAttempts >= maxAttempts { - defer cancel() + db merkledb.MerkleDB, + modifyResponse func(response *merkledb.ChangeProof), +) p2p.Handler { + handler := NewSyncGetChangeProofHandler(logging.NoLog{}, db) + + c := counter{m: 2} + return &p2p.TestHandler{ + AppRequestF: func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + var err error + responseBytes, appErr := handler.AppRequest(ctx, nodeID, deadline, requestBytes) + if appErr != nil { + return nil, appErr } - return serverNodeID, serverResponse, nil - }, - ).AnyTimes() - - // Expect server (serverDB) to send app response to client (clientDB) - sender.EXPECT().SendAppResponse( - gomock.Any(), // ctx - clientNodeID, - gomock.Any(), // requestID - gomock.Any(), // responseBytes - ).DoAndReturn( - func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { - // deserialize the response so we can modify it if needed. - var responseProto pb.SyncGetChangeProofResponse - require.NoError(proto.Unmarshal(responseBytes, &responseProto)) - - if responseProto.GetChangeProof() != nil { - // Server responded with a change proof - var changeProof merkledb.ChangeProof - require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof())) - - // modify if needed - if modifyChangeProof != nil { - modifyChangeProof(&changeProof) - } - - // reserialize the response and pass it to the client to complete the handling. 
- responseBytes, err := proto.Marshal(&pb.SyncGetChangeProofResponse{ - Response: &pb.SyncGetChangeProofResponse_ChangeProof{ - ChangeProof: changeProof.ToProto(), - }, - }) - require.NoError(err) + response := &pb.SyncGetChangeProofResponse{} + require.NoError(t, proto.Unmarshal(responseBytes, response)) - serverResponseChan <- responseBytes + changeProof := response.Response.(*pb.SyncGetChangeProofResponse_ChangeProof) + proof := &merkledb.ChangeProof{} + require.NoError(t, proof.UnmarshalProto(changeProof.ChangeProof)) - return nil + // Half of requests are modified + if c.Inc() == 0 { + modifyResponse(proof) } - // Server responded with a range proof - var rangeProof merkledb.RangeProof - require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof())) - - // modify if needed - if modifyRangeProof != nil { - modifyRangeProof(&rangeProof) - } - - // reserialize the response and pass it to the client to complete the handling. - responseBytes, err := proto.Marshal(&pb.SyncGetChangeProofResponse{ - Response: &pb.SyncGetChangeProofResponse_RangeProof{ - RangeProof: rangeProof.ToProto(), + responseBytes, err = proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_ChangeProof{ + ChangeProof: proof.ToProto(), }, }) - require.NoError(err) - - serverResponseChan <- responseBytes - - return nil - }, - ).AnyTimes() - - return client.GetChangeProof(ctx, request, clientDB) -} - -func TestGetChangeProof(t *testing.T) { - now := time.Now().UnixNano() - t.Logf("seed: %d", now) - r := rand.New(rand.NewSource(now)) // #nosec G404 - - serverDB, err := merkledb.New( - context.Background(), - memdb.New(), - newDefaultDBConfig(), - ) - require.NoError(t, err) - - clientDB, err := merkledb.New( - context.Background(), - memdb.New(), - newDefaultDBConfig(), - ) - require.NoError(t, err) - startRoot, err := serverDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - // create changes - for x := 0; x < defaultRequestKeyLimit/2; x++ { - ops := make([]database.BatchOp, 0, 11) - // add some key/values - for i := 0; i < 10; i++ { - key := make([]byte, r.Intn(100)) - _, err = r.Read(key) - require.NoError(t, err) - - val := make([]byte, r.Intn(100)) - _, err = r.Read(val) - require.NoError(t, err) - - ops = append(ops, database.BatchOp{Key: key, Value: val}) - } - - // delete a key - deleteKeyStart := make([]byte, r.Intn(10)) - _, err = r.Read(deleteKeyStart) - require.NoError(t, err) - - it := serverDB.NewIteratorWithStart(deleteKeyStart) - if it.Next() { - ops = append(ops, database.BatchOp{Key: it.Key(), Delete: true}) - } - require.NoError(t, it.Error()) - it.Release() - - view, err := serverDB.NewView( - context.Background(), - merkledb.ViewChanges{BatchOps: ops}, - ) - require.NoError(t, err) - require.NoError(t, view.CommitToDB(context.Background())) - } - - endRoot, err := serverDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - fakeRootID := ids.GenerateTestID() - - tests := map[string]struct { - db DB - request *pb.SyncGetChangeProofRequest - modifyChangeProofResponse func(*merkledb.ChangeProof) - modifyRangeProofResponse func(*merkledb.RangeProof) - expectedErr error - expectedResponseLen int - expectRangeProof bool // Otherwise expect change proof - }{ - "proof restricted by BytesLimit": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: 10000, - }, - }, - "full response for small (single request) trie": { - request: 
&pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: defaultRequestKeyLimit, - }, - "too many keys in response": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: func(response *merkledb.ChangeProof) { - response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) - }, - expectedErr: errTooManyKeys, - }, - "partial response to request for entire trie (full leaf limit)": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - expectedResponseLen: defaultRequestKeyLimit, - }, - "removed first key in response": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: func(response *merkledb.ChangeProof) { - response.KeyChanges = response.KeyChanges[1:] - }, - expectedErr: errInvalidChangeProof, - }, - "removed key from middle of response": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: func(response *merkledb.ChangeProof) { - response.KeyChanges = append(response.KeyChanges[:100], response.KeyChanges[101:]...) - }, - expectedErr: merkledb.ErrInvalidProof, - }, - "all proof keys removed from response": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: func(response *merkledb.ChangeProof) { - response.StartProof = nil - response.EndProof = nil - }, - expectedErr: merkledb.ErrInvalidProof, - }, - "range proof response; remove first key": { - request: &pb.SyncGetChangeProofRequest{ - // Server doesn't have the (non-existent) start root - // so should respond with range proof. - StartRootHash: fakeRootID[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: nil, - modifyRangeProofResponse: func(response *merkledb.RangeProof) { - response.KeyValues = response.KeyValues[1:] - }, - expectedErr: errInvalidRangeProof, - expectRangeProof: true, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - // Ensure test is well-formed. 
- if test.expectRangeProof { - require.Nil(test.modifyChangeProofResponse) - } else { - require.Nil(test.modifyRangeProofResponse) + if err != nil { + return nil, &common.AppError{Code: 123, Message: err.Error()} } - changeOrRangeProof, err := sendChangeProofRequest( - t, - serverDB, - clientDB, - test.request, - 1, - test.modifyChangeProofResponse, - test.modifyRangeProofResponse, - ) - require.ErrorIs(err, test.expectedErr) - if test.expectedErr != nil { - return - } - - if test.expectRangeProof { - require.NotNil(changeOrRangeProof.RangeProof) - require.Nil(changeOrRangeProof.ChangeProof) - } else { - require.NotNil(changeOrRangeProof.ChangeProof) - require.Nil(changeOrRangeProof.RangeProof) - } - - if test.expectedResponseLen > 0 { - if test.expectRangeProof { - require.LessOrEqual(len(changeOrRangeProof.RangeProof.KeyValues), test.expectedResponseLen) - } else { - require.LessOrEqual(len(changeOrRangeProof.ChangeProof.KeyChanges), test.expectedResponseLen) - } - } - - var bytes []byte - if test.expectRangeProof { - bytes, err = proto.Marshal(&pb.SyncGetChangeProofResponse{ - Response: &pb.SyncGetChangeProofResponse_RangeProof{ - RangeProof: changeOrRangeProof.RangeProof.ToProto(), - }, - }) - } else { - bytes, err = proto.Marshal(&pb.SyncGetChangeProofResponse{ - Response: &pb.SyncGetChangeProofResponse_ChangeProof{ - ChangeProof: changeOrRangeProof.ChangeProof.ToProto(), - }, - }) - } - require.NoError(err) - require.LessOrEqual(len(bytes), int(test.request.BytesLimit)) - }) + return responseBytes, nil + }, } } -func TestRangeProofRetries(t *testing.T) { - now := time.Now().UnixNano() - t.Logf("seed: %d", now) - r := rand.New(rand.NewSource(now)) // #nosec G404 - require := require.New(t) - - keyCount := defaultRequestKeyLimit - db, _, err := generateTrieWithMinKeyLen(t, r, keyCount, 1) - require.NoError(err) - root, err := db.GetMerkleRoot(context.Background()) - require.NoError(err) - - maxRequests := 4 - request := &pb.SyncGetRangeProofRequest{ - RootHash: root[:], - KeyLimit: uint32(keyCount), - BytesLimit: defaultRequestByteSizeLimit, - } +type flakyHandler struct { + p2p.Handler + c *counter +} - responseCount := 0 - modifyResponse := func(response *merkledb.RangeProof) { - responseCount++ - if responseCount < maxRequests { - // corrupt the first [maxRequests] responses, to force the client to retry. - response.KeyValues = nil - } +func (f *flakyHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + if f.c.Inc() == 0 { + return nil, &common.AppError{Code: 123, Message: "flake error"} } - proof, err := sendRangeProofRequest(t, db, request, maxRequests, modifyResponse) - require.NoError(err) - require.Len(proof.KeyValues, keyCount) - require.Equal(responseCount, maxRequests) // check the client performed retries. + return f.Handler.AppRequest(ctx, nodeID, deadline, requestBytes) } -// Test that a failure to send an AppRequest is propagated -// and returned by GetRangeProof and GetChangeProof. 
-func TestAppRequestSendFailed(t *testing.T) {
-	require := require.New(t)
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	networkClient := syncmock.NewNetworkClient(ctrl)
-
-	client, err := NewClient(
-		&ClientConfig{
-			NetworkClient: networkClient,
-			Log:           logging.NoLog{},
-			Metrics:       &mockMetrics{},
-			BranchFactor:  merkledb.BranchFactor16,
-		},
-	)
-	require.NoError(err)
+type counter struct {
+	i    int
+	m    int
+	lock sync.Mutex
+}
 
-	// Mock failure to send app request
-	networkClient.EXPECT().RequestAny(
-		gomock.Any(),
-		gomock.Any(),
-	).Return(ids.EmptyNodeID, nil, errAppSendFailed).Times(2)
+func (c *counter) Inc() int {
+	c.lock.Lock()
+	defer c.lock.Unlock()
 
-	_, err = client.GetChangeProof(
-		context.Background(),
-		&pb.SyncGetChangeProofRequest{},
-		nil, // database is unused
-	)
-	require.ErrorIs(err, errAppSendFailed)
+	tmp := c.i
+	result := tmp % c.m
 
-	_, err = client.GetRangeProof(
-		context.Background(),
-		&pb.SyncGetRangeProofRequest{},
-	)
-	require.ErrorIs(err, errAppSendFailed)
+	c.i++
+	return result
 }
diff --git a/x/sync/manager.go b/x/sync/manager.go
index fa70c03f267..dd176c22303 100644
--- a/x/sync/manager.go
+++ b/x/sync/manager.go
@@ -8,13 +8,19 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math"
 	"slices"
 	"sync"
+	"sync/atomic"
+	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/zap"
 	"golang.org/x/exp/maps"
+	"google.golang.org/protobuf/proto"
 
 	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/network/p2p"
 	"github.com/ava-labs/avalanchego/utils/logging"
 	"github.com/ava-labs/avalanchego/utils/maybe"
 	"github.com/ava-labs/avalanchego/utils/set"
@@ -26,16 +32,25 @@ import (
 const (
 	defaultRequestKeyLimit      = maxKeyValuesLimit
 	defaultRequestByteSizeLimit = maxByteSizeLimit
+	initialRetryWait            = 10 * time.Millisecond
+	maxRetryWait                = time.Second
+	retryWaitFactor             = 1.5 // Larger --> timeout grows more quickly
 )
 
 var (
-	ErrAlreadyStarted             = errors.New("cannot start a Manager that has already been started")
-	ErrAlreadyClosed              = errors.New("Manager is closed")
-	ErrNoClientProvided           = errors.New("client is a required field of the sync config")
-	ErrNoDatabaseProvided         = errors.New("sync database is a required field of the sync config")
-	ErrNoLogProvided              = errors.New("log is a required field of the sync config")
-	ErrZeroWorkLimit              = errors.New("simultaneous work limit must be greater than 0")
-	ErrFinishedWithUnexpectedRoot = errors.New("finished syncing with an unexpected root")
+	ErrAlreadyStarted                = errors.New("cannot start a Manager that has already been started")
+	ErrAlreadyClosed                 = errors.New("Manager is closed")
+	ErrNoRangeProofClientProvided    = errors.New("range proof client is a required field of the sync config")
+	ErrNoChangeProofClientProvided   = errors.New("change proof client is a required field of the sync config")
+	ErrNoDatabaseProvided            = errors.New("sync database is a required field of the sync config")
+	ErrNoLogProvided                 = errors.New("log is a required field of the sync config")
+	ErrZeroWorkLimit                 = errors.New("simultaneous work limit must be greater than 0")
+	ErrFinishedWithUnexpectedRoot    = errors.New("finished syncing with an unexpected root")
+	errInvalidRangeProof             = errors.New("failed to verify range proof")
+	errInvalidChangeProof            = errors.New("failed to verify change proof")
+	errTooManyKeys                   = errors.New("response contains more than requested keys")
+	errTooManyBytes                  = errors.New("response contains more than requested bytes")
+	errUnexpectedChangeProofResponse = errors.New("unexpected response type")
 )
 
 type 
priority byte @@ -45,6 +60,7 @@ const ( lowPriority priority = iota + 1 medPriority highPriority + retryPriority ) // Signifies that we should sync the range [start, end]. @@ -57,14 +73,26 @@ type workItem struct { end maybe.Maybe[[]byte] priority priority localRootID ids.ID + attempt int + queueTime time.Time } -func newWorkItem(localRootID ids.ID, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], priority priority) *workItem { +func (w *workItem) requestFailed() { + attempt := w.attempt + 1 + + // Overflow check + if attempt > w.attempt { + w.attempt = attempt + } +} + +func newWorkItem(localRootID ids.ID, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], priority priority, queueTime time.Time) *workItem { return &workItem{ localRootID: localRootID, start: start, end: end, priority: priority, + queueTime: queueTime, } } @@ -107,21 +135,32 @@ type Manager struct { syncing bool closeOnce sync.Once tokenSize int + + stateSyncNodeIdx uint32 + metrics SyncMetrics } +// TODO remove non-config values out of this struct type ManagerConfig struct { DB DB - Client Client + RangeProofClient *p2p.Client + ChangeProofClient *p2p.Client SimultaneousWorkLimit int Log logging.Logger TargetRoot ids.ID BranchFactor merkledb.BranchFactor + StateSyncNodes []ids.NodeID + // If not specified, [merkledb.DefaultHasher] will be used. + Hasher merkledb.Hasher + Metrics prometheus.Registerer } -func NewManager(config ManagerConfig) (*Manager, error) { +func NewManager(config ManagerConfig, registerer prometheus.Registerer) (*Manager, error) { switch { - case config.Client == nil: - return nil, ErrNoClientProvided + case config.RangeProofClient == nil: + return nil, ErrNoRangeProofClientProvided + case config.ChangeProofClient == nil: + return nil, ErrNoChangeProofClientProvided case config.DB == nil: return nil, ErrNoDatabaseProvided case config.Log == nil: @@ -133,12 +172,22 @@ func NewManager(config ManagerConfig) (*Manager, error) { return nil, err } + if config.Hasher == nil { + config.Hasher = merkledb.DefaultHasher + } + + metrics, err := NewMetrics("sync", registerer) + if err != nil { + return nil, err + } + m := &Manager{ config: config, doneChan: make(chan struct{}), unprocessedWork: newWorkHeap(), processedWork: newWorkHeap(), tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], + metrics: metrics, } m.unprocessedWorkCond.L = &m.workLock @@ -157,7 +206,7 @@ func (m *Manager) Start(ctx context.Context) error { // Add work item to fetch the entire key range. // Note that this will be the first work item to be processed. - m.unprocessedWork.Insert(newWorkItem(ids.Empty, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), lowPriority)) + m.unprocessedWork.Insert(newWorkItem(ids.Empty, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), lowPriority, time.Now())) m.syncing = true ctx, m.cancelCtx = context.WithCancel(ctx) @@ -234,38 +283,54 @@ func (m *Manager) close() { }) } -// Processes [item] by fetching and applying a change or range proof. -// Assumes [m.workLock] is not held. +func (m *Manager) finishWorkItem() { + m.workLock.Lock() + defer m.workLock.Unlock() + + m.processingWorkItems-- + m.unprocessedWorkCond.Signal() +} + +// Processes [item] by fetching a change or range proof. 
func (m *Manager) doWork(ctx context.Context, work *workItem) { - defer func() { - m.workLock.Lock() - defer m.workLock.Unlock() + // Backoff for failed requests accounting for time this job has already + // spent waiting in the unprocessed queue + now := time.Now() + waitTime := max(0, calculateBackoff(work.attempt)-now.Sub(work.queueTime)) + + // Check if we can start this work item before the context deadline + deadline, ok := ctx.Deadline() + if ok && now.Add(waitTime).After(deadline) { + m.finishWorkItem() + return + } - m.processingWorkItems-- - m.unprocessedWorkCond.Signal() - }() + <-time.After(waitTime) if work.localRootID == ids.Empty { // the keys in this range have not been downloaded, so get all key/values - m.getAndApplyRangeProof(ctx, work) + m.requestRangeProof(ctx, work) } else { // the keys in this range have already been downloaded, but the root changed, so get all changes - m.getAndApplyChangeProof(ctx, work) + m.requestChangeProof(ctx, work) } } // Fetch and apply the change proof given by [work]. // Assumes [m.workLock] is not held. -func (m *Manager) getAndApplyChangeProof(ctx context.Context, work *workItem) { +func (m *Manager) requestChangeProof(ctx context.Context, work *workItem) { targetRootID := m.getTargetRoot() if work.localRootID == targetRootID { // Start root is the same as the end root, so we're done. m.completeWorkItem(ctx, work, work.end, targetRootID, nil) + m.finishWorkItem() return } if targetRootID == ids.Empty { + defer m.finishWorkItem() + // The trie is empty after this change. // Delete all the key-value pairs in the range. if err := m.config.DB.Clear(); err != nil { @@ -277,74 +342,55 @@ func (m *Manager) getAndApplyChangeProof(ctx context.Context, work *workItem) { return } - changeOrRangeProof, err := m.config.Client.GetChangeProof( - ctx, - &pb.SyncGetChangeProofRequest{ - StartRootHash: work.localRootID[:], - EndRootHash: targetRootID[:], - StartKey: &pb.MaybeBytes{ - Value: work.start.Value(), - IsNothing: work.start.IsNothing(), - }, - EndKey: &pb.MaybeBytes{ - Value: work.end.Value(), - IsNothing: work.end.IsNothing(), - }, - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, + request := &pb.SyncGetChangeProofRequest{ + StartRootHash: work.localRootID[:], + EndRootHash: targetRootID[:], + StartKey: &pb.MaybeBytes{ + Value: work.start.Value(), + IsNothing: work.start.IsNothing(), }, - m.config.DB, - ) + EndKey: &pb.MaybeBytes{ + Value: work.end.Value(), + IsNothing: work.end.IsNothing(), + }, + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + } + + requestBytes, err := proto.Marshal(request) if err != nil { + m.finishWorkItem() m.setError(err) return } - select { - case <-m.doneChan: - // If we're closed, don't apply the proof. - return - default: - } + onResponse := func(ctx context.Context, _ ids.NodeID, responseBytes []byte, err error) { + defer m.finishWorkItem() - if changeOrRangeProof.ChangeProof != nil { - // The server had sufficient history to respond with a change proof. 
- changeProof := changeOrRangeProof.ChangeProof - largestHandledKey := work.end - // if the proof wasn't empty, apply changes to the sync DB - if len(changeProof.KeyChanges) > 0 { - if err := m.config.DB.CommitChangeProof(ctx, changeProof); err != nil { - m.setError(err) - return - } - largestHandledKey = maybe.Some(changeProof.KeyChanges[len(changeProof.KeyChanges)-1].Key) + if err := m.handleChangeProofResponse(ctx, targetRootID, work, request, responseBytes, err); err != nil { + m.config.Log.Debug("dropping response", zap.Error(err)) + m.retryWork(work) + return } - - m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, changeProof.EndProof) - return } - // The server responded with a range proof. - rangeProof := changeOrRangeProof.RangeProof - largestHandledKey := work.end - if len(rangeProof.KeyValues) > 0 { - // Add all the key-value pairs we got to the database. - if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, rangeProof); err != nil { - m.setError(err) - return - } - largestHandledKey = maybe.Some(rangeProof.KeyValues[len(rangeProof.KeyValues)-1].Key) + if err := m.sendRequest(ctx, m.config.ChangeProofClient, requestBytes, onResponse); err != nil { + m.finishWorkItem() + m.setError(err) + return } - m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, rangeProof.EndProof) + m.metrics.RequestMade() } // Fetch and apply the range proof given by [work]. // Assumes [m.workLock] is not held. -func (m *Manager) getAndApplyRangeProof(ctx context.Context, work *workItem) { +func (m *Manager) requestRangeProof(ctx context.Context, work *workItem) { targetRootID := m.getTargetRoot() if targetRootID == ids.Empty { + defer m.finishWorkItem() + if err := m.config.DB.Clear(); err != nil { m.setError(err) return @@ -354,46 +400,252 @@ func (m *Manager) getAndApplyRangeProof(ctx context.Context, work *workItem) { return } - proof, err := m.config.Client.GetRangeProof(ctx, - &pb.SyncGetRangeProofRequest{ - RootHash: targetRootID[:], - StartKey: &pb.MaybeBytes{ - Value: work.start.Value(), - IsNothing: work.start.IsNothing(), - }, - EndKey: &pb.MaybeBytes{ - Value: work.end.Value(), - IsNothing: work.end.IsNothing(), - }, - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, + request := &pb.SyncGetRangeProofRequest{ + RootHash: targetRootID[:], + StartKey: &pb.MaybeBytes{ + Value: work.start.Value(), + IsNothing: work.start.IsNothing(), }, - ) + EndKey: &pb.MaybeBytes{ + Value: work.end.Value(), + IsNothing: work.end.IsNothing(), + }, + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + } + + requestBytes, err := proto.Marshal(request) if err != nil { + m.finishWorkItem() + m.setError(err) + return + } + + onResponse := func(ctx context.Context, _ ids.NodeID, responseBytes []byte, appErr error) { + defer m.finishWorkItem() + + if err := m.handleRangeProofResponse(ctx, targetRootID, work, request, responseBytes, appErr); err != nil { + m.config.Log.Debug("dropping response", zap.Error(err)) + m.retryWork(work) + return + } + } + + if err := m.sendRequest(ctx, m.config.RangeProofClient, requestBytes, onResponse); err != nil { + m.finishWorkItem() m.setError(err) return } + m.metrics.RequestMade() +} + +func (m *Manager) sendRequest(ctx context.Context, client *p2p.Client, requestBytes []byte, onResponse p2p.AppResponseCallback) error { + if len(m.config.StateSyncNodes) == 0 { + return client.AppRequestAny(ctx, requestBytes, onResponse) + } + + // Get the next nodeID to query using the [nodeIdx] offset. 
+	// If we're out of nodes, loop back to 0.
+	// We do this to try to query a different node each time if possible.
+	nodeIdx := atomic.AddUint32(&m.stateSyncNodeIdx, 1)
+	nodeID := m.config.StateSyncNodes[nodeIdx%uint32(len(m.config.StateSyncNodes))]
+	return client.AppRequest(ctx, set.Of(nodeID), requestBytes, onResponse)
+}
+
+func (m *Manager) retryWork(work *workItem) {
+	work.priority = retryPriority
+	work.queueTime = time.Now()
+	work.requestFailed()
+
+	m.workLock.Lock()
+	m.unprocessedWork.Insert(work)
+	m.workLock.Unlock()
+}
+
+// Returns an error if we should drop the response
+func (m *Manager) handleResponse(
+	bytesLimit uint32,
+	responseBytes []byte,
+	err error,
+) error {
+	if err != nil {
+		m.metrics.RequestFailed()
+		return err
+	}
+
+	m.metrics.RequestSucceeded()
+
+	// TODO can we remove this?
 	select {
 	case <-m.doneChan:
 		// If we're closed, don't apply the proof.
-		return
+		return ErrAlreadyClosed
 	default:
 	}
 
+	if len(responseBytes) > int(bytesLimit) {
+		return fmt.Errorf("%w: (%d) > %d)", errTooManyBytes, len(responseBytes), bytesLimit)
+	}
+
+	return nil
+}
+
+func (m *Manager) handleRangeProofResponse(
+	ctx context.Context,
+	targetRootID ids.ID,
+	work *workItem,
+	request *pb.SyncGetRangeProofRequest,
+	responseBytes []byte,
+	err error,
+) error {
+	if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil {
+		return err
+	}
+
+	var rangeProofProto pb.RangeProof
+	if err := proto.Unmarshal(responseBytes, &rangeProofProto); err != nil {
+		return err
+	}
+
+	var rangeProof merkledb.RangeProof
+	if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil {
+		return err
+	}
+
+	if err := verifyRangeProof(
+		ctx,
+		&rangeProof,
+		int(request.KeyLimit),
+		maybeBytesToMaybe(request.StartKey),
+		maybeBytesToMaybe(request.EndKey),
+		request.RootHash,
+		m.tokenSize,
+		m.config.Hasher,
+	); err != nil {
+		return err
+	}
+
 	largestHandledKey := work.end
 	// Replace all the key-value pairs in the DB from start to end with values from the response.
- if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, proof); err != nil { + if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, &rangeProof); err != nil { m.setError(err) - return + return nil } - if len(proof.KeyValues) > 0 { - largestHandledKey = maybe.Some(proof.KeyValues[len(proof.KeyValues)-1].Key) + if len(rangeProof.KeyValues) > 0 { + largestHandledKey = maybe.Some(rangeProof.KeyValues[len(rangeProof.KeyValues)-1].Key) } - m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, proof.EndProof) + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, rangeProof.EndProof) + return nil +} + +func (m *Manager) handleChangeProofResponse( + ctx context.Context, + targetRootID ids.ID, + work *workItem, + request *pb.SyncGetChangeProofRequest, + responseBytes []byte, + err error, +) error { + if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil { + return err + } + + var changeProofResp pb.SyncGetChangeProofResponse + if err := proto.Unmarshal(responseBytes, &changeProofResp); err != nil { + return err + } + + startKey := maybeBytesToMaybe(request.StartKey) + endKey := maybeBytesToMaybe(request.EndKey) + + switch changeProofResp := changeProofResp.Response.(type) { + case *pb.SyncGetChangeProofResponse_ChangeProof: + // The server had enough history to send us a change proof + var changeProof merkledb.ChangeProof + if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof); err != nil { + return err + } + + // Ensure the response does not contain more than the requested number of leaves + // and the start and end roots match the requested roots. + if len(changeProof.KeyChanges) > int(request.KeyLimit) { + return fmt.Errorf( + "%w: (%d) > %d)", + errTooManyKeys, len(changeProof.KeyChanges), request.KeyLimit, + ) + } + + endRoot, err := ids.ToID(request.EndRootHash) + if err != nil { + return err + } + + if err := m.config.DB.VerifyChangeProof( + ctx, + &changeProof, + startKey, + endKey, + endRoot, + ); err != nil { + return fmt.Errorf("%w due to %w", errInvalidChangeProof, err) + } + + largestHandledKey := work.end + // if the proof wasn't empty, apply changes to the sync DB + if len(changeProof.KeyChanges) > 0 { + if err := m.config.DB.CommitChangeProof(ctx, &changeProof); err != nil { + m.setError(err) + return nil + } + largestHandledKey = maybe.Some(changeProof.KeyChanges[len(changeProof.KeyChanges)-1].Key) + } + + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, changeProof.EndProof) + case *pb.SyncGetChangeProofResponse_RangeProof: + + var rangeProof merkledb.RangeProof + if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { + return err + } + + // The server did not have enough history to send us a change proof + // so they sent a range proof instead. + if err := verifyRangeProof( + ctx, + &rangeProof, + int(request.KeyLimit), + startKey, + endKey, + request.EndRootHash, + m.tokenSize, + m.config.Hasher, + ); err != nil { + return err + } + + largestHandledKey := work.end + if len(rangeProof.KeyValues) > 0 { + // Add all the key-value pairs we got to the database. 
+ if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, &rangeProof); err != nil { + m.setError(err) + return nil + } + largestHandledKey = maybe.Some(rangeProof.KeyValues[len(rangeProof.KeyValues)-1].Key) + } + + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, rangeProof.EndProof) + default: + return fmt.Errorf( + "%w: %T", + errUnexpectedChangeProofResponse, changeProofResp, + ) + } + + return nil } // findNextKey returns the start of the key range that should be fetched next @@ -695,7 +947,7 @@ func (m *Manager) completeWorkItem(ctx context.Context, work *workItem, largestH largestHandledKey = work.end } else { // the full range wasn't completed, so enqueue a new work item for the range [nextStartKey, workItem.end] - m.enqueueWork(newWorkItem(work.localRootID, nextStartKey, work.end, work.priority)) + m.enqueueWork(newWorkItem(work.localRootID, nextStartKey, work.end, work.priority, time.Now())) largestHandledKey = nextStartKey } } @@ -708,12 +960,12 @@ func (m *Manager) completeWorkItem(ctx context.Context, work *workItem, largestH stale := m.config.TargetRoot != rootID if stale { // the root has changed, so reinsert with high priority - m.enqueueWork(newWorkItem(rootID, work.start, largestHandledKey, highPriority)) + m.enqueueWork(newWorkItem(rootID, work.start, largestHandledKey, highPriority, time.Now())) } else { m.workLock.Lock() defer m.workLock.Unlock() - m.processedWork.MergeInsert(newWorkItem(rootID, work.start, largestHandledKey, work.priority)) + m.processedWork.MergeInsert(newWorkItem(rootID, work.start, largestHandledKey, work.priority, time.Now())) } // completed the range [work.start, lastKey], log and record in the completed work heap @@ -758,8 +1010,8 @@ func (m *Manager) enqueueWork(work *workItem) { // first item gets higher priority than the second to encourage finished ranges to grow // rather than start a new range that is not contiguous with existing completed ranges - first := newWorkItem(work.localRootID, work.start, mid, medPriority) - second := newWorkItem(work.localRootID, mid, work.end, lowPriority) + first := newWorkItem(work.localRootID, work.start, mid, medPriority, time.Now()) + second := newWorkItem(work.localRootID, mid, work.end, lowPriority, time.Now()) m.unprocessedWork.Insert(first) m.unprocessedWork.Insert(second) @@ -877,3 +1129,55 @@ func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex int) (byte // there were no differences found return 0, false } + +// Verify [rangeProof] is a valid range proof for keys in [start, end] for +// root [rootBytes]. Returns [errTooManyKeys] if the response contains more +// than [keyLimit] keys. +func verifyRangeProof( + ctx context.Context, + rangeProof *merkledb.RangeProof, + keyLimit int, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + rootBytes []byte, + tokenSize int, + hasher merkledb.Hasher, +) error { + root, err := ids.ToID(rootBytes) + if err != nil { + return err + } + + // Ensure the response does not contain more than the maximum requested number of leaves. 
+ if len(rangeProof.KeyValues) > keyLimit { + return fmt.Errorf( + "%w: (%d) > %d)", + errTooManyKeys, len(rangeProof.KeyValues), keyLimit, + ) + } + + if err := rangeProof.Verify( + ctx, + start, + end, + root, + tokenSize, + hasher, + ); err != nil { + return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + } + return nil +} + +func calculateBackoff(attempt int) time.Duration { + if attempt == 0 { + return 0 + } + + retryWait := initialRetryWait * time.Duration(math.Pow(retryWaitFactor, float64(attempt))) + if retryWait > maxRetryWait { + retryWait = maxRetryWait + } + + return retryWait +} diff --git a/x/sync/mock_client.go b/x/sync/mock_client.go deleted file mode 100644 index 8ffd6f62a1c..00000000000 --- a/x/sync/mock_client.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: x/sync/client.go -// -// Generated by this command: -// -// mockgen -source=x/sync/client.go -destination=x/sync/mock_client.go -package=sync -exclude_interfaces= -mock_names=MockClient=MockClient -// - -// Package sync is a generated GoMock package. -package sync - -import ( - context "context" - reflect "reflect" - - sync "github.com/ava-labs/avalanchego/proto/pb/sync" - merkledb "github.com/ava-labs/avalanchego/x/merkledb" - gomock "go.uber.org/mock/gomock" -) - -// MockClient is a mock of Client interface. -type MockClient struct { - ctrl *gomock.Controller - recorder *MockClientMockRecorder -} - -// MockClientMockRecorder is the mock recorder for MockClient. -type MockClientMockRecorder struct { - mock *MockClient -} - -// NewMockClient creates a new mock instance. -func NewMockClient(ctrl *gomock.Controller) *MockClient { - mock := &MockClient{ctrl: ctrl} - mock.recorder = &MockClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClient) EXPECT() *MockClientMockRecorder { - return m.recorder -} - -// GetChangeProof mocks base method. -func (m *MockClient) GetChangeProof(ctx context.Context, request *sync.SyncGetChangeProofRequest, verificationDB DB) (*merkledb.ChangeOrRangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChangeProof", ctx, request, verificationDB) - ret0, _ := ret[0].(*merkledb.ChangeOrRangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChangeProof indicates an expected call of GetChangeProof. -func (mr *MockClientMockRecorder) GetChangeProof(ctx, request, verificationDB any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockClient)(nil).GetChangeProof), ctx, request, verificationDB) -} - -// GetRangeProof mocks base method. -func (m *MockClient) GetRangeProof(ctx context.Context, request *sync.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProof", ctx, request) - ret0, _ := ret[0].(*merkledb.RangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRangeProof indicates an expected call of GetRangeProof. 
-func (mr *MockClientMockRecorder) GetRangeProof(ctx, request any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockClient)(nil).GetRangeProof), ctx, request) -} diff --git a/x/sync/network_client.go b/x/sync/network_client.go deleted file mode 100644 index 18530d1c4e7..00000000000 --- a/x/sync/network_client.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "golang.org/x/sync/semaphore" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" -) - -// Minimum amount of time to handle a request -const minRequestHandlingDuration = 100 * time.Millisecond - -var ( - _ NetworkClient = (*networkClient)(nil) - - errAcquiringSemaphore = errors.New("error acquiring semaphore") - errRequestFailed = errors.New("request failed") - errAppSendFailed = errors.New("failed to send app message") -) - -// NetworkClient defines ability to send request / response through the Network -type NetworkClient interface { - // RequestAny synchronously sends request to an arbitrary peer with a - // node version greater than or equal to minVersion. - // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if - // the request should be retried. - RequestAny( - ctx context.Context, - request []byte, - ) (ids.NodeID, []byte, error) - - // Sends [request] to [nodeID] and returns the response. - // Blocks until the number of outstanding requests is - // below the limit before sending the request. - Request( - ctx context.Context, - nodeID ids.NodeID, - request []byte, - ) ([]byte, error) - - // The following declarations allow this interface to be embedded in the VM - // to handle incoming responses from peers. - - // Always returns nil because the engine considers errors - // returned from this function as fatal. - AppResponse(context.Context, ids.NodeID, uint32, []byte) error - - // Always returns nil because the engine considers errors - // returned from this function as fatal. - AppRequestFailed(context.Context, ids.NodeID, uint32) error - - // Adds the given [nodeID] to the peer - // list so that it can receive messages. - // If [nodeID] is this node's ID, this is a no-op. - Connected(context.Context, ids.NodeID, *version.Application) error - - // Removes given [nodeID] from the peer list. 
- Disconnected(context.Context, ids.NodeID) error -} - -type networkClient struct { - lock sync.Mutex - log logging.Logger - // requestID counter used to track outbound requests - requestID uint32 - // requestID => handler for the response/failure - outstandingRequestHandlers map[uint32]ResponseHandler - // controls maximum number of active outbound requests - activeRequests *semaphore.Weighted - // tracking of peers & bandwidth usage - peers *p2p.PeerTracker - // For sending messages to peers - appSender common.AppSender -} - -func NewNetworkClient( - appSender common.AppSender, - myNodeID ids.NodeID, - maxActiveRequests int64, - log logging.Logger, - metricsNamespace string, - registerer prometheus.Registerer, - minVersion *version.Application, -) (NetworkClient, error) { - peerTracker, err := p2p.NewPeerTracker( - log, - metricsNamespace, - registerer, - set.Of(myNodeID), - minVersion, - ) - if err != nil { - return nil, fmt.Errorf("failed to create peer tracker: %w", err) - } - - return &networkClient{ - appSender: appSender, - outstandingRequestHandlers: make(map[uint32]ResponseHandler), - activeRequests: semaphore.NewWeighted(maxActiveRequests), - peers: peerTracker, - log: log, - }, nil -} - -func (c *networkClient) AppResponse( - _ context.Context, - nodeID ids.NodeID, - requestID uint32, - response []byte, -) error { - c.lock.Lock() - defer c.lock.Unlock() - - c.log.Info( - "received AppResponse from peer", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(response)), - ) - - handler, exists := c.getRequestHandler(requestID) - if !exists { - // Should never happen since the engine - // should be managing outstanding requests - c.log.Warn( - "received response to unknown request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(response)), - ) - return nil - } - handler.OnResponse(response) - return nil -} - -func (c *networkClient) AppRequestFailed( - _ context.Context, - nodeID ids.NodeID, - requestID uint32, -) error { - c.lock.Lock() - defer c.lock.Unlock() - - c.log.Info( - "received AppRequestFailed from peer", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - - handler, exists := c.getRequestHandler(requestID) - if !exists { - // Should never happen since the engine - // should be managing outstanding requests - c.log.Warn( - "received request failed to unknown request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil - } - handler.OnFailure() - return nil -} - -// Returns the handler for [requestID] and marks the request as fulfilled. -// Returns false if there's no outstanding request with [requestID]. -// Assumes [c.lock] is held. -func (c *networkClient) getRequestHandler(requestID uint32) (ResponseHandler, bool) { - handler, exists := c.outstandingRequestHandlers[requestID] - if !exists { - return nil, false - } - // mark message as processed, release activeRequests slot - delete(c.outstandingRequestHandlers, requestID) - return handler, true -} - -// If [errAppSendFailed] is returned this should be considered fatal. -func (c *networkClient) RequestAny( - ctx context.Context, - request []byte, -) (ids.NodeID, []byte, error) { - // Take a slot from total [activeRequests] and block until a slot becomes available. 
- if err := c.activeRequests.Acquire(ctx, 1); err != nil { - return ids.EmptyNodeID, nil, errAcquiringSemaphore - } - defer c.activeRequests.Release(1) - - nodeID, responseChan, err := c.sendRequestAny(ctx, request) - if err != nil { - return ids.EmptyNodeID, nil, err - } - - response, err := c.awaitResponse(ctx, nodeID, responseChan) - return nodeID, response, err -} - -func (c *networkClient) sendRequestAny( - ctx context.Context, - request []byte, -) (ids.NodeID, chan []byte, error) { - c.lock.Lock() - defer c.lock.Unlock() - - nodeID, ok := c.peers.SelectPeer() - if !ok { - numPeers := c.peers.Size() - return ids.EmptyNodeID, nil, fmt.Errorf("no peers found from %d peers", numPeers) - } - - responseChan, err := c.sendRequestLocked(ctx, nodeID, request) - return nodeID, responseChan, err -} - -// If [errAppSendFailed] is returned this should be considered fatal. -func (c *networkClient) Request( - ctx context.Context, - nodeID ids.NodeID, - request []byte, -) ([]byte, error) { - // Take a slot from total [activeRequests] - // and block until a slot becomes available. - if err := c.activeRequests.Acquire(ctx, 1); err != nil { - return nil, errAcquiringSemaphore - } - defer c.activeRequests.Release(1) - - responseChan, err := c.sendRequest(ctx, nodeID, request) - if err != nil { - return nil, err - } - - return c.awaitResponse(ctx, nodeID, responseChan) -} - -func (c *networkClient) sendRequest( - ctx context.Context, - nodeID ids.NodeID, - request []byte, -) (chan []byte, error) { - c.lock.Lock() - defer c.lock.Unlock() - - return c.sendRequestLocked(ctx, nodeID, request) -} - -// Sends [request] to [nodeID] and returns a channel that will populate the -// response. -// -// If [errAppSendFailed] is returned this should be considered fatal. -// -// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will -// not be added to [c.peers]. -// -// Assumes [c.lock] is held. -func (c *networkClient) sendRequestLocked( - ctx context.Context, - nodeID ids.NodeID, - request []byte, -) (chan []byte, error) { - requestID := c.requestID - c.requestID++ - - c.log.Debug("sending request to peer", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("requestLen", len(request)), - ) - c.peers.RegisterRequest(nodeID) - - // Send an app request to the peer. - nodeIDs := set.Of(nodeID) - // Cancellation is removed from this context to avoid erroring unexpectedly. - // SendAppRequest should be non-blocking and any error other than context - // cancellation is unexpected. - // - // This guarantees that the network should never receive an unexpected - // AppResponse. - ctxWithoutCancel := context.WithoutCancel(ctx) - if err := c.appSender.SendAppRequest(ctxWithoutCancel, nodeIDs, requestID, request); err != nil { - c.lock.Unlock() - c.log.Fatal("failed to send app request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("requestLen", len(request)), - zap.Error(err), - ) - return nil, fmt.Errorf("%w: %w", errAppSendFailed, err) - } - - handler := newResponseHandler() - c.outstandingRequestHandlers[requestID] = handler - return handler.responseChan, nil -} - -// awaitResponse from [nodeID] and returns the response. -// -// Returns an error if the request failed or [ctx] is canceled. -// -// Blocks until a response is received or the [ctx] is canceled fails. -// -// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will -// not be added to [c.peers]. -// -// Assumes [c.lock] is not held. 
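// The legacy client above bounds concurrent outbound requests with a weighted
// semaphore: every request acquires a slot before sending and releases it once
// the response (or failure) arrives. A minimal standalone sketch of that
// pattern; maxActiveRequests and doRequest are illustrative names, not code
// from this repository.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

const maxActiveRequests = 16 // assumed limit, for illustration

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(maxActiveRequests)

	doRequest := func(i int) {
		// Block until a slot is free or the context is canceled.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			return
		}
		defer sem.Release(1)
		fmt.Println("sending request", i)
	}

	for i := 0; i < 4; i++ {
		doRequest(i)
	}
}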
-func (c *networkClient) awaitResponse( - ctx context.Context, - nodeID ids.NodeID, - responseChan chan []byte, -) ([]byte, error) { - var ( - response []byte - responded bool - startTime = time.Now() - ) - select { - case <-ctx.Done(): - c.peers.RegisterFailure(nodeID) - return nil, ctx.Err() - case response, responded = <-responseChan: - } - if !responded { - c.peers.RegisterFailure(nodeID) - return nil, errRequestFailed - } - - elapsedSeconds := time.Since(startTime).Seconds() - bandwidth := float64(len(response)) / (elapsedSeconds + epsilon) - c.peers.RegisterResponse(nodeID, bandwidth) - - c.log.Debug("received response from peer", - zap.Stringer("nodeID", nodeID), - zap.Int("responseLen", len(response)), - ) - return response, nil -} - -func (c *networkClient) Connected( - _ context.Context, - nodeID ids.NodeID, - nodeVersion *version.Application, -) error { - c.log.Debug("adding new peer", zap.Stringer("nodeID", nodeID)) - c.peers.Connected(nodeID, nodeVersion) - return nil -} - -func (c *networkClient) Disconnected(_ context.Context, nodeID ids.NodeID) error { - c.log.Debug("disconnecting peer", zap.Stringer("nodeID", nodeID)) - c.peers.Disconnected(nodeID) - return nil -} diff --git a/x/sync/network_server.go b/x/sync/network_server.go index f7ca7ec618b..fbc81ce9a48 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -10,12 +10,10 @@ import ( "fmt" "time" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/hashing" @@ -50,146 +48,70 @@ var ( errInvalidEndKey = errors.New("end key is Nothing but has value") errInvalidBounds = errors.New("start key is greater than end key") errInvalidRootHash = fmt.Errorf("root hash must have length %d", hashing.HashLen) -) -type NetworkServer struct { - appSender common.AppSender // Used to respond to peer requests via AppResponse. - db DB - log logging.Logger -} + _ p2p.Handler = (*SyncGetChangeProofHandler)(nil) + _ p2p.Handler = (*SyncGetRangeProofHandler)(nil) +) -func NewNetworkServer(appSender common.AppSender, db DB, log logging.Logger) *NetworkServer { - return &NetworkServer{ - appSender: appSender, - db: db, - log: log, +func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { + if mb != nil && !mb.IsNothing { + return maybe.Some(mb.Value) } + return maybe.Nothing[[]byte]() } -// AppRequest is called by avalanchego -> VM when there is an incoming AppRequest from a peer. -// Returns a non-nil error iff we fail to send an app message. This is a fatal error. -// Sends a response back to the sender if length of response returned by the handler > 0. 
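// awaitResponse above scores the responding peer by observed bandwidth:
// response size divided by elapsed time, with a small epsilon so an
// instantaneous response never divides by zero. A tiny sketch of that
// computation; the epsilon value here is an assumption, the real constant is
// defined elsewhere in this package.
package main

import (
	"fmt"
	"time"
)

const epsilon = 1e-6 // assumed; guards against division by zero

// bandwidth returns the peer's throughput in bytes per second.
func bandwidth(responseLen int, elapsed time.Duration) float64 {
	return float64(responseLen) / (elapsed.Seconds() + epsilon)
}

func main() {
	fmt.Printf("%.0f bytes/s\n", bandwidth(4096, 20*time.Millisecond))
}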
-func (s *NetworkServer) AppRequest( - ctx context.Context, - nodeID ids.NodeID, - requestID uint32, - deadline time.Time, - request []byte, -) error { - var req pb.Request - if err := proto.Unmarshal(request, &req); err != nil { - s.log.Debug( - "failed to unmarshal AppRequest", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("requestLen", len(request)), - zap.Error(err), - ) - return nil - } - s.log.Debug( - "processing AppRequest from node", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - - // bufferedDeadline is half the time till actual deadline so that the message has a - // reasonable chance of completing its processing and sending the response to the peer. - timeTillDeadline := time.Until(deadline) - bufferedDeadline := time.Now().Add(timeTillDeadline / 2) - - // check if we have enough time to handle this request. - // TODO danlaine: Do we need this? Why? - if time.Until(bufferedDeadline) < minRequestHandlingDuration { - // Drop the request if we already missed the deadline to respond. - s.log.Info( - "deadline to process AppRequest has expired, skipping", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil +func NewSyncGetChangeProofHandler(log logging.Logger, db DB) *SyncGetChangeProofHandler { + return &SyncGetChangeProofHandler{ + log: log, + db: db, } +} - ctx, cancel := context.WithDeadline(ctx, bufferedDeadline) - defer cancel() - - var err error - switch req := req.GetMessage().(type) { - case *pb.Request_ChangeProofRequest: - err = s.HandleChangeProofRequest(ctx, nodeID, requestID, req.ChangeProofRequest) - case *pb.Request_RangeProofRequest: - err = s.HandleRangeProofRequest(ctx, nodeID, requestID, req.RangeProofRequest) - default: - s.log.Debug( - "unknown AppRequest type", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("requestLen", len(request)), - zap.String("requestType", fmt.Sprintf("%T", req)), - ) - return nil - } +type SyncGetChangeProofHandler struct { + log logging.Logger + db DB +} - if err != nil { - if errors.Is(err, errAppSendFailed) { - return err - } +func (*SyncGetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} - if !isTimeout(err) { - // log unexpected errors instead of returning them, since they are fatal. - s.log.Warn( - "unexpected error handling AppRequest", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) +func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { + request := &pb.SyncGetChangeProofRequest{} + if err := proto.Unmarshal(requestBytes, request); err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to unmarshal request: %s", err), } } - return nil -} -func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { - if mb != nil && !mb.IsNothing { - return maybe.Some(mb.Value) - } - return maybe.Nothing[[]byte]() -} - -// Generates a change proof and sends it to [nodeID]. -// If [errAppSendFailed] is returned, this should be considered fatal. 
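// Under the p2p.Handler interface the server no longer calls SendAppResponse
// itself: AppRequest returns either response bytes or a *common.AppError and
// the p2p layer takes care of delivery. The minimal handler below follows the
// same error-mapping pattern as the sync handlers in this file; echoHandler
// and its echo behaviour are illustrative only.
package example

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/network/p2p"
	"github.com/ava-labs/avalanchego/snow/engine/common"

	pb "github.com/ava-labs/avalanchego/proto/pb/sync"
)

var _ p2p.Handler = (*echoHandler)(nil)

type echoHandler struct{}

func (*echoHandler) AppGossip(context.Context, ids.NodeID, []byte) {}

func (*echoHandler) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) {
	request := &pb.SyncGetRangeProofRequest{}
	if err := proto.Unmarshal(requestBytes, request); err != nil {
		// Malformed requests surface as application errors instead of being
		// silently dropped, so the client observes AppRequestFailed.
		return nil, &common.AppError{
			Code:    p2p.ErrUnexpected.Code,
			Message: fmt.Sprintf("failed to unmarshal request: %s", err),
		}
	}
	// Echo the requested root hash back; a real handler builds a proof here.
	return request.RootHash, nil
}

func (*echoHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) {
	return nil, nil
}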
-func (s *NetworkServer) HandleChangeProofRequest( - ctx context.Context, - nodeID ids.NodeID, - requestID uint32, - req *pb.SyncGetChangeProofRequest, -) error { - if err := validateChangeProofRequest(req); err != nil { - s.log.Debug( - "dropping invalid change proof request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("req", req), - zap.Error(err), - ) - return nil // dropping request + if err := validateChangeProofRequest(request); err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("invalid request: %s", err), + } } // override limits if they exceed caps var ( - keyLimit = min(req.KeyLimit, maxKeyValuesLimit) - bytesLimit = min(int(req.BytesLimit), maxByteSizeLimit) - start = maybeBytesToMaybe(req.StartKey) - end = maybeBytesToMaybe(req.EndKey) + keyLimit = min(request.KeyLimit, maxKeyValuesLimit) + bytesLimit = min(int(request.BytesLimit), maxByteSizeLimit) + start = maybeBytesToMaybe(request.StartKey) + end = maybeBytesToMaybe(request.EndKey) ) - startRoot, err := ids.ToID(req.StartRootHash) + startRoot, err := ids.ToID(request.StartRootHash) if err != nil { - return err + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to parse start root hash: %s", err), + } } - endRoot, err := ids.ToID(req.EndRootHash) + endRoot, err := ids.ToID(request.EndRootHash) if err != nil { - return err + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to parse end root hash: %s", err), + } } for keyLimit > 0 { @@ -198,12 +120,18 @@ func (s *NetworkServer) HandleChangeProofRequest( if !errors.Is(err, merkledb.ErrInsufficientHistory) { // We should only fail to get a change proof if we have insufficient history. // Other errors are unexpected. - return err + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to get change proof: %s", err), + } } if errors.Is(err, merkledb.ErrNoEndRoot) { // [s.db] doesn't have [endRoot] in its history. // We can't generate a change/range proof. Drop this request. - return nil + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to get change proof: %s", err), + } } // [s.db] doesn't have sufficient history to generate change proof. @@ -212,11 +140,11 @@ func (s *NetworkServer) HandleChangeProofRequest( ctx, s.db, &pb.SyncGetRangeProofRequest{ - RootHash: req.EndRootHash, - StartKey: req.StartKey, - EndKey: req.EndKey, - KeyLimit: req.KeyLimit, - BytesLimit: req.BytesLimit, + RootHash: request.EndRootHash, + StartKey: request.StartKey, + EndKey: request.EndKey, + KeyLimit: request.KeyLimit, + BytesLimit: request.BytesLimit, }, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(&pb.SyncGetChangeProofResponse{ @@ -227,20 +155,13 @@ func (s *NetworkServer) HandleChangeProofRequest( }, ) if err != nil { - return err + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to get range proof: %s", err), + } } - if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { - s.log.Fatal( - "failed to send app response", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(proofBytes)), - zap.Error(err), - ) - return fmt.Errorf("%w: %w", errAppSendFailed, err) - } - return nil + return proofBytes, nil } // We generated a change proof. See if it's small enough. 
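// The change proof handler shrinks its response until it fits the requester's
// BytesLimit: it builds a proof over at most keyLimit keys, and if the
// serialized proof is still too large it halves the key limit and tries again,
// eventually giving up with ErrMinProofSizeIsTooLarge. The generic sketch
// below isolates that loop; buildProof, errProofTooLarge and the parameter
// names are illustrative placeholders.
package example

import "errors"

var errProofTooLarge = errors.New("minimum proof size is larger than the byte limit")

// shrinkToFit repeatedly builds a proof over at most keyLimit keys, halving
// keyLimit until the serialized proof fits within bytesLimit.
func shrinkToFit(
	keyLimit uint32,
	bytesLimit int,
	buildProof func(keyLimit uint32) (proofBytes []byte, keysInProof int, err error),
) ([]byte, error) {
	for keyLimit > 0 {
		proofBytes, keysInProof, err := buildProof(keyLimit)
		if err != nil {
			return nil, err
		}
		if len(proofBytes) < bytesLimit {
			return proofBytes, nil
		}
		// Too large: retry with half as many keys.
		keyLimit = uint32(keysInProof) / 2
	}
	return nil, errProofTooLarge
}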
@@ -250,74 +171,84 @@ func (s *NetworkServer) HandleChangeProofRequest( }, }) if err != nil { - return err + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to marshal change proof: %s", err), + } } if len(proofBytes) < bytesLimit { - if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { - s.log.Fatal( - "failed to send app response", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(proofBytes)), - zap.Error(err), - ) - return fmt.Errorf("%w: %w", errAppSendFailed, err) - } - return nil + return proofBytes, nil } // The proof was too large. Try to shrink it. keyLimit = uint32(len(changeProof.KeyChanges)) / 2 } - return ErrMinProofSizeIsTooLarge + + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to generate proof: %s", ErrMinProofSizeIsTooLarge), + } } -// Generates a range proof and sends it to [nodeID]. -// If [errAppSendFailed] is returned, this should be considered fatal. -func (s *NetworkServer) HandleRangeProofRequest( - ctx context.Context, - nodeID ids.NodeID, - requestID uint32, - req *pb.SyncGetRangeProofRequest, -) error { - if err := validateRangeProofRequest(req); err != nil { - s.log.Debug( - "dropping invalid range proof request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("req", req), - zap.Error(err), - ) - return nil // drop request +func (*SyncGetChangeProofHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + +func NewSyncGetRangeProofHandler(log logging.Logger, db DB) *SyncGetRangeProofHandler { + return &SyncGetRangeProofHandler{ + log: log, + db: db, + } +} + +type SyncGetRangeProofHandler struct { + log logging.Logger + db DB +} + +func (*SyncGetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} + +func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { + request := &pb.SyncGetRangeProofRequest{} + if err := proto.Unmarshal(requestBytes, request); err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to unmarshal request: %s", err), + } + } + + if err := validateRangeProofRequest(request); err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("invalid range proof request: %s", err), + } } // override limits if they exceed caps - req.KeyLimit = min(req.KeyLimit, maxKeyValuesLimit) - req.BytesLimit = min(req.BytesLimit, maxByteSizeLimit) + request.KeyLimit = min(request.KeyLimit, maxKeyValuesLimit) + request.BytesLimit = min(request.BytesLimit, maxByteSizeLimit) proofBytes, err := getRangeProof( ctx, s.db, - req, + request, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(rangeProof.ToProto()) }, ) if err != nil { - return err - } - if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { - s.log.Fatal( - "failed to send app response", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(proofBytes)), - zap.Error(err), - ) - return fmt.Errorf("%w: %w", errAppSendFailed, err) + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to get range proof: %s", err), + } } - return nil + + return proofBytes, nil +} + +func (*SyncGetRangeProofHandler) 
CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil } // Get the range proof specified by [req]. @@ -371,19 +302,6 @@ func getRangeProof( return nil, ErrMinProofSizeIsTooLarge } -// isTimeout returns true if err is a timeout from a context cancellation -// or a context cancellation over grpc. -func isTimeout(err error) bool { - // handle grpc wrapped DeadlineExceeded - if e, ok := status.FromError(err); ok { - if e.Code() == codes.DeadlineExceeded { - return true - } - } - // otherwise, check for context.DeadlineExceeded directly - return errors.Is(err, context.DeadlineExceeded) -} - // Returns nil iff [req] is well-formed. func validateChangeProofRequest(req *pb.SyncGetChangeProofRequest) error { switch { diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go index 3970588b8d4..84dbd1c1268 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -10,12 +10,13 @@ import ( "time" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common/commonmock" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/x/merkledb" @@ -27,45 +28,52 @@ func Test_Server_GetRangeProof(t *testing.T) { t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 - smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, defaultRequestKeyLimit, 1) + smallTrieDB, err := generateTrieWithMinKeyLen(t, r, defaultRequestKeyLimit, 1) require.NoError(t, err) smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) require.NoError(t, err) - tests := map[string]struct { + tests := []struct { + name string request *pb.SyncGetRangeProofRequest - expectedErr error + expectedErr *common.AppError expectedResponseLen int expectedMaxResponseBytes int nodeID ids.NodeID proofNil bool }{ - "proof too large": { + { + name: "proof too large", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: 1000, }, proofNil: true, - expectedErr: ErrMinProofSizeIsTooLarge, + expectedErr: p2p.ErrUnexpected, }, - "byteslimit is 0": { + { + name: "byteslimit is 0", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: 0, }, - proofNil: true, + proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "keylimit is 0": { + { + name: "keylimit is 0", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: 0, + KeyLimit: 0, + BytesLimit: defaultRequestByteSizeLimit, }, - proofNil: true, + proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "keys out of order": { + { + name: "keys out of order", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, @@ -73,9 +81,11 @@ func Test_Server_GetRangeProof(t *testing.T) { StartKey: &pb.MaybeBytes{Value: []byte{1}}, EndKey: &pb.MaybeBytes{Value: []byte{0}}, }, - proofNil: true, + proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "key limit too large": { + { + name: "key limit too large", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: 2 * defaultRequestKeyLimit, @@ -83,7 +93,8 @@ func 
Test_Server_GetRangeProof(t *testing.T) { }, expectedResponseLen: defaultRequestKeyLimit, }, - "bytes limit too large": { + { + name: "bytes limit too large", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, @@ -91,52 +102,46 @@ func Test_Server_GetRangeProof(t *testing.T) { }, expectedMaxResponseBytes: defaultRequestByteSizeLimit, }, - "empty proof": { + { + name: "empty proof", request: &pb.SyncGetRangeProofRequest{ RootHash: ids.Empty[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: defaultRequestByteSizeLimit, }, - proofNil: true, + proofNil: true, + expectedErr: p2p.ErrUnexpected, }, } - for name, test := range tests { - t.Run(name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - sender := commonmock.NewSender(ctrl) - var proof *merkledb.RangeProof - sender.EXPECT().SendAppResponse( - gomock.Any(), // ctx - gomock.Any(), // nodeID - gomock.Any(), // requestID - gomock.Any(), // responseBytes - ).DoAndReturn( - func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { - // grab a copy of the proof so we can inspect it later - if !test.proofNil { - var proofProto pb.RangeProof - require.NoError(proto.Unmarshal(responseBytes, &proofProto)) - - var p merkledb.RangeProof - require.NoError(p.UnmarshalProto(&proofProto)) - proof = &p - } - return nil - }, - ).AnyTimes() - handler := NewNetworkServer(sender, smallTrieDB, logging.NoLog{}) - err := handler.HandleRangeProofRequest(context.Background(), test.nodeID, 0, test.request) + + handler := NewSyncGetRangeProofHandler(logging.NoLog{}, smallTrieDB) + requestBytes, err := proto.Marshal(test.request) + require.NoError(err) + responseBytes, err := handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { return } if test.proofNil { - require.Nil(proof) + require.Nil(responseBytes) return } - require.NotNil(proof) + require.NotNil(responseBytes) + + var proof *merkledb.RangeProof + if !test.proofNil { + var proofProto pb.RangeProof + require.NoError(proto.Unmarshal(responseBytes, &proofProto)) + + var p merkledb.RangeProof + require.NoError(p.UnmarshalProto(&proofProto)) + proof = &p + } + if test.expectedResponseLen > 0 { require.LessOrEqual(len(proof.KeyValues), test.expectedResponseLen) } @@ -155,37 +160,45 @@ func Test_Server_GetChangeProof(t *testing.T) { now := time.Now().UnixNano() t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 - trieDB, _, err := generateTrieWithMinKeyLen(t, r, defaultRequestKeyLimit, 1) - require.NoError(t, err) - startRoot, err := trieDB.GetMerkleRoot(context.Background()) + serverDB, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(t, err) + startRoot, err := serverDB.GetMerkleRoot(context.Background()) require.NoError(t, err) // create changes - ops := make([]database.BatchOp, 0, 300) - for x := 0; x < 300; x++ { - key := make([]byte, r.Intn(100)) - _, err = r.Read(key) - require.NoError(t, err) - - val := make([]byte, r.Intn(100)) - _, err = r.Read(val) - require.NoError(t, err) - - ops = append(ops, database.BatchOp{Key: key, Value: val}) + for x := 0; x < defaultRequestKeyLimit/2; x++ { + ops := make([]database.BatchOp, 0, 11) + // add some key/values + for i := 0; i < 10; i++ { + key := make([]byte, r.Intn(100)) + _, err = r.Read(key) + require.NoError(t, err) + + 
val := make([]byte, r.Intn(100)) + _, err = r.Read(val) + require.NoError(t, err) + + ops = append(ops, database.BatchOp{Key: key, Value: val}) + } + // delete a key deleteKeyStart := make([]byte, r.Intn(10)) _, err = r.Read(deleteKeyStart) require.NoError(t, err) - it := trieDB.NewIteratorWithStart(deleteKeyStart) + it := serverDB.NewIteratorWithStart(deleteKeyStart) if it.Next() { ops = append(ops, database.BatchOp{Key: it.Key(), Delete: true}) } require.NoError(t, it.Error()) it.Release() - view, err := trieDB.NewView( + view, err := serverDB.NewView( context.Background(), merkledb.ViewChanges{BatchOps: ops}, ) @@ -193,39 +206,71 @@ func Test_Server_GetChangeProof(t *testing.T) { require.NoError(t, view.CommitToDB(context.Background())) } - endRoot, err := trieDB.GetMerkleRoot(context.Background()) + endRoot, err := serverDB.GetMerkleRoot(context.Background()) require.NoError(t, err) fakeRootID := ids.GenerateTestID() - tests := map[string]struct { + tests := []struct { + name string request *pb.SyncGetChangeProofRequest - expectedErr error + expectedErr *common.AppError expectedResponseLen int expectedMaxResponseBytes int nodeID ids.NodeID - proofNil bool expectRangeProof bool // Otherwise expect change proof }{ - "byteslimit is 0": { + { + name: "proof restricted by BytesLimit", request: &pb.SyncGetChangeProofRequest{ StartRootHash: startRoot[:], EndRootHash: endRoot[:], KeyLimit: defaultRequestKeyLimit, - BytesLimit: 0, + BytesLimit: 10000, + }, + }, + { + name: "full response for small (single request) trie", + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedResponseLen: defaultRequestKeyLimit, + }, + { + name: "partial response to request for entire trie (full leaf limit)", + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, - proofNil: true, + expectedResponseLen: defaultRequestKeyLimit, }, - "keylimit is 0": { + { + name: "byteslimit is 0", request: &pb.SyncGetChangeProofRequest{ StartRootHash: startRoot[:], EndRootHash: endRoot[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: 0, }, - proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "keys out of order": { + { + name: "keylimit is 0", + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: 0, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedErr: p2p.ErrUnexpected, + }, + { + name: "keys out of order", request: &pb.SyncGetChangeProofRequest{ StartRootHash: startRoot[:], EndRootHash: endRoot[:], @@ -234,9 +279,10 @@ func Test_Server_GetChangeProof(t *testing.T) { StartKey: &pb.MaybeBytes{Value: []byte{1}}, EndKey: &pb.MaybeBytes{Value: []byte{0}}, }, - proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "key limit too large": { + { + name: "key limit too large", request: &pb.SyncGetChangeProofRequest{ StartRootHash: startRoot[:], EndRootHash: endRoot[:], @@ -245,7 +291,8 @@ func Test_Server_GetChangeProof(t *testing.T) { }, expectedResponseLen: defaultRequestKeyLimit, }, - "bytes limit too large": { + { + name: "bytes limit too large", request: &pb.SyncGetChangeProofRequest{ StartRootHash: startRoot[:], EndRootHash: endRoot[:], @@ -254,7 +301,8 @@ func Test_Server_GetChangeProof(t *testing.T) { }, expectedMaxResponseBytes: defaultRequestByteSizeLimit, }, - "insufficient history for change 
proof; return range proof": { + { + name: "insufficient history for change proof; return range proof", request: &pb.SyncGetChangeProofRequest{ // This root doesn't exist so server has insufficient history // to serve a change proof @@ -266,7 +314,8 @@ func Test_Server_GetChangeProof(t *testing.T) { expectedMaxResponseBytes: defaultRequestByteSizeLimit, expectRangeProof: true, }, - "insufficient history for change proof or range proof": { + { + name: "insufficient history for change proof or range proof", request: &pb.SyncGetChangeProofRequest{ // These roots don't exist so server has insufficient history // to serve a change proof or range proof @@ -276,9 +325,10 @@ func Test_Server_GetChangeProof(t *testing.T) { BytesLimit: defaultRequestByteSizeLimit, }, expectedMaxResponseBytes: defaultRequestByteSizeLimit, - proofNil: true, + expectedErr: p2p.ErrUnexpected, }, - "empt proof": { + { + name: "empty proof", request: &pb.SyncGetChangeProofRequest{ StartRootHash: fakeRootID[:], EndRootHash: ids.Empty[:], @@ -286,53 +336,28 @@ func Test_Server_GetChangeProof(t *testing.T) { BytesLimit: defaultRequestByteSizeLimit, }, expectedMaxResponseBytes: defaultRequestByteSizeLimit, - proofNil: true, + expectedErr: p2p.ErrUnexpected, }, } - for name, test := range tests { - t.Run(name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Store proof returned by server in [proofResult] - var proofResult *pb.SyncGetChangeProofResponse - var proofBytes []byte - sender := commonmock.NewSender(ctrl) - sender.EXPECT().SendAppResponse( - gomock.Any(), // ctx - gomock.Any(), // nodeID - gomock.Any(), // requestID - gomock.Any(), // responseBytes - ).DoAndReturn( - func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { - if test.proofNil { - return nil - } - proofBytes = responseBytes - - // grab a copy of the proof so we can inspect it later - var responseProto pb.SyncGetChangeProofResponse - require.NoError(proto.Unmarshal(responseBytes, &responseProto)) - proofResult = &responseProto - - return nil - }, - ).AnyTimes() - - handler := NewNetworkServer(sender, trieDB, logging.NoLog{}) - err := handler.HandleChangeProofRequest(context.Background(), test.nodeID, 0, test.request) + + handler := NewSyncGetChangeProofHandler(logging.NoLog{}, serverDB) + + requestBytes, err := proto.Marshal(test.request) + require.NoError(err) + proofBytes, err := handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + require.Nil(proofBytes) return } - if test.proofNil { - require.Nil(proofResult) - return - } - require.NotNil(proofResult) + proofResult := &pb.SyncGetChangeProofResponse{} + require.NoError(proto.Unmarshal(proofBytes, proofResult)) if test.expectRangeProof { require.NotNil(proofResult.GetRangeProof()) @@ -348,7 +373,6 @@ func Test_Server_GetChangeProof(t *testing.T) { } } - require.NoError(err) require.LessOrEqual(len(proofBytes), int(test.request.BytesLimit)) if test.expectedMaxResponseBytes > 0 { require.LessOrEqual(len(proofBytes), test.expectedMaxResponseBytes) @@ -356,112 +380,3 @@ func Test_Server_GetChangeProof(t *testing.T) { }) } } - -// Test that AppRequest returns a non-nil error if we fail to send -// an AppRequest or AppResponse. 
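// Test_Server_GetChangeProof above checks whether the response carries a
// change proof or, when the server has insufficient history, a range proof
// rooted at the requested end root. The helper below sketches how a caller can
// branch on that oneof; it is illustrative and not code from this repository.
package example

import (
	"errors"

	"google.golang.org/protobuf/proto"

	"github.com/ava-labs/avalanchego/x/merkledb"

	pb "github.com/ava-labs/avalanchego/proto/pb/sync"
)

var errEmptyResponse = errors.New("response contains neither a change proof nor a range proof")

func parseChangeProofResponse(responseBytes []byte) (*merkledb.ChangeProof, *merkledb.RangeProof, error) {
	var response pb.SyncGetChangeProofResponse
	if err := proto.Unmarshal(responseBytes, &response); err != nil {
		return nil, nil, err
	}

	switch {
	case response.GetChangeProof() != nil:
		var changeProof merkledb.ChangeProof
		if err := changeProof.UnmarshalProto(response.GetChangeProof()); err != nil {
			return nil, nil, err
		}
		return &changeProof, nil, nil
	case response.GetRangeProof() != nil:
		var rangeProof merkledb.RangeProof
		if err := rangeProof.UnmarshalProto(response.GetRangeProof()); err != nil {
			return nil, nil, err
		}
		return nil, &rangeProof, nil
	default:
		return nil, nil, errEmptyResponse
	}
}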
-func TestAppRequestErrAppSendFailed(t *testing.T) { - startRootID := ids.GenerateTestID() - endRootID := ids.GenerateTestID() - - type test struct { - name string - request *pb.Request - handlerFunc func(*gomock.Controller) *NetworkServer - expectedErr error - } - - tests := []test{ - { - name: "GetChangeProof", - request: &pb.Request{ - Message: &pb.Request_ChangeProofRequest{ - ChangeProofRequest: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRootID[:], - EndRootHash: endRootID[:], - StartKey: &pb.MaybeBytes{Value: []byte{1}}, - EndKey: &pb.MaybeBytes{Value: []byte{2}}, - KeyLimit: 100, - BytesLimit: 100, - }, - }, - }, - handlerFunc: func(ctrl *gomock.Controller) *NetworkServer { - sender := commonmock.NewSender(ctrl) - sender.EXPECT().SendAppResponse( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(errAppSendFailed).AnyTimes() - - db := merkledb.NewMockMerkleDB(ctrl) - db.EXPECT().GetChangeProof( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&merkledb.ChangeProof{}, nil).Times(1) - - return NewNetworkServer(sender, db, logging.NoLog{}) - }, - expectedErr: errAppSendFailed, - }, - { - name: "GetRangeProof", - request: &pb.Request{ - Message: &pb.Request_RangeProofRequest{ - RangeProofRequest: &pb.SyncGetRangeProofRequest{ - RootHash: endRootID[:], - StartKey: &pb.MaybeBytes{Value: []byte{1}}, - EndKey: &pb.MaybeBytes{Value: []byte{2}}, - KeyLimit: 100, - BytesLimit: 100, - }, - }, - }, - handlerFunc: func(ctrl *gomock.Controller) *NetworkServer { - sender := commonmock.NewSender(ctrl) - sender.EXPECT().SendAppResponse( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(errAppSendFailed).AnyTimes() - - db := merkledb.NewMockMerkleDB(ctrl) - db.EXPECT().GetRangeProofAtRoot( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&merkledb.RangeProof{}, nil).Times(1) - - return NewNetworkServer(sender, db, logging.NoLog{}) - }, - expectedErr: errAppSendFailed, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - handler := tt.handlerFunc(ctrl) - requestBytes, err := proto.Marshal(tt.request) - require.NoError(err) - - err = handler.AppRequest( - context.Background(), - ids.EmptyNodeID, - 0, - time.Now().Add(10*time.Second), - requestBytes, - ) - require.ErrorIs(err, tt.expectedErr) - }) - } -} diff --git a/x/sync/response_handler.go b/x/sync/response_handler.go deleted file mode 100644 index 3f14e94dae4..00000000000 --- a/x/sync/response_handler.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -var _ ResponseHandler = (*responseHandler)(nil) - -// Handles responses/failure notifications for a sent request. -// Exactly one of OnResponse or OnFailure is eventually called. -type ResponseHandler interface { - // Called when [response] is received. - OnResponse(response []byte) - // Called when the request failed or timed out. - OnFailure() -} - -func newResponseHandler() *responseHandler { - return &responseHandler{responseChan: make(chan []byte)} -} - -// Implements [ResponseHandler]. -// Used to wait for a response after making a synchronous request. -// responseChan contains response bytes if the request succeeded. -// responseChan is closed in either fail or success scenario. 
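// The responseHandler below signals success by sending the response bytes and
// then closing its channel, and signals failure by closing the channel without
// sending anything; the receiver distinguishes the two cases with the comma-ok
// form of a channel receive. A standalone sketch of that pattern (buffered
// channels are used here only to keep the example single-goroutine):
package main

import "fmt"

func main() {
	success := make(chan []byte, 1)
	success <- []byte("proof bytes")
	close(success)

	failure := make(chan []byte)
	close(failure)

	if response, ok := <-success; ok {
		fmt.Println("got response:", string(response))
	}
	if _, ok := <-failure; !ok {
		fmt.Println("request failed or timed out")
	}
}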
-type responseHandler struct { - // If [OnResponse] is called, the response bytes are sent on this channel. - // If [OnFailure] is called, the channel is closed without sending bytes. - responseChan chan []byte -} - -// OnResponse passes the response bytes to the responseChan and closes the -// channel. -func (h *responseHandler) OnResponse(response []byte) { - h.responseChan <- response - close(h.responseChan) -} - -// OnFailure closes the channel. -func (h *responseHandler) OnFailure() { - close(h.responseChan) -} diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 3d5fe6d325c..7f692af6651 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -11,64 +11,24 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/p2ptest" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" - - pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) -func newCallthroughSyncClient(ctrl *gomock.Controller, db merkledb.MerkleDB) *MockClient { - syncClient := NewMockClient(ctrl) - syncClient.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { - return db.GetRangeProof( - context.Background(), - maybeBytesToMaybe(request.StartKey), - maybeBytesToMaybe(request.EndKey), - int(request.KeyLimit), - ) - }).AnyTimes() - syncClient.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *pb.SyncGetChangeProofRequest, _ DB) (*merkledb.ChangeOrRangeProof, error) { - startRoot, err := ids.ToID(request.StartRootHash) - if err != nil { - return nil, err - } - - endRoot, err := ids.ToID(request.EndRootHash) - if err != nil { - return nil, err - } - - changeProof, err := db.GetChangeProof( - context.Background(), - startRoot, - endRoot, - maybeBytesToMaybe(request.StartKey), - maybeBytesToMaybe(request.EndKey), - int(request.KeyLimit), - ) - if err != nil { - return nil, err - } - return &merkledb.ChangeOrRangeProof{ - ChangeProof: changeProof, - }, nil - }).AnyTimes() - return syncClient -} +var _ p2p.Handler = (*testHandler)(nil) func Test_Creation(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() db, err := merkledb.New( context.Background(), @@ -79,20 +39,18 @@ func Test_Creation(t *testing.T) { syncer, err := NewManager(ManagerConfig{ DB: db, - Client: NewMockClient(ctrl), - TargetRoot: ids.Empty, + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) } func Test_Completion(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() emptyDB, err := merkledb.New( context.Background(), @@ -111,14 +69,16 @@ func Test_Completion(t *testing.T) { ) require.NoError(err) + ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - Client: newCallthroughSyncClient(ctrl, emptyDB), + RangeProofClient: p2ptest.NewClient(t, 
ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, emptyDB)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, emptyDB)), TargetRoot: emptyRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) @@ -198,8 +158,6 @@ func Test_Midpoint(t *testing.T) { func Test_Sync_FindNextKey_InSync(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -216,14 +174,16 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { ) require.NoError(err) + ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - Client: newCallthroughSyncClient(ctrl, dbToSync), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) @@ -277,8 +237,6 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { func Test_Sync_FindNextKey_Deleted(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() db, err := merkledb.New( context.Background(), @@ -294,12 +252,13 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { syncer, err := NewManager(ManagerConfig{ DB: db, - Client: NewMockClient(ctrl), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) // 0x12 was "deleted" and there should be no extra node in the proof since there was nothing with a common prefix @@ -324,7 +283,6 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) db, err := merkledb.New( context.Background(), @@ -343,12 +301,13 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { syncer, err := NewManager(ManagerConfig{ DB: db, - Client: NewMockClient(ctrl), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NoError(db.Put([]byte{0x11, 0x15}, []byte{4})) @@ -359,7 +318,6 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) db, err := merkledb.New( context.Background(), @@ -379,12 +337,13 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { syncer, err := NewManager(ManagerConfig{ DB: db, - Client: NewMockClient(ctrl), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NoError(db.Delete([]byte{0x12, 0xA0})) @@ -395,8 +354,6 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer 
ctrl.Finish() now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -412,14 +369,17 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { newDefaultDBConfig(), ) require.NoError(err) + + ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - Client: newCallthroughSyncClient(ctrl, dbToSync), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) @@ -464,8 +424,6 @@ func TestFindNextKeyEmptyEndProof(t *testing.T) { now := time.Now().UnixNano() t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 - ctrl := gomock.NewController(t) - defer ctrl.Finish() db, err := merkledb.New( context.Background(), @@ -474,14 +432,16 @@ func TestFindNextKeyEmptyEndProof(t *testing.T) { ) require.NoError(err) + ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - Client: NewMockClient(ctrl), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, db)), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) @@ -525,8 +485,6 @@ func isPrefix(data []byte, prefix []byte) bool { func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -542,14 +500,17 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { newDefaultDBConfig(), ) require.NoError(err) + + ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - Client: newCallthroughSyncClient(ctrl, dbToSync), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) require.NotNil(syncer) require.NoError(syncer.Start(context.Background())) @@ -576,13 +537,12 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { // Test findNextKey by computing the expected result in a naive, inefficient // way and comparing it to the actual result + func TestFindNextKeyRandom(t *testing.T) { now := time.Now().UnixNano() t.Logf("seed: %d", now) rand := rand.New(rand.NewSource(now)) // #nosec G404 require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create a "remote" database and "local" database remoteDB, err := merkledb.New( @@ -767,12 +727,13 @@ func TestFindNextKeyRandom(t *testing.T) { // Get the actual value from the syncer syncer, err := NewManager(ManagerConfig{ DB: localDB, - Client: NewMockClient(ctrl), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: ids.GenerateTestID(), SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, - }) + }, prometheus.NewRegistry()) require.NoError(err) gotFirstDiff, err := syncer.findNextKey( @@ -793,68 +754,257 @@ func TestFindNextKeyRandom(t *testing.T) 
{ } } +// Tests that we are able to sync to the correct root while the server is +// updating func Test_Sync_Result_Correct_Root(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - now := time.Now().UnixNano() t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 - dbToSync, err := generateTrie(t, r, 1000) - require.NoError(err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(err) - db, err := merkledb.New( - context.Background(), - memdb.New(), - newDefaultDBConfig(), - ) - require.NoError(err) - syncer, err := NewManager(ManagerConfig{ - DB: db, - Client: newCallthroughSyncClient(ctrl, dbToSync), - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - BranchFactor: merkledb.BranchFactor16, - }) - require.NoError(err) - require.NotNil(syncer) - require.NoError(syncer.Start(context.Background())) + tests := []struct { + name string + db merkledb.MerkleDB + rangeProofClient func(db merkledb.MerkleDB) *p2p.Client + changeProofClient func(db merkledb.MerkleDB) *p2p.Client + }{ + { + name: "range proof bad response - too many leaves in response", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) + }) - require.NoError(syncer.Wait(context.Background())) - require.NoError(syncer.Error()) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - removed first key in response", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] + }) - // new db has fully sync'ed and should be at the same root as the original db - newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(err) - require.Equal(syncRoot, newRoot) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - removed first key in response and replaced proof", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] + response.KeyValues = []merkledb.KeyValue{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + } + response.StartProof = []merkledb.ProofNode{ + { + Key: merkledb.Key{}, + }, + } + response.EndProof = []merkledb.ProofNode{ + { + Key: merkledb.Key{}, + }, + } + }) - // make sure they stay in sync - addkey := make([]byte, r.Intn(50)) - _, err = r.Read(addkey) - require.NoError(err) - val := make([]byte, r.Intn(50)) - _, err = r.Read(val) - require.NoError(err) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - removed key from middle of response", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + i := rand.Intn(max(1, len(response.KeyValues)-1)) // #nosec G404 + _ = slices.Delete(response.KeyValues, i, min(len(response.KeyValues), i+1)) + }) - require.NoError(db.Put(addkey, val)) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - start and end proof nodes removed", + 
rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.StartProof = nil + response.EndProof = nil + }) - require.NoError(dbToSync.Put(addkey, val)) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - end proof removed", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.EndProof = nil + }) - syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) - require.NoError(err) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof bad response - empty proof", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + response.StartProof = nil + response.EndProof = nil + response.KeyValues = nil + }) - newRoot, err = db.GetMerkleRoot(context.Background()) - require.NoError(err) - require.Equal(syncRoot, newRoot) + return p2ptest.NewClient(t, context.Background(), handler) + }, + }, + { + name: "range proof server flake", + rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + return p2ptest.NewClient(t, context.Background(), &flakyHandler{ + Handler: NewSyncGetRangeProofHandler(logging.NoLog{}, db), + c: &counter{m: 2}, + }) + }, + }, + { + name: "change proof bad response - too many keys in response", + changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { + handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) 
+				})
+
+				return p2ptest.NewClient(t, context.Background(), handler)
+			},
+		},
+		{
+			name: "change proof bad response - removed first key in response",
+			changeProofClient: func(db merkledb.MerkleDB) *p2p.Client {
+				handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) {
+					response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):]
+				})
+
+				return p2ptest.NewClient(t, context.Background(), handler)
+			},
+		},
+		{
+			name: "change proof bad response - removed key from middle of response",
+			changeProofClient: func(db merkledb.MerkleDB) *p2p.Client {
+				handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) {
+					i := rand.Intn(max(1, len(response.KeyChanges)-1)) // #nosec G404
+					_ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1))
+				})
+
+				return p2ptest.NewClient(t, context.Background(), handler)
+			},
+		},
+		{
+			name: "all proof keys removed from response",
+			changeProofClient: func(db merkledb.MerkleDB) *p2p.Client {
+				handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) {
+					response.StartProof = nil
+					response.EndProof = nil
+				})
+
+				return p2ptest.NewClient(t, context.Background(), handler)
+			},
+		},
+		{
+			name: "flaky change proof client",
+			changeProofClient: func(db merkledb.MerkleDB) *p2p.Client {
+				return p2ptest.NewClient(t, context.Background(), &flakyHandler{
+					Handler: NewSyncGetChangeProofHandler(logging.NoLog{}, db),
+					c: &counter{m: 2},
+				})
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			require := require.New(t)
+
+			ctx := context.Background()
+			dbToSync, err := generateTrie(t, r, 3*maxKeyValuesLimit)
+			require.NoError(err)
+
+			syncRoot, err := dbToSync.GetMerkleRoot(ctx)
+			require.NoError(err)
+
+			db, err := merkledb.New(
+				ctx,
+				memdb.New(),
+				newDefaultDBConfig(),
+			)
+			require.NoError(err)
+
+			var (
+				rangeProofClient  *p2p.Client
+				changeProofClient *p2p.Client
+			)
+
+			rangeProofHandler := NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)
+			rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler)
+			if tt.rangeProofClient != nil {
+				rangeProofClient = tt.rangeProofClient(dbToSync)
+			}
+
+			changeProofHandler := NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)
+			changeProofClient = p2ptest.NewClient(t, ctx, changeProofHandler)
+			if tt.changeProofClient != nil {
+				changeProofClient = tt.changeProofClient(dbToSync)
+			}
+
+			syncer, err := NewManager(ManagerConfig{
+				DB: db,
+				RangeProofClient: rangeProofClient,
+				ChangeProofClient: changeProofClient,
+				TargetRoot: syncRoot,
+				SimultaneousWorkLimit: 5,
+				Log: logging.NoLog{},
+				BranchFactor: merkledb.BranchFactor16,
+			}, prometheus.NewRegistry())
+
+			require.NoError(err)
+			require.NotNil(syncer)
+
+			// Start syncing from the server
+			require.NoError(syncer.Start(ctx))
+
+			// Simulate writes on the server
+			// TODO more than a single write when API is less flaky
+			for i := 0; i <= 1; i++ {
+				addkey := make([]byte, r.Intn(50))
+				_, err = r.Read(addkey)
+				require.NoError(err)
+				val := make([]byte, r.Intn(50))
+				_, err = r.Read(val)
+				require.NoError(err)
+
+				// Update the server's root + our sync target
+				require.NoError(dbToSync.Put(addkey, val))
+				targetRoot, err := dbToSync.GetMerkleRoot(ctx)
+				require.NoError(err)
+
+				// Simulate client periodically recording root updates
+				require.NoError(syncer.UpdateSyncTarget(targetRoot))
+			}
+
+			// Block until all syncing is done
+			require.NoError(syncer.Wait(ctx))
+
+			// We should have the same resulting root as the server
+			wantRoot, err := dbToSync.GetMerkleRoot(context.Background())
+			require.NoError(err)
+
+			gotRoot, err := db.GetMerkleRoot(context.Background())
+			require.NoError(err)
+			require.Equal(wantRoot, gotRoot)
+		})
+	}
 }
 
 func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) {
 	require := require.New(t)
-	ctrl := gomock.NewController(t)
 
 	now := time.Now().UnixNano()
 	t.Logf("seed: %d", now)
@@ -871,14 +1021,16 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) {
 	)
 	require.NoError(err)
 
+	ctx := context.Background()
 	syncer, err := NewManager(ManagerConfig{
 		DB: db,
-		Client: newCallthroughSyncClient(ctrl, dbToSync),
+		RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)),
+		ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)),
 		TargetRoot: syncRoot,
 		SimultaneousWorkLimit: 5,
 		Log: logging.NoLog{},
 		BranchFactor: merkledb.BranchFactor16,
-	})
+	}, prometheus.NewRegistry())
 	require.NoError(err)
 	require.NotNil(syncer)
 	require.NoError(syncer.Start(context.Background()))
@@ -899,12 +1051,13 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) {
 
 	newSyncer, err := NewManager(ManagerConfig{
 		DB: db,
-		Client: newCallthroughSyncClient(ctrl, dbToSync),
+		RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)),
+		ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)),
 		TargetRoot: syncRoot,
 		SimultaneousWorkLimit: 5,
 		Log: logging.NoLog{},
 		BranchFactor: merkledb.BranchFactor16,
-	})
+	}, prometheus.NewRegistry())
 	require.NoError(err)
 	require.NotNil(newSyncer)
 
@@ -917,71 +1070,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) {
 	require.Equal(syncRoot, newRoot)
 }
 
-func Test_Sync_Error_During_Sync(t *testing.T) {
-	require := require.New(t)
-	ctrl := gomock.NewController(t)
-	now := time.Now().UnixNano()
-	t.Logf("seed: %d", now)
-	r := rand.New(rand.NewSource(now)) // #nosec G404
-
-	dbToSync, err := generateTrie(t, r, 100)
-	require.NoError(err)
-
-	syncRoot, err := dbToSync.GetMerkleRoot(context.Background())
-	require.NoError(err)
-
-	db, err := merkledb.New(
-		context.Background(),
-		memdb.New(),
-		newDefaultDBConfig(),
-	)
-	require.NoError(err)
-
-	client := NewMockClient(ctrl)
-	client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn(
-		func(context.Context, *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) {
-			return nil, errInvalidRangeProof
-		},
-	).AnyTimes()
-	client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
-		func(ctx context.Context, request *pb.SyncGetChangeProofRequest, _ DB) (*merkledb.ChangeOrRangeProof, error) {
-			startRoot, err := ids.ToID(request.StartRootHash)
-			require.NoError(err)
-
-			endRoot, err := ids.ToID(request.EndRootHash)
-			require.NoError(err)
-
-			changeProof, err := dbToSync.GetChangeProof(ctx, startRoot, endRoot, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit))
-			if err != nil {
-				return nil, err
-			}
-
-			return &merkledb.ChangeOrRangeProof{
-				ChangeProof: changeProof,
-			}, nil
-		},
-	).AnyTimes()
-
-	syncer, err := NewManager(ManagerConfig{
-		DB: db,
-		Client: client,
-		TargetRoot: syncRoot,
-		SimultaneousWorkLimit: 5,
-		Log: logging.NoLog{},
-		BranchFactor: merkledb.BranchFactor16,
-	})
-	require.NoError(err)
-	require.NotNil(syncer)
-
-	require.NoError(syncer.Start(context.Background()))
-
-	err = syncer.Wait(context.Background())
-	require.ErrorIs(err, errInvalidRangeProof)
-}
-
 func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) {
 	require := require.New(t)
-	ctrl := gomock.NewController(t)
 
 	now := time.Now().UnixNano()
 	t.Logf("seed: %d", now)
@@ -1030,44 +1120,26 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) {
 	updatedRootChan := make(chan struct{}, 1)
 	updatedRootChan <- struct{}{}
 
-	client := NewMockClient(ctrl)
-	client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn(
-		func(ctx context.Context, request *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) {
-			<-updatedRootChan
-			root, err := ids.ToID(request.RootHash)
-			require.NoError(err)
-			return dbToSync.GetRangeProofAtRoot(ctx, root, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit))
-		},
-	).AnyTimes()
-	client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
-		func(ctx context.Context, request *pb.SyncGetChangeProofRequest, _ DB) (*merkledb.ChangeOrRangeProof, error) {
-			<-updatedRootChan
-
-			startRoot, err := ids.ToID(request.StartRootHash)
-			require.NoError(err)
-
-			endRoot, err := ids.ToID(request.EndRootHash)
-			require.NoError(err)
-
-			changeProof, err := dbToSync.GetChangeProof(ctx, startRoot, endRoot, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit))
-			if err != nil {
-				return nil, err
-			}
+	ctx := context.Background()
+	rangeProofClient := p2ptest.NewClient(t, ctx, &testHandler{
+		handler: NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync),
+		updatedRootChan: updatedRootChan,
+	})
-			return &merkledb.ChangeOrRangeProof{
-				ChangeProof: changeProof,
-			}, nil
-		},
-	).AnyTimes()
+	changeProofClient := p2ptest.NewClient(t, ctx, &testHandler{
+		handler: NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync),
+		updatedRootChan: updatedRootChan,
+	})
 
 	syncer, err := NewManager(ManagerConfig{
 		DB: db,
-		Client: client,
+		RangeProofClient: rangeProofClient,
+		ChangeProofClient: changeProofClient,
 		TargetRoot: firstSyncRoot,
 		SimultaneousWorkLimit: 5,
 		Log: logging.NoLog{},
 		BranchFactor: merkledb.BranchFactor16,
-	})
+	}, prometheus.NewRegistry())
 	require.NoError(err)
 	require.NotNil(syncer)
@@ -1098,16 +1170,22 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) {
 
 func Test_Sync_UpdateSyncTarget(t *testing.T) {
 	require := require.New(t)
-	ctrl := gomock.NewController(t)
 
+	db, err := merkledb.New(
+		context.Background(),
+		memdb.New(),
+		newDefaultDBConfig(),
+	)
+	require.NoError(err)
 	m, err := NewManager(ManagerConfig{
-		DB: merkledb.NewMockMerkleDB(ctrl), // Not used
-		Client: NewMockClient(ctrl), // Not used
+		DB: db,
+		RangeProofClient: &p2p.Client{},
+		ChangeProofClient: &p2p.Client{},
 		TargetRoot: ids.Empty,
 		SimultaneousWorkLimit: 5,
 		Log: logging.NoLog{},
 		BranchFactor: merkledb.BranchFactor16,
-	})
+	}, prometheus.NewRegistry())
 	require.NoError(err)
 
 	// Populate [m.processWork] to ensure that UpdateSyncTarget
@@ -1143,11 +1221,10 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) {
 }
 
 func generateTrie(t *testing.T, r *rand.Rand, count int) (merkledb.MerkleDB, error) {
-	db, _, err := generateTrieWithMinKeyLen(t, r, count, 0)
-	return db, err
+	return generateTrieWithMinKeyLen(t, r, count, 0)
 }
 
-func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (merkledb.MerkleDB, [][]byte, error) {
+func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (merkledb.MerkleDB, error) {
 	require := require.New(t)
 
 	db, err := merkledb.New(
@@ -1156,7 +1233,7 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen
 		newDefaultDBConfig(),
 	)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	var (
 		allKeys [][]byte
@@ -1196,10 +1273,20 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen
 		allKeys = append(allKeys, key)
 		seenKeys[string(key)] = struct{}{}
 		if err = batch.Put(key, value); err != nil {
-			return db, nil, err
+			return db, err
 		}
 		i++
 	}
-	slices.SortFunc(allKeys, bytes.Compare)
-	return db, allKeys, batch.Write()
+	return db, batch.Write()
+}
+
+type testHandler struct {
+	p2p.NoOpHandler
+
+	handler p2p.Handler
+	updatedRootChan chan struct{}
+}
+
+func (t *testHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) {
+	<-t.updatedRootChan
+	return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes)
 }
diff --git a/x/sync/syncmock/network_client.go b/x/sync/syncmock/network_client.go
deleted file mode 100644
index 0df263196f1..00000000000
--- a/x/sync/syncmock/network_client.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/ava-labs/avalanchego/x/sync (interfaces: NetworkClient)
-//
-// Generated by this command:
-//
-//	mockgen -package=syncmock -destination=x/sync/syncmock/network_client.go -mock_names=NetworkClient=NetworkClient github.com/ava-labs/avalanchego/x/sync NetworkClient
-//
-
-// Package syncmock is a generated GoMock package.
-package syncmock
-
-import (
-	context "context"
-	reflect "reflect"
-
-	ids "github.com/ava-labs/avalanchego/ids"
-	version "github.com/ava-labs/avalanchego/version"
-	gomock "go.uber.org/mock/gomock"
-)
-
-// NetworkClient is a mock of NetworkClient interface.
-type NetworkClient struct {
-	ctrl *gomock.Controller
-	recorder *NetworkClientMockRecorder
-}
-
-// NetworkClientMockRecorder is the mock recorder for NetworkClient.
-type NetworkClientMockRecorder struct {
-	mock *NetworkClient
-}
-
-// NewNetworkClient creates a new mock instance.
-func NewNetworkClient(ctrl *gomock.Controller) *NetworkClient {
-	mock := &NetworkClient{ctrl: ctrl}
-	mock.recorder = &NetworkClientMockRecorder{mock}
-	return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *NetworkClient) EXPECT() *NetworkClientMockRecorder {
-	return m.recorder
-}
-
-// AppRequestFailed mocks base method.
-func (m *NetworkClient) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// AppRequestFailed indicates an expected call of AppRequestFailed.
-func (mr *NetworkClientMockRecorder) AppRequestFailed(arg0, arg1, arg2 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*NetworkClient)(nil).AppRequestFailed), arg0, arg1, arg2)
-}
-
-// AppResponse mocks base method.
-func (m *NetworkClient) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "AppResponse", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// AppResponse indicates an expected call of AppResponse.
-func (mr *NetworkClientMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*NetworkClient)(nil).AppResponse), arg0, arg1, arg2, arg3)
-}
-
-// Connected mocks base method.
-func (m *NetworkClient) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *version.Application) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Connected", arg0, arg1, arg2)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// Connected indicates an expected call of Connected.
-func (mr *NetworkClientMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*NetworkClient)(nil).Connected), arg0, arg1, arg2)
-}
-
-// Disconnected mocks base method.
-func (m *NetworkClient) Disconnected(arg0 context.Context, arg1 ids.NodeID) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Disconnected", arg0, arg1)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// Disconnected indicates an expected call of Disconnected.
-func (mr *NetworkClientMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*NetworkClient)(nil).Disconnected), arg0, arg1)
-}
-
-// Request mocks base method.
-func (m *NetworkClient) Request(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) ([]byte, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Request", arg0, arg1, arg2)
-	ret0, _ := ret[0].([]byte)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// Request indicates an expected call of Request.
-func (mr *NetworkClientMockRecorder) Request(arg0, arg1, arg2 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Request", reflect.TypeOf((*NetworkClient)(nil).Request), arg0, arg1, arg2)
-}
-
-// RequestAny mocks base method.
-func (m *NetworkClient) RequestAny(arg0 context.Context, arg1 []byte) (ids.NodeID, []byte, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RequestAny", arg0, arg1)
-	ret0, _ := ret[0].(ids.NodeID)
-	ret1, _ := ret[1].([]byte)
-	ret2, _ := ret[2].(error)
-	return ret0, ret1, ret2
-}
-
-// RequestAny indicates an expected call of RequestAny.
-func (mr *NetworkClientMockRecorder) RequestAny(arg0, arg1 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestAny", reflect.TypeOf((*NetworkClient)(nil).RequestAny), arg0, arg1)
-}