diff --git a/activation/e2e/activation_test.go b/activation/e2e/activation_test.go index d0c86e5785..426d225fa2 100644 --- a/activation/e2e/activation_test.go +++ b/activation/e2e/activation_test.go @@ -20,7 +20,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/codec" "github.com/spacemeshos/go-spacemesh/common/types" @@ -64,7 +64,7 @@ func Test_BuilderWithMultipleClients(t *testing.T) { db := statesql.InMemoryTest(t) localDB := localsql.InMemoryTest(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) diff --git a/activation/e2e/atx_merge_test.go b/activation/e2e/atx_merge_test.go index 2cbc7885d5..d7ac66e127 100644 --- a/activation/e2e/atx_merge_test.go +++ b/activation/e2e/atx_merge_test.go @@ -20,7 +20,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/codec" "github.com/spacemeshos/go-spacemesh/common/types" @@ -214,7 +214,7 @@ func Test_MarryAndMerge(t *testing.T) { t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) localDB := localsql.InMemoryTest(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, 
cleanup := launchServer(t, svc) diff --git a/activation/e2e/builds_atx_v2_test.go b/activation/e2e/builds_atx_v2_test.go index f3992b6b49..5201780f85 100644 --- a/activation/e2e/builds_atx_v2_test.go +++ b/activation/e2e/builds_atx_v2_test.go @@ -17,7 +17,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/codec" "github.com/spacemeshos/go-spacemesh/common/types" @@ -58,7 +58,7 @@ func TestBuilder_SwitchesToBuildV2(t *testing.T) { t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) opts := testPostSetupOpts(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) diff --git a/activation/e2e/certifier_client_test.go b/activation/e2e/certifier_client_test.go index 3a0d8b3350..71612f7cc3 100644 --- a/activation/e2e/certifier_client_test.go +++ b/activation/e2e/certifier_client_test.go @@ -20,7 +20,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/spacemeshos/go-spacemesh/activation" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" "github.com/spacemeshos/go-spacemesh/sql/localsql" @@ -39,7 +39,7 @@ func TestCertification(t *testing.T) { opts := testPostSetupOpts(t) logger := zaptest.NewLogger(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) 
svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) diff --git a/activation/e2e/checkpoint_merged_test.go b/activation/e2e/checkpoint_merged_test.go index 88c2a87238..7498369ceb 100644 --- a/activation/e2e/checkpoint_merged_test.go +++ b/activation/e2e/checkpoint_merged_test.go @@ -17,7 +17,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/checkpoint" "github.com/spacemeshos/go-spacemesh/codec" @@ -48,7 +48,7 @@ func Test_CheckpointAfterMerge(t *testing.T) { t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) localDB := localsql.InMemoryTest(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) t.Cleanup(cleanup) diff --git a/activation/e2e/checkpoint_test.go b/activation/e2e/checkpoint_test.go index cf481facb7..00eaa1c8b2 100644 --- a/activation/e2e/checkpoint_test.go +++ b/activation/e2e/checkpoint_test.go @@ -16,7 +16,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/checkpoint" "github.com/spacemeshos/go-spacemesh/codec" @@ -51,7 +51,7 @@ func TestCheckpoint_PublishingSoloATXs(t *testing.T) { t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) opts := testPostSetupOpts(t) - svc := 
grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) t.Cleanup(cleanup) diff --git a/activation/e2e/nipost_test.go b/activation/e2e/nipost_test.go index a3433dd794..f7962b5c2b 100644 --- a/activation/e2e/nipost_test.go +++ b/activation/e2e/nipost_test.go @@ -20,6 +20,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" @@ -130,7 +131,7 @@ func initPost( sig *signing.EdSigner, golden types.ATXID, grpcCfg grpcserver.Config, - svc *grpcserver.PostService, + svc *v1.PostService, ) { tb.Helper() @@ -161,7 +162,7 @@ func TestNIPostBuilderWithClients(t *testing.T) { localDb := localsql.InMemoryTest(t) opts := testPostSetupOpts(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) t.Cleanup(cleanup) @@ -247,7 +248,7 @@ func Test_NIPostBuilderWithMultipleClients(t *testing.T) { db := statesql.InMemoryTest(t) opts := testPostSetupOpts(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) t.Cleanup(cleanup) diff --git a/activation/e2e/validation_test.go b/activation/e2e/validation_test.go index 493aa1cf17..17bcd2a1b7 100644 --- a/activation/e2e/validation_test.go 
+++ b/activation/e2e/validation_test.go @@ -13,7 +13,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" ae2e "github.com/spacemeshos/go-spacemesh/activation/e2e" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" "github.com/spacemeshos/go-spacemesh/sql/localsql" @@ -34,7 +34,7 @@ func TestValidator_Validate(t *testing.T) { validator := activation.NewMocknipostValidator(gomock.NewController(t)) opts := testPostSetupOpts(t) - svc := grpcserver.NewPostService(logger, grpcserver.PostServiceQueryInterval(100*time.Millisecond)) + svc := v1.NewPostService(logger, v1.PostServiceQueryInterval(100*time.Millisecond)) svc.AllowConnections(true) grpcCfg, cleanup := launchServer(t, svc) t.Cleanup(cleanup) diff --git a/api/grpcserver/config.go b/api/grpcserver/config.go index d6ee88f8cd..35172e1bf9 100644 --- a/api/grpcserver/config.go +++ b/api/grpcserver/config.go @@ -31,16 +31,19 @@ type Config struct { type Service = string const ( - Admin Service = "admin" - Debug Service = "debug" - GlobalState Service = "global" - Mesh Service = "mesh" - Transaction Service = "transaction" - Activation Service = "activation" - Smesher Service = "smesher" - Post Service = "post" - PostInfo Service = "postInfo" - Node Service = "node" + // v1. + Admin Service = "admin" + Debug Service = "debug" + GlobalState Service = "global" + Mesh Service = "mesh" + Transaction Service = "transaction" + Activation Service = "activation" + Smesher Service = "smesher" + Post Service = "post" + PostInfo Service = "postInfo" + Node Service = "node" + + // v1alpha1. 
ActivationV2Alpha1 Service = "activation_v2alpha1" ActivationStreamV2Alpha1 Service = "activation_stream_v2alpha1" RewardV2Alpha1 Service = "reward_v2alpha1" diff --git a/api/grpcserver/grpcserver_test.go b/api/grpcserver/grpcserver_test.go index 5778bb7689..f30b6fe77b 100644 --- a/api/grpcserver/grpcserver_test.go +++ b/api/grpcserver/grpcserver_test.go @@ -1,2518 +1,41 @@ package grpcserver import ( - "context" - "errors" "fmt" - "io" - "log" - "math" - "math/big" "net" - "os" - "path/filepath" "strconv" "testing" - "time" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" - ma "github.com/multiformats/go-multiaddr" - pb "github.com/spacemeshos/api/release/go/spacemesh/v1" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" - "go.uber.org/zap/zaptest/observer" - "golang.org/x/sync/errgroup" - "google.golang.org/genproto/googleapis/rpc/code" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/spacemeshos/go-spacemesh/activation" - "github.com/spacemeshos/go-spacemesh/codec" - "github.com/spacemeshos/go-spacemesh/common/types" - "github.com/spacemeshos/go-spacemesh/datastore" - "github.com/spacemeshos/go-spacemesh/events" - vm "github.com/spacemeshos/go-spacemesh/genvm" - "github.com/spacemeshos/go-spacemesh/genvm/sdk" - "github.com/spacemeshos/go-spacemesh/genvm/sdk/wallet" - "github.com/spacemeshos/go-spacemesh/p2p" - "github.com/spacemeshos/go-spacemesh/p2p/peerinfo" - peerinfomocks "github.com/spacemeshos/go-spacemesh/p2p/peerinfo/mocks" - pubsubmocks "github.com/spacemeshos/go-spacemesh/p2p/pubsub/mocks" - "github.com/spacemeshos/go-spacemesh/signing" - "github.com/spacemeshos/go-spacemesh/sql/accounts" - 
"github.com/spacemeshos/go-spacemesh/sql/activesets" - "github.com/spacemeshos/go-spacemesh/sql/atxs" - "github.com/spacemeshos/go-spacemesh/sql/identities" - "github.com/spacemeshos/go-spacemesh/sql/statesql" - "github.com/spacemeshos/go-spacemesh/system" - "github.com/spacemeshos/go-spacemesh/txs" -) - -const ( - labelsPerUnit = 2048 - numUnits = 2 - genTimeUnix = 1000000 - layerDuration = 10 * time.Second - layerAvgSize = 10 - txsPerProposal = 99 - layersPerEpoch = uint32(5) - - accountBalance = 8675301 - accountCounter = 0 - rewardAmount = 5551234 -) - -var ( - txReturnLayer = types.LayerID(1) - layerFirst = types.LayerID(0) - layerVerified = types.LayerID(8) - layerLatest = types.LayerID(10) - layerCurrent = types.LayerID(12) - postGenesisEpoch = types.EpochID(2) - genesisID = types.Hash20{} - - addr1 types.Address - addr2 types.Address - rewardSmesherID = types.RandomNodeID() - globalAtx *types.ActivationTx - globalAtx2 *types.ActivationTx - globalTx *types.Transaction - globalTx2 *types.Transaction - ballot1 = genLayerBallot(types.LayerID(11)) - block1 = genLayerBlock(types.LayerID(11), nil) - block2 = genLayerBlock(types.LayerID(11), nil) - block3 = genLayerBlock(types.LayerID(11), nil) - meshAPIMock = &MeshAPIMock{} - conStateAPI = &ConStateAPIMock{ - returnTx: make(map[types.TransactionID]*types.Transaction), - balances: make(map[types.Address]*big.Int), - nonces: make(map[types.Address]uint64), - poolByAddress: make(map[types.Address]types.TransactionID), - poolByTxId: make(map[types.TransactionID]*types.Transaction), - } - stateRoot = types.HexToHash32("11111") ) -func genLayerBallot(layerID types.LayerID) *types.Ballot { - b := types.RandomBallot() - b.Layer = layerID - signer, _ := signing.NewEdSigner() - b.Signature = signer.Sign(signing.BALLOT, b.SignedBytes()) - b.SmesherID = signer.NodeID() - b.Initialize() - return b -} - -func genLayerBlock(layerID types.LayerID, txs []types.TransactionID) *types.Block { - b := &types.Block{ - InnerBlock: 
types.InnerBlock{ - LayerIndex: layerID, - TxIDs: txs, - }, - } - b.Initialize() - return b -} - -func dialGrpc(tb testing.TB, cfg Config) *grpc.ClientConn { +func getFreePort(tb testing.TB) int { tb.Helper() - conn, err := grpc.NewClient( - cfg.PublicListener, - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - require.NoError(tb, err) - tb.Cleanup(func() { require.NoError(tb, conn.Close()) }) - return conn -} - -func TestMain(m *testing.M) { - types.SetLayersPerEpoch(layersPerEpoch) - - var err error - signer, err := signing.NewEdSigner() - if err != nil { - log.Println("failed to create signer:", err) - os.Exit(1) - } - signer1, err := signing.NewEdSigner() - if err != nil { - log.Println("failed to create signer:", err) - os.Exit(1) - } - signer2, err := signing.NewEdSigner() - if err != nil { - log.Println("failed to create signer:", err) - os.Exit(1) - } - - addr1 = wallet.Address(signer1.PublicKey().Bytes()) - addr2 = wallet.Address(signer2.PublicKey().Bytes()) - - globalAtx = &types.ActivationTx{ - PublishEpoch: postGenesisEpoch, - Sequence: 1, - Coinbase: addr1, - NumUnits: numUnits, - Weight: numUnits, - TickCount: 1, - SmesherID: signer.NodeID(), - } - globalAtx.SetReceived(time.Now()) - - globalAtx2 = &types.ActivationTx{ - PublishEpoch: postGenesisEpoch, - Sequence: 1, - Coinbase: addr2, - NumUnits: numUnits, - Weight: numUnits, - TickCount: 1, - SmesherID: signer.NodeID(), - } - globalAtx2.SetReceived(time.Now()) - globalAtx2.SmesherID = signer.NodeID() - globalAtx2.TickCount = 1 - - // These create circular dependencies so they have to be initialized - // after the global vars - ballot1.AtxID = globalAtx.ID() - ballot1.EpochData = &types.EpochData{ActiveSetHash: types.ATXIDList{globalAtx.ID(), globalAtx2.ID()}.Hash()} - - globalTx = NewTx(0, addr1, signer1) - globalTx2 = NewTx(1, addr2, signer2) - - block1.TxIDs = []types.TransactionID{globalTx.ID, globalTx2.ID} - conStateAPI.returnTx[globalTx.ID] = globalTx - 
conStateAPI.returnTx[globalTx2.ID] = globalTx2 - conStateAPI.balances[addr1] = big.NewInt(int64(accountBalance)) - conStateAPI.balances[addr2] = big.NewInt(int64(accountBalance)) - conStateAPI.nonces[globalTx.Principal] = uint64(accountCounter) - - types.SetLayersPerEpoch(layersPerEpoch) - - res := m.Run() - os.Exit(res) -} - -type MeshAPIMock struct{} - -// latest layer received. -func (m *MeshAPIMock) LatestLayer() types.LayerID { - return layerLatest -} - -// latest layer approved/confirmed/applied to state -// The real logic here is a bit more complicated, as it depends whether the node -// is syncing or not. If it's not syncing, layers are applied to state as they're -// verified by Hare. If it's syncing, Hare is not run, and they are applied to -// state as they're confirmed by Tortoise and it advances pbase. This is all in -// flux right now so keep this simple for the purposes of testing. -func (m *MeshAPIMock) LatestLayerInState() types.LayerID { - return layerVerified -} - -func (m *MeshAPIMock) ProcessedLayer() types.LayerID { - return layerVerified -} - -func (m *MeshAPIMock) GetRewardsByCoinbase(types.Address) (rewards []*types.Reward, err error) { - return []*types.Reward{ - { - Layer: layerFirst, - TotalReward: rewardAmount, - LayerReward: rewardAmount, - Coinbase: addr1, - SmesherID: rewardSmesherID, - }, - }, nil -} - -func (m *MeshAPIMock) GetRewardsBySmesherId(types.NodeID) (rewards []*types.Reward, err error) { - return []*types.Reward{ - { - Layer: layerFirst, - TotalReward: rewardAmount, - LayerReward: rewardAmount, - Coinbase: addr1, - SmesherID: rewardSmesherID, - }, - }, nil -} - -func (m *MeshAPIMock) GetLayer(tid types.LayerID) (*types.Layer, error) { - if tid.After(layerCurrent) { - return nil, errors.New("requested layer later than current layer") - } else if tid.After(m.LatestLayer()) { - return nil, errors.New("haven't received that layer yet") - } - - ballots := []*types.Ballot{ballot1} - blocks := []*types.Block{block1, block2, 
block3} - return types.NewExistingLayer(tid, ballots, blocks), nil -} - -func (m *MeshAPIMock) GetLayerVerified(tid types.LayerID) (*types.Block, error) { - return block1, nil -} - -func (m *MeshAPIMock) GetATXs( - context.Context, - []types.ATXID, -) (map[types.ATXID]*types.ActivationTx, []types.ATXID) { - atxs := map[types.ATXID]*types.ActivationTx{ - globalAtx.ID(): globalAtx, - globalAtx2.ID(): globalAtx2, - } - return atxs, nil -} - -func (m *MeshAPIMock) MeshHash(types.LayerID) (types.Hash32, error) { - return types.RandomHash(), nil -} - -type ConStateAPIMock struct { - returnTx map[types.TransactionID]*types.Transaction - balances map[types.Address]*big.Int - nonces map[types.Address]uint64 - // In the real txs.txPool struct, there are multiple data structures and they're more complex, - // but we just mock a very simple use case here and only store some of these data - poolByAddress map[types.Address]types.TransactionID - poolByTxId map[types.TransactionID]*types.Transaction -} - -func (t *ConStateAPIMock) put(id types.TransactionID, tx *types.Transaction) { - t.poolByTxId[id] = tx - t.poolByAddress[tx.Principal] = id - events.ReportNewTx(0, tx) -} - -// Return a mock estimated nonce and balance that's different than the default, mimicking transactions that are -// unconfirmed or in the mempool that will update state. 
-func (t *ConStateAPIMock) GetProjection(types.Address) (uint64, uint64) { - return accountCounter + 1, accountBalance + 1 -} - -func (t *ConStateAPIMock) GetAllAccounts() (res []*types.Account, err error) { - for address, balance := range t.balances { - res = append(res, &types.Account{ - Address: address, - Balance: balance.Uint64(), - NextNonce: t.nonces[address], - }) - } - return res, nil -} - -func (t *ConStateAPIMock) GetStateRoot() (types.Hash32, error) { - return stateRoot, nil -} - -func (t *ConStateAPIMock) HasEvicted(id types.TransactionID) (bool, error) { - panic("not implemented") -} - -func (t *ConStateAPIMock) GetMeshTransaction(id types.TransactionID) (*types.MeshTransaction, error) { - tx, ok := t.returnTx[id] - if ok { - return &types.MeshTransaction{Transaction: *tx, State: types.APPLIED}, nil - } - tx, ok = t.poolByTxId[id] - if ok { - return &types.MeshTransaction{Transaction: *tx, State: types.MEMPOOL}, nil - } - return nil, errors.New("it ain't there") -} - -func (t *ConStateAPIMock) GetTransactionsByAddress( - from, to types.LayerID, - account types.Address, -) ([]*types.MeshTransaction, error) { - if from.After(txReturnLayer) { - return nil, nil - } - var txs []*types.MeshTransaction - for _, tx := range t.returnTx { - if tx.Principal.String() == account.String() { - txs = append(txs, &types.MeshTransaction{Transaction: *tx}) - } - } - return txs, nil -} - -func (t *ConStateAPIMock) GetMeshTransactions( - txIds []types.TransactionID, -) (txs []*types.MeshTransaction, missing map[types.TransactionID]struct{}) { - for _, txId := range txIds { - for _, tx := range t.returnTx { - if tx.ID == txId { - txs = append(txs, &types.MeshTransaction{ - State: types.APPLIED, - Transaction: *tx, - }) - } - } - } - return -} - -func (t *ConStateAPIMock) GetLayerStateRoot(types.LayerID) (types.Hash32, error) { - return stateRoot, nil -} - -func (t *ConStateAPIMock) GetBalance(addr types.Address) (uint64, error) { - return t.balances[addr].Uint64(), nil -} 
- -func (t *ConStateAPIMock) GetNonce(addr types.Address) (types.Nonce, error) { - return t.nonces[addr], nil -} - -func (t *ConStateAPIMock) Validation(raw types.RawTx) system.ValidationRequest { - panic("dont use this") -} - -func NewTx(nonce uint64, recipient types.Address, signer *signing.EdSigner) *types.Transaction { - tx := types.Transaction{TxHeader: &types.TxHeader{}} - tx.Principal = wallet.Address(signer.PublicKey().Bytes()) - if nonce == 0 { - tx.RawTx = types.NewRawTx(wallet.SelfSpawn(signer.PrivateKey(), - 0, - sdk.WithGasPrice(0), - )) - } else { - tx.RawTx = types.NewRawTx( - wallet.Spend(signer.PrivateKey(), recipient, 1, - nonce, - sdk.WithGasPrice(0), - ), - ) - tx.MaxSpend = 1 - } - return &tx -} - -func launchServer(tb testing.TB, services ...ServiceAPI) (Config, func()) { - cfg := DefaultTestConfig(tb) - grpcService, err := NewWithServices(cfg.PublicListener, zaptest.NewLogger(tb).Named("grpc"), cfg, services) - require.NoError(tb, err) - - // start gRPC server - require.NoError(tb, grpcService.Start()) - - // update config with bound addresses - cfg.PublicListener = grpcService.BoundAddress - - return cfg, func() { assert.NoError(tb, grpcService.Close()) } -} - -func getFreePort(optionalPort int) (int, error) { - l, e := net.Listen("tcp", fmt.Sprintf(":%v", optionalPort)) - if e != nil { - l, e = net.Listen("tcp", ":0") - if e != nil { - return 0, fmt.Errorf("listen TCP: %w", e) - } - } + l, err := net.Listen("tcp", ":0") + require.NoError(tb, err, "Should be able to establish a connection on a port") defer l.Close() - return l.Addr().(*net.TCPAddr).Port, nil -} - -func TestNewServersConfig(t *testing.T) { - port1, err := getFreePort(0) - require.NoError(t, err, "Should be able to establish a connection on a port") - - port2, err := getFreePort(0) - require.NoError(t, err, "Should be able to establish a connection on a port") - - grpcService := New(fmt.Sprintf(":%d", port1), zaptest.NewLogger(t).Named("grpc"), DefaultTestConfig(t)) - 
jsonService := NewJSONHTTPServer(zaptest.NewLogger(t).Named("grpc.JSON"), fmt.Sprintf(":%d", port2), - []string{}, false) - - require.Contains(t, grpcService.listener, strconv.Itoa(port1), "Expected same port") - require.Contains(t, jsonService.listener, strconv.Itoa(port2), "Expected same port") -} - -func TestNewLocalServer(t *testing.T) { - tt := []struct { - name string - listener string - warn bool - }{ - { - name: "valid", - listener: "192.168.1.1:1234", - warn: false, - }, - { - name: "valid random port", - listener: "10.0.0.1:0", - warn: false, - }, - { - name: "invalid", - listener: "0.0.0.0:1234", - warn: true, - }, - { - name: "invalid random port", - listener: "88.77.66.11:0", - warn: true, - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - observer, observedLogs := observer.New(zapcore.WarnLevel) - logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.WrapCore( - func(core zapcore.Core) zapcore.Core { - return zapcore.NewTee(core, observer) - }, - ))) - - ctrl := gomock.NewController(t) - peerCounter := NewMockpeerCounter(ctrl) - meshApi := NewMockmeshAPI(ctrl) - genTime := NewMockgenesisTimeAPI(ctrl) - syncer := NewMocksyncer(ctrl) - - cfg := DefaultTestConfig(t) - cfg.PostListener = tc.listener - svc := NewNodeService(peerCounter, meshApi, genTime, syncer, "v0.0.0", "cafebabe") - grpcService, err := NewWithServices(cfg.PostListener, logger, cfg, []ServiceAPI{svc}) - if tc.warn { - require.Equal(t, 1, observedLogs.Len(), "Expected a warning log") - require.Equal(t, "unsecured grpc server is listening on a public IP address", - observedLogs.All()[0].Message, - ) - require.Equal(t, tc.listener, observedLogs.All()[0].ContextMap()["address"]) - return - } - - require.NoError(t, err) - require.Equal(t, grpcService.listener, tc.listener, "expected same listener") - }) - } -} - -type smesherServiceConn struct { - pb.SmesherServiceClient - - smeshingProvider *activation.MockSmeshingProvider - postSupervisor *MockpostSupervisor - 
grpcPostService *MockgrpcPostService -} - -func setupSmesherService(tb testing.TB, sig *signing.EdSigner) (*smesherServiceConn, context.Context) { - ctrl, mockCtx := gomock.WithContext(context.Background(), tb) - smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := NewMockpostSupervisor(ctrl) - grpcPostService := NewMockgrpcPostService(ctrl) - svc := NewSmesherService( - smeshingProvider, - postSupervisor, - grpcPostService, - 10*time.Millisecond, - activation.DefaultPostSetupOpts(), - sig, - ) - svc.SetPostServiceConfig(activation.DefaultTestPostServiceConfig(tb)) - cfg, cleanup := launchServer(tb, svc) - tb.Cleanup(cleanup) - - conn := dialGrpc(tb, cfg) - client := pb.NewSmesherServiceClient(conn) - - return &smesherServiceConn{ - SmesherServiceClient: client, - - smeshingProvider: smeshingProvider, - postSupervisor: postSupervisor, - grpcPostService: grpcPostService, - }, mockCtx -} - -func TestSmesherService(t *testing.T) { - t.Run("IsSmeshing", func(t *testing.T) { - t.Parallel() - - sig, err := signing.NewEdSigner() - require.NoError(t, err) - - c, ctx := setupSmesherService(t, sig) - c.smeshingProvider.EXPECT().Smeshing().Return(false) - res, err := c.IsSmeshing(ctx, &emptypb.Empty{}) - require.NoError(t, err) - require.False(t, res.IsSmeshing, "expected IsSmeshing to be false") - }) - - t.Run("StartSmeshingMissingArgs", func(t *testing.T) { - t.Parallel() - - sig, err := signing.NewEdSigner() - require.NoError(t, err) - - c, ctx := setupSmesherService(t, sig) - _, err = c.StartSmeshing(ctx, &pb.StartSmeshingRequest{}) - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }) - - t.Run("StartSmeshing", func(t *testing.T) { - t.Parallel() - opts := &pb.PostSetupOpts{} - opts.DataDir = t.TempDir() - opts.NumUnits = 1 - opts.MaxFileSize = 1024 - - coinbase := &pb.AccountId{Address: addr1.String()} - sig, err := signing.NewEdSigner() - require.NoError(t, err) - - c, ctx := setupSmesherService(t, sig) - 
c.smeshingProvider.EXPECT().StartSmeshing(gomock.Any()).Return(nil) - c.postSupervisor.EXPECT().Start(gomock.Any(), - gomock.All( - gomock.Cond(func(postOpts activation.PostSetupOpts) bool { - return postOpts.DataDir == opts.DataDir - }), - gomock.Cond(func(postOpts activation.PostSetupOpts) bool { - return postOpts.NumUnits == opts.NumUnits - }), - gomock.Cond(func(postOpts activation.PostSetupOpts) bool { - return postOpts.MaxFileSize == opts.MaxFileSize - }), - ), sig).Return(nil) - c.grpcPostService.EXPECT().AllowConnections(true) - res, err := c.StartSmeshing(ctx, &pb.StartSmeshingRequest{ - Opts: opts, - Coinbase: coinbase, - }) - require.NoError(t, err) - require.Equal(t, int32(code.Code_OK), res.Status.Code) - }) - - t.Run("StartSmeshingMultiSetup", func(t *testing.T) { - t.Parallel() - opts := &pb.PostSetupOpts{} - opts.DataDir = t.TempDir() - opts.NumUnits = 1 - opts.MaxFileSize = 1024 - - coinbase := &pb.AccountId{Address: addr1.String()} - - c, ctx := setupSmesherService(t, nil) // in 1:n the node id is nil and start smeshing should fail - res, err := c.StartSmeshing(ctx, &pb.StartSmeshingRequest{ - Opts: opts, - Coinbase: coinbase, - }) - require.Equal(t, codes.FailedPrecondition, status.Code(err)) - require.ErrorContains(t, err, "node is not configured for supervised smeshing") - require.Nil(t, res) - }) - - t.Run("StopSmeshing", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - c.smeshingProvider.EXPECT().StopSmeshing(gomock.Any()).Return(nil) - c.postSupervisor.EXPECT().Stop(false).Return(nil) - res, err := c.StopSmeshing(ctx, &pb.StopSmeshingRequest{}) - require.NoError(t, err) - require.Equal(t, int32(code.Code_OK), res.Status.Code) - }) - - t.Run("SmesherIDs", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - nodeId := types.RandomNodeID() - c.smeshingProvider.EXPECT().SmesherIDs().Return([]types.NodeID{nodeId}) - res, err := c.SmesherIDs(ctx, &emptypb.Empty{}) - require.NoError(t, err) - 
require.Len(t, res.PublicKeys, 1) - require.Equal(t, nodeId.Bytes(), res.PublicKeys[0]) - }) - - t.Run("SetCoinbaseMissingArgs", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - _, err := c.SetCoinbase(ctx, &pb.SetCoinbaseRequest{}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }) - - t.Run("SetCoinbase", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - c.smeshingProvider.EXPECT().SetCoinbase(addr1) - res, err := c.SetCoinbase(ctx, &pb.SetCoinbaseRequest{ - Id: &pb.AccountId{Address: addr1.String()}, - }) - require.NoError(t, err) - require.Equal(t, int32(code.Code_OK), res.Status.Code) - }) - - t.Run("Coinbase", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - c.smeshingProvider.EXPECT().Coinbase().Return(addr1) - res, err := c.Coinbase(ctx, &emptypb.Empty{}) - require.NoError(t, err) - addr, err := types.StringToAddress(res.AccountId.Address) - require.NoError(t, err) - require.Equal(t, addr1, addr) - }) - - t.Run("PostSetupComputeProviders", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - c.postSupervisor.EXPECT().Providers().Return(nil, nil) - _, err := c.PostSetupProviders(ctx, &pb.PostSetupProvidersRequest{Benchmark: false}) - require.NoError(t, err) - }) - - t.Run("PostSetupStatusStream", func(t *testing.T) { - t.Parallel() - c, ctx := setupSmesherService(t, nil) - c.postSupervisor.EXPECT().Status().Return(&activation.PostSetupStatus{}).AnyTimes() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := c.PostSetupStatusStream(ctx, &emptypb.Empty{}) - require.NoError(t, err) - - // Expecting the stream to return updates before closing. 
- for range 3 { - _, err = stream.Recv() - require.NoError(t, err) - } - - cancel() - require.Eventually(t, func() bool { - _, err = stream.Recv() - return status.Code(err) == codes.Canceled - }, time.Second, time.Millisecond*10) - }) -} - -func TestMeshService(t *testing.T) { - ctrl := gomock.NewController(t) - genTime := NewMockgenesisTimeAPI(ctrl) - genesis := time.Unix(genTimeUnix, 0) - genTime.EXPECT().GenesisTime().Return(genesis) - genTime.EXPECT().CurrentLayer().Return(layerCurrent).AnyTimes() - cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) - t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - svc := NewMeshService( - cdb, - meshAPIMock, - conStateAPI, - genTime, - layersPerEpoch, - types.Hash20{}, - layerDuration, - layerAvgSize, - txsPerProposal, - ) - require.NoError( - t, - activesets.Add( - cdb, - ballot1.EpochData.ActiveSetHash, - &types.EpochActiveSet{Set: types.ATXIDList{globalAtx.ID(), globalAtx2.ID()}}, - ), - ) - cfg, cleanup := launchServer(t, svc) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewMeshServiceClient(conn) - - // Construct an array of test cases to test each endpoint in turn - testCases := []struct { - name string - run func(*testing.T) - }{ - {"GenesisTime", func(t *testing.T) { - response, err := c.GenesisTime(context.Background(), &pb.GenesisTimeRequest{}) - require.NoError(t, err) - require.Equal(t, uint64(genesis.Unix()), response.Unixtime.Value) - }}, - {"CurrentLayer", func(t *testing.T) { - response, err := c.CurrentLayer(context.Background(), &pb.CurrentLayerRequest{}) - require.NoError(t, err) - require.Equal(t, layerCurrent.Uint32(), response.Layernum.Number) - }}, - {"CurrentEpoch", func(t *testing.T) { - response, err := c.CurrentEpoch(context.Background(), &pb.CurrentEpochRequest{}) - require.NoError(t, err) - require.Equal(t, layerCurrent.GetEpoch().Uint32(), response.Epochnum.Number) - }}, - {"GenesisID", func(t *testing.T) { - response, err := 
c.GenesisID(context.Background(), &pb.GenesisIDRequest{}) - require.NoError(t, err) - require.Equal(t, genesisID.Bytes(), response.GenesisId) - }}, - {"LayerDuration", func(t *testing.T) { - response, err := c.LayerDuration(context.Background(), &pb.LayerDurationRequest{}) - require.NoError(t, err) - require.Equal(t, layerDuration, time.Duration(response.Duration.Value)*time.Second) - }}, - {"MaxTransactionsPerSecond", func(t *testing.T) { - response, err := c.MaxTransactionsPerSecond(context.Background(), &pb.MaxTransactionsPerSecondRequest{}) - require.NoError(t, err) - require.Equal( - t, - uint64(layerAvgSize*txsPerProposal/layerDuration.Seconds()), - response.MaxTxsPerSecond.Value, - ) - }}, - {"AccountMeshDataQuery", func(t *testing.T) { - subtests := []struct { - name string - run func(*testing.T) - }{ - { - // all inputs default to zero, no filter - // query is valid but MaxResults is 0 so expect no results - name: "no_inputs", - run: func(t *testing.T) { - _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{}) - require.ErrorContains(t, err, "`Filter` must be provided") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }, - }, - { - name: "MinLayer_too_high", - run: func(t *testing.T) { - _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MinLayer: &pb.LayerNumber{Number: layerCurrent.Add(1).Uint32()}, - }) - require.ErrorContains(t, err, "`LatestLayer` must be less than") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }, - }, - { - // This does not produce an error but we expect no results - name: "Offset_too_high", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{ - Address: types.GenerateAddress(make([]byte, types.AddressLength)).String(), - }, - AccountMeshDataFlags: 
uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), - }, - Offset: math.MaxUint32, - }) - require.NoError(t, err) - require.Equal(t, uint32(0), res.TotalResults) - require.Empty(t, res.Data) - }, - }, - { - name: "no_filter", - run: func(t *testing.T) { - _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - }) - require.ErrorContains(t, err, "`Filter` must be provided") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }, - }, - { - name: "empty_filter", - run: func(t *testing.T) { - _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{}, - }) - require.ErrorContains(t, err, "`Filter.AccountId` must be provided") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }, - }, - { - name: "filter_with_empty_AccountId", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{ - Address: types.GenerateAddress(make([]byte, types.AddressLength)).String(), - }, - AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(0), res.TotalResults) - require.Empty(t, res.Data) - }, - }, - { - name: "filter_with_valid_AccountId", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{ - AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), - AccountId: &pb.AccountId{Address: addr1.String()}, - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(0), res.TotalResults) - require.Empty(t, res.Data) - }, - }, - { - name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_zero", - 
run: func(t *testing.T) { - _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_UNSPECIFIED), - }, - }) - require.ErrorContains(t, err, "`Filter.AccountMeshDataFlags` must set at least one bitfield") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - }, - }, - { - name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_tx_only", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS, - ), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(1), res.TotalResults) - require.Len(t, res.Data, 1) - checkAccountMeshDataItemTx(t, res.Data[0].Datum) - }, - }, - { - name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_activations_only", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(10), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(0), res.TotalResults) - require.Empty(t, res.Data) - }, - }, - { - name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_all", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - // Zero means unlimited - MaxResults: uint32(0), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32( - 
pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(1), res.TotalResults) - require.Len(t, res.Data, 1) - checkAccountMeshDataItemTx(t, res.Data[0].Datum) - }, - }, - { - name: "max_results", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(1), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(1), res.TotalResults) - require.Len(t, res.Data, 1) - checkAccountMeshDataItemTx(t, res.Data[0].Datum) - }, - }, - { - name: "max_results_page_2", - run: func(t *testing.T) { - res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ - MaxResults: uint32(1), - Offset: uint32(1), - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }) - require.NoError(t, err) - require.Equal(t, uint32(1), res.TotalResults) - require.Empty(t, res.Data) - }, - }, - } - - // Run sub-subtests - for _, r := range subtests { - t.Run(r.name, r.run) - } - }}, - {name: "AccountMeshDataStream", run: func(t *testing.T) { - // common testing framework - generateRunFn := func(req *pb.AccountMeshDataStreamRequest) func(*testing.T) { - return func(*testing.T) { - // Just try opening and immediately closing the stream - stream, err := c.AccountMeshDataStream(context.Background(), req) - require.NoError(t, err, "unexpected error opening stream") - - // Do we need this? 
It doesn't seem to cause any harm - stream.Context().Done() - } - } - generateRunFnError := func(msg string, req *pb.AccountMeshDataStreamRequest) func(*testing.T) { - return func(t *testing.T) { - // there should be no error opening the stream - stream, err := c.AccountMeshDataStream(context.Background(), req) - require.NoError(t, err, "unexpected error opening stream") - - // sending a request should generate an error - _, err = stream.Recv() - require.ErrorContains(t, err, msg, "received unexpected error") - require.Equal(t, codes.InvalidArgument, status.Code(err)) - - // Do we need this? It doesn't seem to cause any harm - stream.Context().Done() - } - } - subtests := []struct { - name string - run func(*testing.T) - }{ - // ERROR INPUTS - // We expect these to produce errors - { - name: "missing_filter", - run: generateRunFnError("`Filter` must be provided", &pb.AccountMeshDataStreamRequest{}), - }, - { - name: "empty_filter", - run: generateRunFnError("`Filter.AccountId` must be provided", &pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{}, - }), - }, - { - name: "missing_address", - run: generateRunFnError("`Filter.AccountId` must be provided", &pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }), - }, - { - name: "filter_with_zero_flags", - run: generateRunFnError( - "`Filter.AccountMeshDataFlags` must set at least one bitfield", - &pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32(0), - }, - }, - ), - }, - - // SUCCESS - { - name: "empty_address", - run: generateRunFn(&pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{}, - AccountMeshDataFlags: uint32( - 
pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }), - }, - { - name: "invalid_address", - run: generateRunFn(&pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: types.GenerateAddress([]byte{'A'}).String()}, - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - }), - }, - } - - // Run sub-subtests - for _, r := range subtests { - t.Run(r.name, r.run) - } - }}, - {"LayersQuery", func(t *testing.T) { - generateRunFn := func(numResults int, req *pb.LayersQueryRequest) func(*testing.T) { - return func(t *testing.T) { - res, err := c.LayersQuery(context.Background(), req) - require.NoError(t, err, "query returned an unexpected error") - require.Len(t, res.Layer, numResults, "unexpected number of layer results") - } - } - generateRunFnError := func(msg string, req *pb.LayersQueryRequest) func(*testing.T) { - return func(t *testing.T) { - _, err := c.LayersQuery(context.Background(), req) - require.ErrorContains(t, err, msg, "expected error to contain string") - } - } - requests := []struct { - name string - run func(*testing.T) - }{ - // ERROR INPUTS - // We expect these to produce errors - - // end layer after current layer - { - name: "end_layer_after_current_layer", - run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerCurrent.Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerCurrent.Add(2).Uint32()}, - }), - }, - - // start layer after current layer - { - name: "start_layer_after_current_layer", - run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerCurrent.Add(2).Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerCurrent.Add(3).Uint32()}, - }), - }, - - // layer after last received - { 
- name: "layer_after_last_received", - run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerLatest.Add(1).Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerLatest.Add(2).Uint32()}, - }), - }, - - // very very large range - { - name: "very_very_large_range", - run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: 0}, - EndLayer: &pb.LayerNumber{Number: uint32(math.MaxUint32)}, - }), - }, - - // GOOD INPUTS - - // nil inputs - // not an error since these default to zero, see - // https://github.com/spacemeshos/api/issues/87 - { - name: "nil_inputs", - run: generateRunFn(1, &pb.LayersQueryRequest{}), - }, - - // start layer after end layer: expect no error, zero results - { - name: "start_layer_after_end_layer", - run: generateRunFn(0, &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerCurrent.Add(1).Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerCurrent.Uint32()}, - }), - }, - - // same start/end layer: expect no error, one result - { - name: "same_start_end_layer", - run: generateRunFn(1, &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerVerified.Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerVerified.Uint32()}, - }), - }, - - // start layer after last approved/confirmed layer (but before current layer) - { - name: "start_layer_after_last_approved_confirmed_layer", - run: generateRunFn(2, &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerVerified.Add(1).Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerVerified.Add(2).Uint32()}, - }), - }, - - // end layer after last approved/confirmed layer (but before current layer) - { - name: "end_layer_after_last_approved_confirmed_layer", - // expect difference + 1 return layers - run: generateRunFn( - int(layerVerified.Add(2).Sub(layerFirst.Uint32()).Add(1).Uint32()), - &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: 
layerFirst.Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerVerified.Add(2).Uint32()}, - }, - ), - }, - - // comprehensive valid test - { - name: "comprehensive", - run: func(t *testing.T) { - req := &pb.LayersQueryRequest{ - StartLayer: &pb.LayerNumber{Number: layerFirst.Uint32()}, - EndLayer: &pb.LayerNumber{Number: layerLatest.Uint32()}, - } - - res, err := c.LayersQuery(context.Background(), req) - require.NoError(t, err, "query returned unexpected error") - - // endpoint inclusive so add one - numLayers := layerLatest.Difference(layerFirst) + 1 - require.Len(t, res.Layer, int(numLayers)) - checkLayer(t, res.Layer[0]) - - resLayerNine := res.Layer[9] - require.Equal(t, uint32(9), resLayerNine.Number.Number, "layer nine is ninth") - require.NotEmpty(t, resLayerNine.Hash) - require.Equal( - t, - pb.Layer_LAYER_STATUS_UNSPECIFIED, - resLayerNine.Status, - "later layer is unconfirmed", - ) - }, - }, - } - - // Run sub-subtests - for _, r := range requests { - t.Run(r.name, r.run) - } - }}, - // NOTE: There are no simple error tests for LayerStream, as it does not take any arguments. - // See TestLayerStream_comprehensive test, below. 
- } - - // Run subtests - for _, tc := range testCases { - t.Run(tc.name, tc.run) - } -} - -func TestTransactionServiceSubmitUnsync(t *testing.T) { - req := require.New(t) - - ctrl := gomock.NewController(t) - syncer := NewMocksyncer(ctrl) - syncer.EXPECT().IsSynced(gomock.Any()).Return(false) - publisher := pubsubmocks.NewMockPublisher(ctrl) - publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - txHandler := NewMocktxValidator(ctrl) - txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(nil) - - svc := NewTransactionService(statesql.InMemoryTest(t), publisher, meshAPIMock, conStateAPI, syncer, txHandler) - cfg, cleanup := launchServer(t, svc) - t.Cleanup(cleanup) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn := dialGrpc(t, cfg) - c := pb.NewTransactionServiceClient(conn) - - serializedTx, err := codec.Encode(globalTx) - req.NoError(err, "error serializing tx") - - // This time, we expect an error, since isSynced is false (by default) - // The node should not allow tx submission when not synced - res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx}) - req.Error(err) - grpcStatus, ok := status.FromError(err) - req.True(ok) - req.Equal(codes.FailedPrecondition, grpcStatus.Code()) - req.Equal("Cannot submit transaction, node is not in sync yet, try again later", grpcStatus.Message()) - req.Nil(res) - - syncer.EXPECT().IsSynced(gomock.Any()).Return(true) - - // This time, we expect no error, since isSynced is now true - _, err = c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx}) - req.NoError(err) -} - -func TestTransactionServiceSubmitInvalidTx(t *testing.T) { - req := require.New(t) - - ctrl := gomock.NewController(t) - syncer := NewMocksyncer(ctrl) - syncer.EXPECT().IsSynced(gomock.Any()).Return(true) - publisher := pubsubmocks.NewMockPublisher(ctrl) // publish is not called - txHandler := 
NewMocktxValidator(ctrl) - txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(errors.New("failed validation")) - - db := statesql.InMemoryTest(t) - grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler) - cfg, cleanup := launchServer(t, grpcService) - t.Cleanup(cleanup) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn := dialGrpc(t, cfg) - c := pb.NewTransactionServiceClient(conn) - - serializedTx, err := codec.Encode(globalTx) - req.NoError(err, "error serializing tx") - - // When verifying and caching the transaction fails we expect an error - res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx}) - req.Error(err) - grpcStatus, ok := status.FromError(err) - req.True(ok) - req.Equal(codes.InvalidArgument, grpcStatus.Code()) - req.Contains(grpcStatus.Message(), "Failed to verify transaction") - req.Nil(res) -} - -func TestTransactionService_SubmitNoConcurrency(t *testing.T) { - numTxs := 20 - - ctrl := gomock.NewController(t) - syncer := NewMocksyncer(ctrl) - syncer.EXPECT().IsSynced(gomock.Any()).Return(true).Times(numTxs) - publisher := pubsubmocks.NewMockPublisher(ctrl) - publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(numTxs) - txHandler := NewMocktxValidator(ctrl) - txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(nil).Times(numTxs) - - db := statesql.InMemoryTest(t) - grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler) - cfg, cleanup := launchServer(t, grpcService) - t.Cleanup(cleanup) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - conn := dialGrpc(t, cfg) - c := pb.NewTransactionServiceClient(conn) - for range numTxs { - res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{ - Transaction: globalTx.Raw, - }) - require.NoError(t, err) - 
require.Equal(t, int32(code.Code_OK), res.Status.Code) - require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State) - } -} - -func TestTransactionService(t *testing.T) { - ctrl := gomock.NewController(t) - syncer := NewMocksyncer(ctrl) - syncer.EXPECT().IsSynced(gomock.Any()).Return(true).AnyTimes() - publisher := pubsubmocks.NewMockPublisher(ctrl) - publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - txHandler := NewMocktxValidator(ctrl) - txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - - db := statesql.InMemoryTest(t) - grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler) - cfg, cleanup := launchServer(t, grpcService) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewTransactionServiceClient(conn) - - // Construct an array of test cases to test each endpoint in turn - testCases := []struct { - name string - run func(*testing.T) - }{ - {"SubmitSpawnTransaction", func(t *testing.T) { - res, err := c.SubmitTransaction(context.Background(), &pb.SubmitTransactionRequest{ - Transaction: globalTx.Raw, - }) - require.NoError(t, err) - require.Equal(t, int32(code.Code_OK), res.Status.Code) - require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State) - }}, - {"TransactionsState_MissingTransactionId", func(t *testing.T) { - _, err := c.TransactionsState(context.Background(), &pb.TransactionsStateRequest{}) - require.Equal(t, codes.InvalidArgument, status.Code(err)) - require.ErrorContains(t, err, "`TransactionId` must include") - }}, - {"TransactionsState_TransactionIdZeroLen", func(t *testing.T) { - _, err := c.TransactionsState(context.Background(), &pb.TransactionsStateRequest{ - TransactionId: []*pb.TransactionId{}, - }) - require.Equal(t, codes.InvalidArgument, 
status.Code(err)) - require.ErrorContains(t, err, "`TransactionId` must include") - }}, - {"TransactionsState_StateOnly", func(t *testing.T) { - req := &pb.TransactionsStateRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - res, err := c.TransactionsState(context.Background(), req) - require.NoError(t, err) - require.Len(t, res.TransactionsState, 1) - require.Empty(t, res.Transactions) - require.Equal(t, globalTx.ID.Bytes(), res.TransactionsState[0].Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionsState[0].State) - }}, - {"TransactionsState_All", func(t *testing.T) { - req := &pb.TransactionsStateRequest{} - req.IncludeTransactions = true - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - res, err := c.TransactionsState(context.Background(), req) - require.NoError(t, err) - require.Len(t, res.TransactionsState, 1) - require.Len(t, res.Transactions, 1) - require.Equal(t, globalTx.ID.Bytes(), res.TransactionsState[0].Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionsState[0].State) - - checkTransaction(t, res.Transactions[0]) - }}, - {"TransactionsStateStream_MissingTransactionId", func(t *testing.T) { - req := &pb.TransactionsStateStreamRequest{} - stream, err := c.TransactionsStateStream(context.Background(), req) - require.NoError(t, err) - _, err = stream.Recv() - require.Equal(t, codes.InvalidArgument, status.Code(err)) - require.ErrorContains(t, err, "`TransactionId` must include") - }}, - {"TransactionsStateStream_TransactionIdZeroLen", func(t *testing.T) { - req := &pb.TransactionsStateStreamRequest{ - TransactionId: []*pb.TransactionId{}, - } - stream, err := c.TransactionsStateStream(context.Background(), req) - require.NoError(t, err) - _, err = stream.Recv() - require.Equal(t, codes.InvalidArgument, status.Code(err)) - require.ErrorContains(t, err, "`TransactionId` 
must include") - }}, - {"TransactionsStateStream_StateOnly", func(t *testing.T) { - // Set up the reporter - req := &pb.TransactionsStateStreamRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - - events.CloseEventReporter() - events.InitializeReporter() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - stream, err := c.TransactionsStateStream(ctx, req) - require.NoError(t, err) - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - require.NoError(t, events.ReportNewTx(0, globalTx)) - res, err := stream.Recv() - require.NoError(t, err) - require.Nil(t, res.Transaction) - require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State) - }}, - {"TransactionsStateStream_All", func(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - req := &pb.TransactionsStateStreamRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - req.IncludeTransactions = true - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - stream, err := c.TransactionsStateStream(ctx, req) - require.NoError(t, err) - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - require.NoError(t, events.ReportNewTx(0, globalTx)) - - // Verify - res, err := stream.Recv() - require.NoError(t, err) - require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State) - checkTransaction(t, res.Transaction) - }}, - // Submit a tx, then receive it over the stream - {"TransactionsState_SubmitThenStream", func(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - 
t.Cleanup(events.CloseEventReporter) - - // Remove the tx from the mesh so it only appears in the mempool - delete(conStateAPI.returnTx, globalTx.ID) - defer func() { conStateAPI.returnTx[globalTx.ID] = globalTx }() - - // STREAM - // Open the stream first and listen for new transactions - req := &pb.TransactionsStateStreamRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - req.IncludeTransactions = true - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Simulate the process by which a newly-broadcast tx lands in the mempool - broadcastSignal := make(chan struct{}) - var eg errgroup.Group - - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-broadcastSignal: - // We assume the data is valid here, and put it directly into the txpool - conStateAPI.put(globalTx.ID, globalTx) - return nil - } - }) - - stream, err := c.TransactionsStateStream(ctx, req) - require.NoError(t, err) - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{ - Transaction: globalTx.Raw, - }) - require.NoError(t, err) - require.Equal(t, int32(code.Code_OK), res.Status.Code) - require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State) - close(broadcastSignal) - require.NoError(t, eg.Wait()) - - response, err := stream.Recv() - require.NoError(t, err) - require.Equal(t, globalTx.ID.Bytes(), response.TransactionState.Id.Id) - // We expect the tx to go to the mempool - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, response.TransactionState.State) - checkTransaction(t, response.Transaction) - }}, - {"TransactionsStateStream_ManySubscribers", func(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - 
- req := &pb.TransactionsStateStreamRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - req.IncludeTransactions = true - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - const subscriberCount = 10 - streams := make([]pb.TransactionService_TransactionsStateStreamClient, 0, subscriberCount) - for range subscriberCount { - stream, err := c.TransactionsStateStream(ctx, req) - require.NoError(t, err) - streams = append(streams, stream) - } - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - require.NoError(t, events.ReportNewTx(0, globalTx)) - - for _, stream := range streams { - res, err := stream.Recv() - require.NoError(t, err) - require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State) - checkTransaction(t, res.Transaction) - } - }}, - {"TransactionsStateStream_NoEventReceiving", func(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - req := &pb.TransactionsStateStreamRequest{} - req.TransactionId = append(req.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - req.IncludeTransactions = true - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - stream, err := c.TransactionsStateStream(ctx, req) - require.NoError(t, err) - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - for range subscriptionChanBufSize * 2 { - require.NoError(t, events.ReportNewTx(0, globalTx)) - } - - for range subscriptionChanBufSize { - _, err := stream.Recv() - if err != nil { - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, st.Message(), errTxBufferFull) - } - } - }}, - } - - // Run subtests - for _, tc := range testCases { - t.Run(tc.name, tc.run) - } 
-} - -func checkTransaction(tb testing.TB, tx *pb.Transaction) { - require.Equal(tb, globalTx.ID.Bytes(), tx.Id) - require.Equal(tb, globalTx.Principal.String(), tx.Principal.Address) - require.Equal(tb, globalTx.GasPrice, tx.GasPrice) - require.Equal(tb, globalTx.MaxGas, tx.MaxGas) - require.Equal(tb, globalTx.MaxSpend, tx.MaxSpend) - require.Equal(tb, globalTx.Nonce, tx.Nonce.Counter) -} - -func checkLayer(tb testing.TB, l *pb.Layer) { - require.Equal(tb, uint32(0), l.Number.Number, "first layer is zero") - require.Equal(tb, pb.Layer_LAYER_STATUS_CONFIRMED, l.Status, "first layer is confirmed") - - require.Empty(tb, l.Activations, "unexpected number of activations in layer") - require.Len(tb, l.Blocks, 1, "unexpected number of blocks in layer") - require.Equal(tb, stateRoot.Bytes(), l.RootStateHash, "unexpected state root") - - resBlock := l.Blocks[0] - - resTxIDs := make([]types.TransactionID, 0, len(resBlock.Transactions)) - for _, tx := range resBlock.Transactions { - resTxIDs = append(resTxIDs, types.TransactionID(types.BytesToHash(tx.Id))) - } - require.ElementsMatch(tb, block1.TxIDs, resTxIDs) - require.Equal(tb, types.Hash20(block1.ID()).Bytes(), resBlock.Id) - - // Check the tx as well - resTx := resBlock.Transactions[0] - require.Equal(tb, globalTx.ID.Bytes(), resTx.Id) - require.Equal(tb, globalTx.Principal.String(), resTx.Principal.Address) - require.Equal(tb, globalTx.GasPrice, resTx.GasPrice) - require.Equal(tb, globalTx.MaxGas, resTx.MaxGas) - require.Equal(tb, globalTx.MaxSpend, resTx.MaxSpend) - require.Equal(tb, globalTx.Nonce, resTx.Nonce.Counter) -} - -func TestAccountMeshDataStream_comprehensive(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - ctrl := gomock.NewController(t) - genTime := NewMockgenesisTimeAPI(ctrl) - cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) - t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - grpcService := 
NewMeshService( - cdb, - meshAPIMock, - conStateAPI, - genTime, - layersPerEpoch, - types.Hash20{}, - layerDuration, - layerAvgSize, - txsPerProposal, - ) - cfg, cleanup := launchServer(t, grpcService) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewMeshServiceClient(conn) - - // set up the grpc listener stream - req := &pb.AccountMeshDataStreamRequest{ - Filter: &pb.AccountMeshDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountMeshDataFlags: uint32( - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | - pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), - }, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - stream, err := c.AccountMeshDataStream(ctx, req) - require.NoError(t, err, "stream request returned unexpected error") - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - // publish a tx - require.NoError(t, events.ReportNewTx(0, globalTx)) - res, err := stream.Recv() - require.NoError(t, err, "got error from stream") - checkAccountMeshDataItemTx(t, res.Datum.Datum) - - // test streaming a tx and an atx that are filtered out - // these should not be received - require.NoError(t, events.ReportNewTx(0, globalTx2)) - require.NoError(t, events.ReportNewActivation(globalAtx2)) - - _, err = stream.Recv() - require.Error(t, err) - require.Contains(t, []codes.Code{codes.Unknown, codes.DeadlineExceeded}, status.Convert(err).Code()) -} - -func TestAccountDataStream_comprehensive(t *testing.T) { - if testing.Short() { - t.Skip() - } - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - svc := NewGlobalStateService(meshAPIMock, conStateAPI) - cfg, cleanup := launchServer(t, svc) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewGlobalStateServiceClient(conn) - - // set up the grpc listener stream - req := &pb.AccountDataStreamRequest{ - Filter: &pb.AccountDataFilter{ - 
AccountId: &pb.AccountId{Address: addr1.String()}, - AccountDataFlags: uint32( - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD | - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT | - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT), - }, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - stream, err := c.AccountDataStream(ctx, req) - require.NoError(t, err, "stream request returned unexpected error") - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - require.NoError(t, events.ReportRewardReceived(types.Reward{ - Layer: layerFirst, - TotalReward: rewardAmount, - LayerReward: rewardAmount * 2, - Coinbase: addr1, - SmesherID: rewardSmesherID, - })) - - res, err := stream.Recv() - require.NoError(t, err) - checkAccountDataItemReward(t, res.Datum.Datum) - - // publish an account data update - require.NoError(t, events.ReportAccountUpdate(addr1)) - - res, err = stream.Recv() - require.NoError(t, err) - checkAccountDataItemAccount(t, res.Datum.Datum) - - // test streaming a reward and account update that should be filtered out - // these should not be received - require.NoError(t, events.ReportAccountUpdate(addr2)) - require.NoError(t, events.ReportRewardReceived(types.Reward{Coinbase: addr2})) - - _, err = stream.Recv() - require.Error(t, err) -} - -func TestGlobalStateStream_comprehensive(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - svc := NewGlobalStateService(meshAPIMock, conStateAPI) - cfg, cleanup := launchServer(t, svc) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewGlobalStateServiceClient(conn) - - // set up the grpc listener stream - req := &pb.GlobalStateStreamRequest{ - GlobalStateDataFlags: uint32( - pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_ACCOUNT | - pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_GLOBAL_STATE_HASH | - pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_REWARD), - 
} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - stream, err := c.GlobalStateStream(ctx, req) - require.NoError(t, err, "stream request returned unexpected error") - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - // publish a reward - require.NoError(t, events.ReportRewardReceived(types.Reward{ - Layer: layerFirst, - TotalReward: rewardAmount, - LayerReward: rewardAmount * 2, - Coinbase: addr1, - SmesherID: rewardSmesherID, - })) - res, err := stream.Recv() - require.NoError(t, err, "got error from stream") - checkGlobalStateDataReward(t, res.Datum.Datum) - - // publish an account data update - require.NoError(t, events.ReportAccountUpdate(addr1)) - res, err = stream.Recv() - require.NoError(t, err, "got error from stream") - checkGlobalStateDataAccountWrapper(t, res.Datum.Datum) - - // publish a new layer - layer, err := meshAPIMock.GetLayer(layerFirst) - require.NoError(t, err) - - require.NoError(t, events.ReportLayerUpdate(events.LayerUpdate{ - LayerID: layer.Index(), - Status: events.LayerStatusTypeApplied, - })) - res, err = stream.Recv() - require.NoError(t, err, "got error from stream") - checkGlobalStateDataGlobalState(t, res.Datum.Datum) -} - -func TestLayerStream_comprehensive(t *testing.T) { - if testing.Short() { - t.Skip() - } - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - ctrl := gomock.NewController(t) - genTime := NewMockgenesisTimeAPI(ctrl) - cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) - t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - - grpcService := NewMeshService( - cdb, - meshAPIMock, - conStateAPI, - genTime, - layersPerEpoch, - types.Hash20{}, - layerDuration, - layerAvgSize, - txsPerProposal, - ) - cfg, cleanup := launchServer(t, grpcService) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - - // set up the grpc listener stream - c := 
pb.NewMeshServiceClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - stream, err := c.LayerStream(ctx, &pb.LayerStreamRequest{}) - require.NoError(t, err, "stream request returned unexpected error") - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - layer, err := meshAPIMock.GetLayer(layerFirst) - require.NoError(t, err) - - // Act - require.NoError(t, events.ReportLayerUpdate(events.LayerUpdate{ - LayerID: layer.Index(), - Status: events.LayerStatusTypeConfirmed, - })) - - // Verify - res, err := stream.Recv() - require.NoError(t, err, "got error from stream") - require.Equal(t, uint32(0), res.Layer.Number.Number) - require.Equal(t, events.LayerStatusTypeConfirmed, int(res.Layer.Status)) - require.NotEmpty(t, res.Layer.Hash) - checkLayer(t, res.Layer) -} - -func checkAccountDataQueryItemAccount(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.AccountData_AccountWrapper{}, dataItem) - x := dataItem.(*pb.AccountData_AccountWrapper) - // Check the account, nonce, and balance - require.Equal(tb, addr1.String(), x.AccountWrapper.AccountId.Address, - "inner account has bad address") - require.Equal(tb, uint64(accountCounter), x.AccountWrapper.StateCurrent.Counter, - "inner account has bad current counter") - require.Equal(tb, uint64(accountBalance), x.AccountWrapper.StateCurrent.Balance.Value, - "inner account has bad current balance") - require.Equal(tb, uint64(accountCounter+1), x.AccountWrapper.StateProjected.Counter, - "inner account has bad projected counter") - require.Equal(tb, uint64(accountBalance+1), x.AccountWrapper.StateProjected.Balance.Value, - "inner account has bad projected balance") -} - -func checkAccountDataQueryItemReward(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.AccountData_Reward{}, dataItem) - x := dataItem.(*pb.AccountData_Reward) - require.Equal(tb, layerFirst.Uint32(), x.Reward.Layer.Number) - 
require.Equal(tb, uint64(rewardAmount), x.Reward.Total.Value) - require.Equal(tb, uint64(rewardAmount), x.Reward.LayerReward.Value) - require.Equal(tb, addr1.String(), x.Reward.Coinbase.Address) - require.Equal(tb, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) -} - -func checkAccountMeshDataItemTx(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.AccountMeshData_MeshTransaction{}, dataItem) - x := dataItem.(*pb.AccountMeshData_MeshTransaction) - // Check the sender - require.Equal(tb, globalTx.Principal.String(), x.MeshTransaction.Transaction.Principal.Address) -} - -func checkAccountDataItemReward(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.AccountData_Reward{}, dataItem) - x := dataItem.(*pb.AccountData_Reward) - require.Equal(tb, uint64(rewardAmount), x.Reward.Total.Value) - require.Equal(tb, layerFirst.Uint32(), x.Reward.Layer.Number) - require.Equal(tb, uint64(rewardAmount*2), x.Reward.LayerReward.Value) - require.Equal(tb, addr1.String(), x.Reward.Coinbase.Address) - require.Equal(tb, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) -} - -func checkAccountDataItemAccount(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.AccountData_AccountWrapper{}, dataItem) - x := dataItem.(*pb.AccountData_AccountWrapper) - require.Equal(tb, addr1.String(), x.AccountWrapper.AccountId.Address) - require.Equal(tb, uint64(accountBalance), x.AccountWrapper.StateCurrent.Balance.Value) - require.Equal(tb, uint64(accountCounter), x.AccountWrapper.StateCurrent.Counter) - require.Equal(tb, uint64(accountBalance+1), x.AccountWrapper.StateProjected.Balance.Value) - require.Equal(tb, uint64(accountCounter+1), x.AccountWrapper.StateProjected.Counter) -} - -func checkGlobalStateDataReward(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.GlobalStateData_Reward{}, dataItem) - x := dataItem.(*pb.GlobalStateData_Reward) - require.Equal(tb, uint64(rewardAmount), x.Reward.Total.Value) - require.Equal(tb, 
layerFirst.Uint32(), x.Reward.Layer.Number) - require.Equal(tb, uint64(rewardAmount*2), x.Reward.LayerReward.Value) - require.Equal(tb, addr1.String(), x.Reward.Coinbase.Address) - require.Equal(tb, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) -} - -func checkGlobalStateDataAccountWrapper(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.GlobalStateData_AccountWrapper{}, dataItem) - x := dataItem.(*pb.GlobalStateData_AccountWrapper) - require.Equal(tb, addr1.String(), x.AccountWrapper.AccountId.Address) - require.Equal(tb, uint64(accountBalance), x.AccountWrapper.StateCurrent.Balance.Value) - require.Equal(tb, uint64(accountCounter), x.AccountWrapper.StateCurrent.Counter) - require.Equal(tb, uint64(accountBalance+1), x.AccountWrapper.StateProjected.Balance.Value) - require.Equal(tb, uint64(accountCounter+1), x.AccountWrapper.StateProjected.Counter) -} - -func checkGlobalStateDataGlobalState(tb testing.TB, dataItem any) { - tb.Helper() - require.IsType(tb, &pb.GlobalStateData_GlobalState{}, dataItem) - x := dataItem.(*pb.GlobalStateData_GlobalState) - require.Equal(tb, layerFirst.Uint32(), x.GlobalState.Layer.Number) - require.Equal(tb, stateRoot.Bytes(), x.GlobalState.RootHash) -} - -func TestMultiService(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - ctrl, ctx := gomock.WithContext(ctx, t) - syncer := NewMocksyncer(ctrl) - syncer.EXPECT().IsSynced(gomock.Any()).Return(false).AnyTimes() - peerCounter := NewMockpeerCounter(ctrl) - genTime := NewMockgenesisTimeAPI(ctrl) - genesis := time.Unix(genTimeUnix, 0) - genTime.EXPECT().GenesisTime().Return(genesis) - - cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) - t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - svc1 := NewNodeService(peerCounter, meshAPIMock, genTime, syncer, "v0.0.0", "cafebabe") - svc2 := NewMeshService( - cdb, - meshAPIMock, - conStateAPI, - genTime, - layersPerEpoch, - 
types.Hash20{}, - layerDuration, - layerAvgSize, - txsPerProposal, - ) - cfg, shutDown := launchServer(t, svc1, svc2) - t.Cleanup(shutDown) - - c1 := pb.NewNodeServiceClient(dialGrpc(t, cfg)) - c2 := pb.NewMeshServiceClient(dialGrpc(t, cfg)) - - // call endpoints and validate results - const message = "Hello World" - res1, err1 := c1.Echo(ctx, &pb.EchoRequest{ - Msg: &pb.SimpleString{Value: message}, - }) - require.NoError(t, err1) - require.Equal(t, message, res1.Msg.Value) - res2, err2 := c2.GenesisTime(ctx, &pb.GenesisTimeRequest{}) - require.NoError(t, err2) - require.Equal(t, uint64(genesis.Unix()), res2.Unixtime.Value) - - // Make sure that shutting down the grpc service shuts them both down - shutDown() - - // Make sure NodeService is off - _, err1 = c1.Echo(ctx, &pb.EchoRequest{ - Msg: &pb.SimpleString{Value: message}, - }) - require.Equal(t, codes.Unavailable, status.Code(err1)) - - // Make sure MeshService is off - _, err2 = c2.GenesisTime(ctx, &pb.GenesisTimeRequest{}) - require.Equal(t, codes.Unavailable, status.Code(err2)) -} - -func TestDebugService(t *testing.T) { - ctrl := gomock.NewController(t) - netInfo := NewMocknetworkInfo(ctrl) - mOracle := NewMockoracle(ctrl) - db := statesql.InMemoryTest(t) - - testLog := zap.NewAtomicLevel() - loggers := map[string]*zap.AtomicLevel{ - "test": &testLog, - } - - svc := NewDebugService(db, conStateAPI, netInfo, mOracle, loggers) - cfg, cleanup := launchServer(t, svc) - t.Cleanup(cleanup) - - conn := dialGrpc(t, cfg) - c := pb.NewDebugServiceClient(conn) - - t.Run("Accounts", func(t *testing.T) { - res, err := c.Accounts(context.Background(), &pb.AccountsRequest{}) - require.NoError(t, err) - require.Len(t, res.AccountWrapper, 2) - - // Get the list of addresses and compare them regardless of order - var addresses []string - for _, a := range res.AccountWrapper { - addresses = append(addresses, a.AccountId.Address) - } - require.Contains(t, addresses, globalTx.Principal.String()) - require.Contains(t, 
addresses, addr1.String()) - }) - - t.Run("Accounts at layer", func(t *testing.T) { - lid := types.LayerID(11) - for address, balance := range conStateAPI.balances { - accounts.Update(db, &types.Account{ - Address: address, - Balance: balance.Uint64(), - NextNonce: conStateAPI.nonces[address], - Layer: lid, - }) - } - res, err := c.Accounts(context.Background(), &pb.AccountsRequest{Layer: lid.Uint32()}) - require.NoError(t, err) - require.Len(t, res.AccountWrapper, 2) - - // Get the list of addresses and compare them regardless of order - var addresses []string - for _, a := range res.AccountWrapper { - addresses = append(addresses, a.AccountId.Address) - } - require.Contains(t, addresses, globalTx.Principal.String()) - require.Contains(t, addresses, addr1.String()) - - _, err = c.Accounts(context.Background(), &pb.AccountsRequest{Layer: lid.Uint32() - 1}) - require.Error(t, err) - }) - - t.Run("networkID", func(t *testing.T) { - id := p2p.Peer("test") - netInfo.EXPECT().ID().Return(id) - netInfo.EXPECT().ListenAddresses().Return([]ma.Multiaddr{ - mustParseMultiaddr("/ip4/0.0.0.0/tcp/5000"), - mustParseMultiaddr("/ip4/0.0.0.0/udp/5001/quic-v1"), - }) - netInfo.EXPECT().KnownAddresses().Return([]ma.Multiaddr{ - mustParseMultiaddr("/ip4/10.36.0.221/tcp/5000"), - mustParseMultiaddr("/ip4/10.36.0.221/udp/5001/quic-v1"), - }) - netInfo.EXPECT().NATDeviceType().Return(network.NATDeviceTypeCone, network.NATDeviceTypeSymmetric) - netInfo.EXPECT().Reachability().Return(network.ReachabilityPrivate) - netInfo.EXPECT().DHTServerEnabled().Return(true) - peerInfo := peerinfomocks.NewMockPeerInfo(ctrl) - peerInfo.EXPECT().Protocols().Return([]protocol.ID{"foo"}) - peerInfo.EXPECT().EnsureProtoStats(protocol.ID("foo")). 
- DoAndReturn(func(protocol.ID) *peerinfo.DataStats { - var ds peerinfo.DataStats - ds.RecordReceived(6000) - ds.RecordSent(3000) - ds.Tick(1) - ds.Tick(2) - return &ds - }) - netInfo.EXPECT().PeerInfo().Return(peerInfo).AnyTimes() - - response, err := c.NetworkInfo(context.Background(), &emptypb.Empty{}) - require.NoError(t, err) - require.NotNil(t, response) - require.Equal(t, id.String(), response.Id) - require.Equal(t, []string{"/ip4/0.0.0.0/tcp/5000", "/ip4/0.0.0.0/udp/5001/quic-v1"}, - response.ListenAddresses) - require.Equal(t, []string{"/ip4/10.36.0.221/tcp/5000", "/ip4/10.36.0.221/udp/5001/quic-v1"}, - response.KnownAddresses) - require.Equal(t, pb.NetworkInfoResponse_Cone, response.NatTypeUdp) - require.Equal(t, pb.NetworkInfoResponse_Symmetric, response.NatTypeTcp) - require.Equal(t, pb.NetworkInfoResponse_Private, response.Reachability) - require.True(t, response.DhtServerEnabled) - require.Equal(t, map[string]*pb.DataStats{ - "foo": { - BytesSent: 3000, - BytesReceived: 6000, - SendRate: []uint64{300, 10}, - RecvRate: []uint64{600, 20}, - }, - }, response.Stats) - }) - - t.Run("ActiveSet", func(t *testing.T) { - epoch := types.EpochID(3) - activeSet := types.RandomActiveSet(11) - mOracle.EXPECT().ActiveSet(gomock.Any(), epoch).Return(activeSet, nil) - res, err := c.ActiveSet(context.Background(), &pb.ActiveSetRequest{ - Epoch: epoch.Uint32(), - }) - require.NoError(t, err) - require.Equal(t, len(activeSet), len(res.GetIds())) - - var ids []types.ATXID - for _, a := range res.GetIds() { - ids = append(ids, types.ATXID(types.BytesToHash(a.GetId()))) - } - require.ElementsMatch(t, activeSet, ids) - }) - t.Run("ProposalsStream", func(t *testing.T) { - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - stream, err := c.ProposalsStream(ctx, &emptypb.Empty{}) - require.NoError(t, err) - - _, err = stream.Header() - require.NoError(t, err) - 
events.ReportProposal(events.ProposalCreated, &types.Proposal{}) - events.ReportProposal(events.ProposalIncluded, &types.Proposal{}) - - msg, err := stream.Recv() - require.NoError(t, err) - require.Equal(t, pb.Proposal_Created, msg.Status) - - msg, err = stream.Recv() - require.NoError(t, err) - require.Equal(t, pb.Proposal_Included, msg.Status) - }) - - t.Run("ChangeLogLevel module debug", func(t *testing.T) { - _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ - Module: "test", - Level: "DEBUG", - }) - require.NoError(t, err) - - require.Equal(t, zapcore.DebugLevel, testLog.Level()) - }) - - t.Run("ChangeLogLevel module not found", func(t *testing.T) { - _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ - Module: "unknown-module", - Level: "DEBUG", - }) - require.Error(t, err) - - s, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, "cannot find logger unknown-module", s.Message()) - }) - - t.Run("ChangeLogLevel unknown level", func(t *testing.T) { - _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ - Module: "test", - Level: "unknown-level", - }) - require.Error(t, err) - - s, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, "parse level: unrecognized level: \"unknown-level\"", s.Message()) - }) - - t.Run("ChangeLogLevel '*' to debug", func(t *testing.T) { - _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ - Module: "*", - Level: "DEBUG", - }) - require.NoError(t, err) - - require.Equal(t, zapcore.DebugLevel, testLog.Level()) - }) + return l.Addr().(*net.TCPAddr).Port } -func TestEventsReceived(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - txService := NewTransactionService(statesql.InMemoryTest(t), nil, meshAPIMock, conStateAPI, nil, nil) - gsService := NewGlobalStateService(meshAPIMock, conStateAPI) - cfg, cleanup := launchServer(t, txService, 
gsService) - t.Cleanup(cleanup) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - conn1 := dialGrpc(t, cfg) - conn2 := dialGrpc(t, cfg) - - txClient := pb.NewTransactionServiceClient(conn1) - accountClient := pb.NewGlobalStateServiceClient(conn2) - - txReq := &pb.TransactionsStateStreamRequest{} - txReq.TransactionId = append(txReq.TransactionId, &pb.TransactionId{ - Id: globalTx.ID.Bytes(), - }) - - principalReq := &pb.AccountDataStreamRequest{ - Filter: &pb.AccountDataFilter{ - AccountId: &pb.AccountId{Address: addr1.String()}, - AccountDataFlags: uint32( - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT | - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT), - }, - } - - receiverReq := &pb.AccountDataStreamRequest{ - Filter: &pb.AccountDataFilter{ - AccountId: &pb.AccountId{Address: addr2.String()}, - AccountDataFlags: uint32( - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT | - pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT), - }, - } - - txStream, err := txClient.TransactionsStateStream(ctx, txReq) - require.NoError(t, err) - - principalStream, err := accountClient.AccountDataStream(ctx, principalReq) - require.NoError(t, err, "stream request returned unexpected error") - - receiverStream, err := accountClient.AccountDataStream(ctx, receiverReq) - require.NoError(t, err, "receiver stream") - - // Give the server-side time to subscribe to events - time.Sleep(time.Millisecond * 50) - - lg := zaptest.NewLogger(t) - db := statesql.InMemoryTest(t) - svm := vm.New(db, vm.WithLogger(lg)) - conState := txs.NewConservativeState(svm, db, txs.WithLogger(lg.Named("conState"))) - conState.AddToCache(context.Background(), globalTx, time.Now()) - - weight := new(big.Rat).SetFloat64(18.7) - require.NoError(t, err) - rewards := []types.CoinbaseReward{ - {Coinbase: addr2, Weight: types.RatNum{Num: weight.Num().Uint64(), Denom: weight.Denom().Uint64()}}, - } - svm.Apply(types.GetEffectiveGenesis(), 
[]types.Transaction{*globalTx}, rewards) - - txRes, err := txStream.Recv() - require.NoError(t, err) - require.Nil(t, txRes.Transaction) - require.Equal(t, globalTx.ID.Bytes(), txRes.TransactionState.Id.Id) - require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, txRes.TransactionState.State) +func TestNewServersConfig(t *testing.T) { + port1 := getFreePort(t) + port2 := getFreePort(t) - acc1Res, err := principalStream.Recv() - require.NoError(t, err) - require.Equal( - t, - addr1.String(), - acc1Res.Datum.Datum.(*pb.AccountData_AccountWrapper).AccountWrapper.AccountId.Address, + grpcService := New( + fmt.Sprintf(":%d", port1), + zaptest.NewLogger(t).Named("grpc"), + DefaultTestConfig(t), ) - - receiverRes, err := receiverStream.Recv() - require.NoError(t, err) - require.Equal( - t, - addr2.String(), - receiverRes.Datum.Datum.(*pb.AccountData_AccountWrapper).AccountWrapper.AccountId.Address, + jsonService := NewJSONHTTPServer( + zaptest.NewLogger(t).Named("grpc.JSON"), + fmt.Sprintf(":%d", port2), + []string{}, + false, ) -} - -func TestTransactionsRewards(t *testing.T) { - req := require.New(t) - events.CloseEventReporter() - events.InitializeReporter() - t.Cleanup(events.CloseEventReporter) - - cfg, cleanup := launchServer(t, NewGlobalStateService(meshAPIMock, conStateAPI)) - t.Cleanup(cleanup) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) - client := pb.NewGlobalStateServiceClient(dialGrpc(t, cfg)) - - address := wallet.Address(types.RandomNodeID().Bytes()) - weight := new(big.Rat).SetFloat64(18.7) - rewards := []types.CoinbaseReward{{Coinbase: address, Weight: types.RatNumFromBigRat(weight)}} - t.Run("Get rewards from AccountDataStream", func(t *testing.T) { - t.Parallel() - request := &pb.AccountDataStreamRequest{ - Filter: &pb.AccountDataFilter{ - AccountId: &pb.AccountId{Address: address.String()}, - AccountDataFlags: uint32(pb.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD), - }, - } - stream, err := 
client.AccountDataStream(ctx, request) - req.NoError(err, "stream request returned unexpected error") - time.Sleep(50 * time.Millisecond) - - svm := vm.New(statesql.InMemoryTest(t), vm.WithLogger(zaptest.NewLogger(t))) - _, _, err = svm.Apply(types.LayerID(17), []types.Transaction{*globalTx}, rewards) - req.NoError(err) - - data, err := stream.Recv() - req.NoError(err) - req.IsType(&pb.AccountData_Reward{}, data.Datum.Datum) - reward := data.Datum.GetReward() - req.Equal(address.String(), reward.Coinbase.Address) - req.EqualValues(17, reward.Layer.GetNumber()) - }) - t.Run("Get rewards from GlobalStateStream", func(t *testing.T) { - t.Parallel() - request := &pb.GlobalStateStreamRequest{ - GlobalStateDataFlags: uint32(pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_REWARD), - } - stream, err := client.GlobalStateStream(ctx, request) - req.NoError(err, "stream request returned unexpected error") - time.Sleep(50 * time.Millisecond) - - svm := vm.New(statesql.InMemoryTest(t), vm.WithLogger(zaptest.NewLogger(t))) - _, _, err = svm.Apply(types.LayerID(17), []types.Transaction{*globalTx}, rewards) - req.NoError(err) - - data, err := stream.Recv() - req.NoError(err) - req.IsType(&pb.GlobalStateData_Reward{}, data.Datum.Datum) - reward := data.Datum.GetReward() - req.Equal(address.String(), reward.Coinbase.Address) - req.EqualValues(17, reward.Layer.GetNumber()) - }) -} - -func TestVMAccountUpdates(t *testing.T) { - events.CloseEventReporter() - events.InitializeReporter() - - // in memory database doesn't allow reads while writer locked db - db, err := statesql.Open("file:" + filepath.Join(t.TempDir(), "test.sql")) - require.NoError(t, err) - t.Cleanup(func() { db.Close() }) - svm := vm.New(db, vm.WithLogger(zaptest.NewLogger(t))) - cfg, cleanup := launchServer(t, NewGlobalStateService(nil, txs.NewConservativeState(svm, db))) - t.Cleanup(cleanup) - - keys := make([]*signing.EdSigner, 10) - accounts := make([]types.Account, len(keys)) - const initial = 100_000_000 - for i := 
range keys { - signer, err := signing.NewEdSigner() - require.NoError(t, err) - keys[i] = signer - accounts[i] = types.Account{ - Address: wallet.Address(signer.NodeID().Bytes()), - Balance: initial, - } - } - require.NoError(t, svm.ApplyGenesis(accounts)) - spawns := []types.Transaction{} - for _, key := range keys { - spawns = append(spawns, types.Transaction{ - RawTx: types.NewRawTx(wallet.SelfSpawn(key.PrivateKey(), 0)), - }) - } - lid := types.GetEffectiveGenesis().Add(1) - _, _, err = svm.Apply(lid, spawns, nil) - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) - client := pb.NewGlobalStateServiceClient(dialGrpc(t, cfg)) - eg, ctx := errgroup.WithContext(ctx) - states := make(chan *pb.AccountState, len(accounts)) - for _, account := range accounts { - stream, err := client.AccountDataStream(ctx, &pb.AccountDataStreamRequest{ - Filter: &pb.AccountDataFilter{ - AccountId: &pb.AccountId{Address: account.Address.String()}, - AccountDataFlags: uint32(pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT), - }, - }) - require.NoError(t, err) - _, err = stream.Header() - require.NoError(t, err) - eg.Go(func() error { - response, err := stream.Recv() - if err != nil { - return err - } - states <- response.Datum.GetAccountWrapper().StateCurrent - return nil - }) - } - - spends := []types.Transaction{} - const amount = 100_000 - for _, key := range keys { - spends = append(spends, types.Transaction{ - RawTx: types.NewRawTx(wallet.Spend( - key.PrivateKey(), types.Address{1}, amount, 1, - )), - }) - } - _, _, err = svm.Apply(lid.Add(1), spends, nil) - require.NoError(t, err) - require.NoError(t, eg.Wait()) - close(states) - i := 0 - for state := range states { - i++ - require.Equal(t, 2, int(state.Counter)) - require.Less(t, int(state.Balance.Value), initial-amount) - } - require.Equal(t, len(accounts), i) -} - -func createAtxs(tb testing.TB, epoch types.EpochID, atxids []types.ATXID) []*types.ActivationTx { 
- all := make([]*types.ActivationTx, 0, len(atxids)) - for _, id := range atxids { - atx := &types.ActivationTx{ - PublishEpoch: epoch, - NumUnits: 1, - TickCount: 1, - SmesherID: types.RandomNodeID(), - } - atx.SetID(id) - atx.SetReceived(time.Now()) - all = append(all, atx) - } - return all -} - -func TestMeshService_EpochStream(t *testing.T) { - ctrl := gomock.NewController(t) - genTime := NewMockgenesisTimeAPI(ctrl) - db := statesql.InMemoryTest(t) - - cdb := datastore.NewCachedDB(db, zaptest.NewLogger(t)) - t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - srv := NewMeshService( - cdb, - meshAPIMock, - conStateAPI, - genTime, - layersPerEpoch, - types.Hash20{}, - layerDuration, - layerAvgSize, - txsPerProposal, - ) - cfg, cleanup := launchServer(t, srv) - t.Cleanup(cleanup) - - epoch := types.EpochID(3) - atxids := types.RandomActiveSet(100) - all := createAtxs(t, epoch, atxids) - var expected, got []types.ATXID - for i, vatx := range all { - require.NoError(t, atxs.Add(db, vatx, types.AtxBlob{})) - if i%2 == 0 { - require.NoError(t, identities.SetMalicious(db, vatx.SmesherID, []byte("bad"), time.Now())) - } else { - expected = append(expected, vatx.ID()) - } - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - conn := dialGrpc(t, cfg) - client := pb.NewMeshServiceClient(conn) - - stream, err := client.EpochStream(ctx, &pb.EpochStreamRequest{Epoch: epoch.Uint32()}) - require.NoError(t, err) - for { - resp, err := stream.Recv() - if errors.Is(err, io.EOF) { - break - } - got = append(got, types.ATXID(types.BytesToHash(resp.GetId().GetId()))) - } - require.ElementsMatch(t, expected, got) -} - -func mustParseMultiaddr(s string) ma.Multiaddr { - maddr, err := ma.NewMultiaddr(s) - if err != nil { - panic("can't parse multiaddr: " + err.Error()) - } - return maddr + require.Contains(t, grpcService.listener, strconv.Itoa(port1), "Expected same port") + require.Contains(t, jsonService.listener, strconv.Itoa(port2), 
"Expected same port") } diff --git a/api/grpcserver/http_server_test.go b/api/grpcserver/http_server_test.go index cb25b37530..05cc531bff 100644 --- a/api/grpcserver/http_server_test.go +++ b/api/grpcserver/http_server_test.go @@ -16,6 +16,7 @@ import ( "go.uber.org/zap/zaptest" "google.golang.org/protobuf/encoding/protojson" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/datastore" "github.com/spacemeshos/go-spacemesh/sql/statesql" @@ -59,16 +60,16 @@ func TestJsonApi(t *testing.T) { const build = "cafebabe" ctrl, ctx := gomock.WithContext(context.Background(), t) - peerCounter := NewMockpeerCounter(ctrl) - meshAPIMock := NewMockmeshAPI(ctrl) - genTime := NewMockgenesisTimeAPI(ctrl) - syncer := NewMocksyncer(ctrl) - conStateAPI := NewMockconservativeState(ctrl) + peerCounter := v1.NewMockpeerCounter(ctrl) + meshAPIMock := v1.NewMockmeshAPI(ctrl) + genTime := v1.NewMockgenesisTimeAPI(ctrl) + syncer := v1.NewMocksyncer(ctrl) + conStateAPI := v1.NewMockconservativeState(ctrl) cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) - svc1 := NewNodeService(peerCounter, meshAPIMock, genTime, syncer, version, build) - svc2 := NewMeshService( + svc1 := v1.NewNodeService(peerCounter, meshAPIMock, genTime, syncer, version, build) + svc2 := v1.NewMeshService( cdb, meshAPIMock, conStateAPI, diff --git a/api/grpcserver/activation_service.go b/api/grpcserver/v1/activation_service.go similarity index 99% rename from api/grpcserver/activation_service.go rename to api/grpcserver/v1/activation_service.go index 08fbecb2df..a24de0e378 100644 --- a/api/grpcserver/activation_service.go +++ b/api/grpcserver/v1/activation_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/activation_service_test.go b/api/grpcserver/v1/activation_service_test.go similarity 
index 99% rename from api/grpcserver/activation_service_test.go rename to api/grpcserver/v1/activation_service_test.go index 6d65524c03..42d82ad51a 100644 --- a/api/grpcserver/activation_service_test.go +++ b/api/grpcserver/v1/activation_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/admin_service.go b/api/grpcserver/v1/admin_service.go similarity index 99% rename from api/grpcserver/admin_service.go rename to api/grpcserver/v1/admin_service.go index 69f04e7400..e3531d5969 100644 --- a/api/grpcserver/admin_service.go +++ b/api/grpcserver/v1/admin_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/admin_service_test.go b/api/grpcserver/v1/admin_service_test.go similarity index 99% rename from api/grpcserver/admin_service_test.go rename to api/grpcserver/v1/admin_service_test.go index b08b84a183..183e43f62f 100644 --- a/api/grpcserver/admin_service_test.go +++ b/api/grpcserver/v1/admin_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/debug_service.go b/api/grpcserver/v1/debug_service.go similarity index 99% rename from api/grpcserver/debug_service.go rename to api/grpcserver/v1/debug_service.go index 787734920d..507627b2b1 100644 --- a/api/grpcserver/debug_service.go +++ b/api/grpcserver/v1/debug_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/events.go b/api/grpcserver/v1/events.go similarity index 98% rename from api/grpcserver/events.go rename to api/grpcserver/v1/events.go index 3ac23caf73..26aa228059 100644 --- a/api/grpcserver/events.go +++ b/api/grpcserver/v1/events.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/globalstate_service.go b/api/grpcserver/v1/globalstate_service.go similarity index 99% rename from api/grpcserver/globalstate_service.go rename to 
api/grpcserver/v1/globalstate_service.go index d536c3e939..8dfad23ca2 100644 --- a/api/grpcserver/globalstate_service.go +++ b/api/grpcserver/v1/globalstate_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/globalstate_service_test.go b/api/grpcserver/v1/globalstate_service_test.go similarity index 80% rename from api/grpcserver/globalstate_service_test.go rename to api/grpcserver/v1/globalstate_service_test.go index 5fc36e493d..392df65b26 100644 --- a/api/grpcserver/globalstate_service_test.go +++ b/api/grpcserver/v1/globalstate_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" @@ -12,6 +12,8 @@ import ( "google.golang.org/grpc/status" "github.com/spacemeshos/go-spacemesh/common/types" + "github.com/spacemeshos/go-spacemesh/genvm/sdk/wallet" + "github.com/spacemeshos/go-spacemesh/signing" ) type globalStateServiceConn struct { @@ -41,6 +43,11 @@ func setupGlobalStateService(tb testing.TB) (*globalStateServiceConn, context.Co } func TestGlobalStateService(t *testing.T) { + signer1, err := signing.NewEdSigner() + require.NoError(t, err) + + addr1 := wallet.Address(signer1.PublicKey().Bytes()) + t.Run("GlobalStateHash", func(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) @@ -59,6 +66,11 @@ func TestGlobalStateService(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) + const ( + accountBalance = 8675301 + accountCounter = 0 + ) + c.conStateAPI.EXPECT().GetBalance(addr1).Return(accountBalance, nil) c.conStateAPI.EXPECT().GetNonce(addr1).Return(accountCounter, nil) c.conStateAPI.EXPECT().GetProjection(addr1).Return(accountCounter+1, accountBalance+1) @@ -97,9 +109,15 @@ func TestGlobalStateService(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) + const ( + accountBalance = 8675301 + accountCounter = 0 + rewardAmount = 5551234 + ) + c.meshAPI.EXPECT().GetRewardsByCoinbase(addr1).Return([]*types.Reward{ { - Layer: layerFirst, + 
Layer: types.LayerID(0), TotalReward: rewardAmount, LayerReward: rewardAmount, Coinbase: addr1, @@ -127,9 +145,15 @@ func TestGlobalStateService(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) + const ( + accountBalance = 8675301 + accountCounter = 0 + rewardAmount = 5551234 + ) + c.meshAPI.EXPECT().GetRewardsByCoinbase(addr1).Return([]*types.Reward{ { - Layer: layerFirst, + Layer: types.LayerID(0), TotalReward: rewardAmount, LayerReward: rewardAmount, Coinbase: addr1, @@ -156,9 +180,16 @@ func TestGlobalStateService(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) + const ( + accountBalance = 8675301 + accountCounter = 0 + rewardAmount = 5551234 + ) + rewardSmesherID := types.RandomNodeID() + c.meshAPI.EXPECT().GetRewardsByCoinbase(addr1).Return([]*types.Reward{ { - Layer: layerFirst, + Layer: types.LayerID(0), TotalReward: rewardAmount, LayerReward: rewardAmount, Coinbase: addr1, @@ -180,15 +211,29 @@ func TestGlobalStateService(t *testing.T) { require.NoError(t, err) require.Equal(t, uint32(2), res.TotalResults) require.Len(t, res.AccountItem, 1) - checkAccountDataQueryItemReward(t, res.AccountItem[0].Datum) + + require.IsType(t, &pb.AccountData_Reward{}, res.AccountItem[0].Datum) + x := res.AccountItem[0].Datum.(*pb.AccountData_Reward) + require.Equal(t, uint32(0), x.Reward.Layer.Number) + require.Equal(t, uint64(rewardAmount), x.Reward.Total.Value) + require.Equal(t, uint64(rewardAmount), x.Reward.LayerReward.Value) + require.Equal(t, addr1.String(), x.Reward.Coinbase.Address) + require.Equal(t, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) }) t.Run("AccountDataQuery", func(t *testing.T) { t.Parallel() c, ctx := setupGlobalStateService(t) + const ( + accountBalance = 8675301 + accountCounter = 0 + rewardAmount = 5551234 + ) + rewardSmesherID := types.RandomNodeID() + c.meshAPI.EXPECT().GetRewardsByCoinbase(addr1).Return([]*types.Reward{ { - Layer: layerFirst, + Layer: types.LayerID(0), TotalReward: rewardAmount, LayerReward: 
rewardAmount, Coinbase: addr1, @@ -210,8 +255,27 @@ func TestGlobalStateService(t *testing.T) { require.Equal(t, uint32(2), res.TotalResults) require.Len(t, res.AccountItem, 2) - checkAccountDataQueryItemReward(t, res.AccountItem[0].Datum) - checkAccountDataQueryItemAccount(t, res.AccountItem[1].Datum) + require.IsType(t, &pb.AccountData_Reward{}, res.AccountItem[0].Datum) + rewardAccountData := res.AccountItem[0].Datum.(*pb.AccountData_Reward) + require.Equal(t, uint32(0), rewardAccountData.Reward.Layer.Number) + require.Equal(t, uint64(rewardAmount), rewardAccountData.Reward.Total.Value) + require.Equal(t, uint64(rewardAmount), rewardAccountData.Reward.LayerReward.Value) + require.Equal(t, addr1.String(), rewardAccountData.Reward.Coinbase.Address) + require.Equal(t, rewardSmesherID.Bytes(), rewardAccountData.Reward.Smesher.Id) + + require.IsType(t, &pb.AccountData_AccountWrapper{}, res.AccountItem[1].Datum) + accountDataWrapper := res.AccountItem[1].Datum.(*pb.AccountData_AccountWrapper) + // Check the account, nonce, and balance + require.Equal(t, addr1.String(), accountDataWrapper.AccountWrapper.AccountId.Address, + "inner account has bad address") + require.Equal(t, uint64(accountCounter), accountDataWrapper.AccountWrapper.StateCurrent.Counter, + "inner account has bad current counter") + require.Equal(t, uint64(accountBalance), accountDataWrapper.AccountWrapper.StateCurrent.Balance.Value, + "inner account has bad current balance") + require.Equal(t, uint64(accountCounter+1), accountDataWrapper.AccountWrapper.StateProjected.Counter, + "inner account has bad projected counter") + require.Equal(t, uint64(accountBalance+1), accountDataWrapper.AccountWrapper.StateProjected.Balance.Value, + "inner account has bad projected balance") }) t.Run("AccountDataStream_emptyAddress", func(t *testing.T) { diff --git a/api/grpcserver/v1/grpcserver_test.go b/api/grpcserver/v1/grpcserver_test.go new file mode 100644 index 0000000000..7e44fb758d --- /dev/null +++ 
b/api/grpcserver/v1/grpcserver_test.go @@ -0,0 +1,2425 @@ +package v1 + +import ( + "context" + "errors" + "io" + "log" + "math" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" + pb "github.com/spacemeshos/api/release/go/spacemesh/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest" + "go.uber.org/zap/zaptest/observer" + "golang.org/x/sync/errgroup" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/spacemeshos/go-spacemesh/activation" + "github.com/spacemeshos/go-spacemesh/api/grpcserver" + "github.com/spacemeshos/go-spacemesh/codec" + "github.com/spacemeshos/go-spacemesh/common/types" + "github.com/spacemeshos/go-spacemesh/datastore" + "github.com/spacemeshos/go-spacemesh/events" + vm "github.com/spacemeshos/go-spacemesh/genvm" + "github.com/spacemeshos/go-spacemesh/genvm/sdk" + "github.com/spacemeshos/go-spacemesh/genvm/sdk/wallet" + "github.com/spacemeshos/go-spacemesh/p2p" + "github.com/spacemeshos/go-spacemesh/p2p/peerinfo" + peerinfomocks "github.com/spacemeshos/go-spacemesh/p2p/peerinfo/mocks" + pubsubmocks "github.com/spacemeshos/go-spacemesh/p2p/pubsub/mocks" + "github.com/spacemeshos/go-spacemesh/signing" + "github.com/spacemeshos/go-spacemesh/sql/accounts" + "github.com/spacemeshos/go-spacemesh/sql/activesets" + "github.com/spacemeshos/go-spacemesh/sql/atxs" + "github.com/spacemeshos/go-spacemesh/sql/identities" + "github.com/spacemeshos/go-spacemesh/sql/statesql" + "github.com/spacemeshos/go-spacemesh/system" + "github.com/spacemeshos/go-spacemesh/txs" +) + +const ( + labelsPerUnit = 2048 + numUnits = 2 + genTimeUnix = 1000000 + layerDuration = 
10 * time.Second + layerAvgSize = 10 + txsPerProposal = 99 + layersPerEpoch = uint32(5) + + accountBalance = 8675301 + accountCounter = 0 + rewardAmount = 5551234 +) + +var ( + txReturnLayer = types.LayerID(1) + layerFirst = types.LayerID(0) + layerVerified = types.LayerID(8) + layerLatest = types.LayerID(10) + layerCurrent = types.LayerID(12) + postGenesisEpoch = types.EpochID(2) + genesisID = types.Hash20{} + + addr1 types.Address + addr2 types.Address + rewardSmesherID = types.RandomNodeID() + globalAtx *types.ActivationTx + globalAtx2 *types.ActivationTx + globalTx *types.Transaction + globalTx2 *types.Transaction + ballot1 = genLayerBallot(types.LayerID(11)) + block1 = genLayerBlock(types.LayerID(11), nil) + block2 = genLayerBlock(types.LayerID(11), nil) + block3 = genLayerBlock(types.LayerID(11), nil) + meshAPIMock = &MeshAPIMock{} + conStateAPI = &ConStateAPIMock{ + returnTx: make(map[types.TransactionID]*types.Transaction), + balances: make(map[types.Address]*big.Int), + nonces: make(map[types.Address]uint64), + poolByAddress: make(map[types.Address]types.TransactionID), + poolByTxId: make(map[types.TransactionID]*types.Transaction), + } + stateRoot = types.HexToHash32("11111") +) + +func genLayerBallot(layerID types.LayerID) *types.Ballot { + b := types.RandomBallot() + b.Layer = layerID + signer, _ := signing.NewEdSigner() + b.Signature = signer.Sign(signing.BALLOT, b.SignedBytes()) + b.SmesherID = signer.NodeID() + b.Initialize() + return b +} + +func genLayerBlock(layerID types.LayerID, txs []types.TransactionID) *types.Block { + b := &types.Block{ + InnerBlock: types.InnerBlock{ + LayerIndex: layerID, + TxIDs: txs, + }, + } + b.Initialize() + return b +} + +func TestMain(m *testing.M) { + types.SetLayersPerEpoch(layersPerEpoch) + + var err error + signer, err := signing.NewEdSigner() + if err != nil { + log.Println("failed to create signer:", err) + os.Exit(1) + } + signer1, err := signing.NewEdSigner() + if err != nil { + log.Println("failed to create 
signer:", err) + os.Exit(1) + } + signer2, err := signing.NewEdSigner() + if err != nil { + log.Println("failed to create signer:", err) + os.Exit(1) + } + + addr1 = wallet.Address(signer1.PublicKey().Bytes()) + addr2 = wallet.Address(signer2.PublicKey().Bytes()) + + globalAtx = &types.ActivationTx{ + PublishEpoch: postGenesisEpoch, + Sequence: 1, + Coinbase: addr1, + NumUnits: numUnits, + Weight: numUnits, + TickCount: 1, + SmesherID: signer.NodeID(), + } + globalAtx.SetReceived(time.Now()) + + globalAtx2 = &types.ActivationTx{ + PublishEpoch: postGenesisEpoch, + Sequence: 1, + Coinbase: addr2, + NumUnits: numUnits, + Weight: numUnits, + TickCount: 1, + SmesherID: signer.NodeID(), + } + globalAtx2.SetReceived(time.Now()) + globalAtx2.SmesherID = signer.NodeID() + globalAtx2.TickCount = 1 + + // These create circular dependencies so they have to be initialized + // after the global vars + ballot1.AtxID = globalAtx.ID() + ballot1.EpochData = &types.EpochData{ActiveSetHash: types.ATXIDList{globalAtx.ID(), globalAtx2.ID()}.Hash()} + + globalTx = NewTx(0, addr1, signer1) + globalTx2 = NewTx(1, addr2, signer2) + + block1.TxIDs = []types.TransactionID{globalTx.ID, globalTx2.ID} + conStateAPI.returnTx[globalTx.ID] = globalTx + conStateAPI.returnTx[globalTx2.ID] = globalTx2 + conStateAPI.balances[addr1] = big.NewInt(int64(accountBalance)) + conStateAPI.balances[addr2] = big.NewInt(int64(accountBalance)) + conStateAPI.nonces[globalTx.Principal] = uint64(accountCounter) + + types.SetLayersPerEpoch(layersPerEpoch) + + res := m.Run() + os.Exit(res) +} + +type MeshAPIMock struct{} + +// latest layer received. +func (m *MeshAPIMock) LatestLayer() types.LayerID { + return layerLatest +} + +// latest layer approved/confirmed/applied to state +// The real logic here is a bit more complicated, as it depends whether the node +// is syncing or not. If it's not syncing, layers are applied to state as they're +// verified by Hare. 
If it's syncing, Hare is not run, and they are applied to +// state as they're confirmed by Tortoise and it advances pbase. This is all in +// flux right now so keep this simple for the purposes of testing. +func (m *MeshAPIMock) LatestLayerInState() types.LayerID { + return layerVerified +} + +func (m *MeshAPIMock) ProcessedLayer() types.LayerID { + return layerVerified +} + +func (m *MeshAPIMock) GetRewardsByCoinbase(types.Address) (rewards []*types.Reward, err error) { + return []*types.Reward{ + { + Layer: layerFirst, + TotalReward: rewardAmount, + LayerReward: rewardAmount, + Coinbase: addr1, + SmesherID: rewardSmesherID, + }, + }, nil +} + +func (m *MeshAPIMock) GetRewardsBySmesherId(types.NodeID) (rewards []*types.Reward, err error) { + return []*types.Reward{ + { + Layer: layerFirst, + TotalReward: rewardAmount, + LayerReward: rewardAmount, + Coinbase: addr1, + SmesherID: rewardSmesherID, + }, + }, nil +} + +func (m *MeshAPIMock) GetLayer(tid types.LayerID) (*types.Layer, error) { + if tid.After(layerCurrent) { + return nil, errors.New("requested layer later than current layer") + } else if tid.After(m.LatestLayer()) { + return nil, errors.New("haven't received that layer yet") + } + + ballots := []*types.Ballot{ballot1} + blocks := []*types.Block{block1, block2, block3} + return types.NewExistingLayer(tid, ballots, blocks), nil +} + +func (m *MeshAPIMock) GetLayerVerified(tid types.LayerID) (*types.Block, error) { + return block1, nil +} + +func (m *MeshAPIMock) GetATXs( + context.Context, + []types.ATXID, +) (map[types.ATXID]*types.ActivationTx, []types.ATXID) { + atxs := map[types.ATXID]*types.ActivationTx{ + globalAtx.ID(): globalAtx, + globalAtx2.ID(): globalAtx2, + } + return atxs, nil +} + +func (m *MeshAPIMock) MeshHash(types.LayerID) (types.Hash32, error) { + return types.RandomHash(), nil +} + +type ConStateAPIMock struct { + returnTx map[types.TransactionID]*types.Transaction + balances map[types.Address]*big.Int + nonces map[types.Address]uint64 
+ + // In the real txs.txPool struct, there are multiple data structures and they're more complex, + // but we just mock a very simple use case here and only store some of these data + poolByAddress map[types.Address]types.TransactionID + poolByTxId map[types.TransactionID]*types.Transaction +} + +func (t *ConStateAPIMock) put(id types.TransactionID, tx *types.Transaction) { + t.poolByTxId[id] = tx + t.poolByAddress[tx.Principal] = id + events.ReportNewTx(0, tx) +} + +// Return a mock estimated nonce and balance that's different than the default, mimicking transactions that are +// unconfirmed or in the mempool that will update state. +func (t *ConStateAPIMock) GetProjection(types.Address) (uint64, uint64) { + return accountCounter + 1, accountBalance + 1 +} + +func (t *ConStateAPIMock) GetAllAccounts() (res []*types.Account, err error) { + for address, balance := range t.balances { + res = append(res, &types.Account{ + Address: address, + Balance: balance.Uint64(), + NextNonce: t.nonces[address], + }) + } + return res, nil +} + +func (t *ConStateAPIMock) GetStateRoot() (types.Hash32, error) { + return stateRoot, nil +} + +func (t *ConStateAPIMock) HasEvicted(id types.TransactionID) (bool, error) { + panic("not implemented") +} + +func (t *ConStateAPIMock) GetMeshTransaction(id types.TransactionID) (*types.MeshTransaction, error) { + tx, ok := t.returnTx[id] + if ok { + return &types.MeshTransaction{Transaction: *tx, State: types.APPLIED}, nil + } + tx, ok = t.poolByTxId[id] + if ok { + return &types.MeshTransaction{Transaction: *tx, State: types.MEMPOOL}, nil + } + return nil, errors.New("it ain't there") +} + +func (t *ConStateAPIMock) GetTransactionsByAddress( + from, to types.LayerID, + account types.Address, +) ([]*types.MeshTransaction, error) { + if from.After(txReturnLayer) { + return nil, nil + } + var txs []*types.MeshTransaction + for _, tx := range t.returnTx { + if tx.Principal.String() == account.String() { + txs = append(txs, 
&types.MeshTransaction{Transaction: *tx}) + } + } + return txs, nil +} + +func (t *ConStateAPIMock) GetMeshTransactions( + txIds []types.TransactionID, +) (txs []*types.MeshTransaction, missing map[types.TransactionID]struct{}) { + for _, txId := range txIds { + for _, tx := range t.returnTx { + if tx.ID == txId { + txs = append(txs, &types.MeshTransaction{ + State: types.APPLIED, + Transaction: *tx, + }) + } + } + } + return +} + +func (t *ConStateAPIMock) GetLayerStateRoot(types.LayerID) (types.Hash32, error) { + return stateRoot, nil +} + +func (t *ConStateAPIMock) GetBalance(addr types.Address) (uint64, error) { + return t.balances[addr].Uint64(), nil +} + +func (t *ConStateAPIMock) GetNonce(addr types.Address) (types.Nonce, error) { + return t.nonces[addr], nil +} + +func (t *ConStateAPIMock) Validation(raw types.RawTx) system.ValidationRequest { + panic("dont use this") +} + +func NewTx(nonce uint64, recipient types.Address, signer *signing.EdSigner) *types.Transaction { + tx := types.Transaction{TxHeader: &types.TxHeader{}} + tx.Principal = wallet.Address(signer.PublicKey().Bytes()) + if nonce == 0 { + tx.RawTx = types.NewRawTx(wallet.SelfSpawn(signer.PrivateKey(), + 0, + sdk.WithGasPrice(0), + )) + } else { + tx.RawTx = types.NewRawTx( + wallet.Spend(signer.PrivateKey(), recipient, 1, + nonce, + sdk.WithGasPrice(0), + ), + ) + tx.MaxSpend = 1 + } + return &tx +} + +func TestNewLocalServer(t *testing.T) { + tt := []struct { + name string + listener string + warn bool + }{ + { + name: "valid", + listener: "192.168.1.1:1234", + warn: false, + }, + { + name: "valid random port", + listener: "10.0.0.1:0", + warn: false, + }, + { + name: "invalid", + listener: "0.0.0.0:1234", + warn: true, + }, + { + name: "invalid random port", + listener: "88.77.66.11:0", + warn: true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + observer, observedLogs := observer.New(zapcore.WarnLevel) + logger := zaptest.NewLogger(t, 
zaptest.WrapOptions(zap.WrapCore( + func(core zapcore.Core) zapcore.Core { + return zapcore.NewTee(core, observer) + }, + ))) + + ctrl := gomock.NewController(t) + peerCounter := NewMockpeerCounter(ctrl) + meshApi := NewMockmeshAPI(ctrl) + genTime := NewMockgenesisTimeAPI(ctrl) + syncer := NewMocksyncer(ctrl) + + cfg := grpcserver.DefaultTestConfig(t) + cfg.PostListener = tc.listener + svc := NewNodeService(peerCounter, meshApi, genTime, syncer, "v0.0.0", "cafebabe") + _, err := grpcserver.NewWithServices(cfg.PostListener, logger, cfg, []grpcserver.ServiceAPI{svc}) + if tc.warn { + require.Equal(t, 1, observedLogs.Len(), "Expected a warning log") + require.Equal(t, "unsecured grpc server is listening on a public IP address", + observedLogs.All()[0].Message, + ) + require.Equal(t, tc.listener, observedLogs.All()[0].ContextMap()["address"]) + return + } + + require.NoError(t, err) + }) + } +} + +type smesherServiceConn struct { + pb.SmesherServiceClient + + smeshingProvider *activation.MockSmeshingProvider + postSupervisor *MockpostSupervisor + grpcPostService *MockgrpcPostService +} + +func setupSmesherService(tb testing.TB, sig *signing.EdSigner) (*smesherServiceConn, context.Context) { + ctrl, mockCtx := gomock.WithContext(context.Background(), tb) + smeshingProvider := activation.NewMockSmeshingProvider(ctrl) + postSupervisor := NewMockpostSupervisor(ctrl) + grpcPostService := NewMockgrpcPostService(ctrl) + svc := NewSmesherService( + smeshingProvider, + postSupervisor, + grpcPostService, + 10*time.Millisecond, + activation.DefaultPostSetupOpts(), + sig, + ) + svc.SetPostServiceConfig(activation.DefaultTestPostServiceConfig(tb)) + cfg, cleanup := launchServer(tb, svc) + tb.Cleanup(cleanup) + + conn := dialGrpc(tb, cfg) + client := pb.NewSmesherServiceClient(conn) + + return &smesherServiceConn{ + SmesherServiceClient: client, + + smeshingProvider: smeshingProvider, + postSupervisor: postSupervisor, + grpcPostService: grpcPostService, + }, mockCtx +} + +func 
TestSmesherService(t *testing.T) { + t.Run("IsSmeshing", func(t *testing.T) { + t.Parallel() + + sig, err := signing.NewEdSigner() + require.NoError(t, err) + + c, ctx := setupSmesherService(t, sig) + c.smeshingProvider.EXPECT().Smeshing().Return(false) + res, err := c.IsSmeshing(ctx, &emptypb.Empty{}) + require.NoError(t, err) + require.False(t, res.IsSmeshing, "expected IsSmeshing to be false") + }) + + t.Run("StartSmeshingMissingArgs", func(t *testing.T) { + t.Parallel() + + sig, err := signing.NewEdSigner() + require.NoError(t, err) + + c, ctx := setupSmesherService(t, sig) + _, err = c.StartSmeshing(ctx, &pb.StartSmeshingRequest{}) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }) + + t.Run("StartSmeshing", func(t *testing.T) { + t.Parallel() + opts := &pb.PostSetupOpts{} + opts.DataDir = t.TempDir() + opts.NumUnits = 1 + opts.MaxFileSize = 1024 + + coinbase := &pb.AccountId{Address: addr1.String()} + sig, err := signing.NewEdSigner() + require.NoError(t, err) + + c, ctx := setupSmesherService(t, sig) + c.smeshingProvider.EXPECT().StartSmeshing(gomock.Any()).Return(nil) + c.postSupervisor.EXPECT().Start(gomock.Any(), + gomock.All( + gomock.Cond(func(postOpts activation.PostSetupOpts) bool { + return postOpts.DataDir == opts.DataDir + }), + gomock.Cond(func(postOpts activation.PostSetupOpts) bool { + return postOpts.NumUnits == opts.NumUnits + }), + gomock.Cond(func(postOpts activation.PostSetupOpts) bool { + return postOpts.MaxFileSize == opts.MaxFileSize + }), + ), sig).Return(nil) + c.grpcPostService.EXPECT().AllowConnections(true) + res, err := c.StartSmeshing(ctx, &pb.StartSmeshingRequest{ + Opts: opts, + Coinbase: coinbase, + }) + require.NoError(t, err) + require.Equal(t, int32(code.Code_OK), res.Status.Code) + }) + + t.Run("StartSmeshingMultiSetup", func(t *testing.T) { + t.Parallel() + opts := &pb.PostSetupOpts{} + opts.DataDir = t.TempDir() + opts.NumUnits = 1 + opts.MaxFileSize = 1024 + + coinbase := &pb.AccountId{Address: 
addr1.String()} + + c, ctx := setupSmesherService(t, nil) // in 1:n the node id is nil and start smeshing should fail + res, err := c.StartSmeshing(ctx, &pb.StartSmeshingRequest{ + Opts: opts, + Coinbase: coinbase, + }) + require.Equal(t, codes.FailedPrecondition, status.Code(err)) + require.ErrorContains(t, err, "node is not configured for supervised smeshing") + require.Nil(t, res) + }) + + t.Run("StopSmeshing", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + c.smeshingProvider.EXPECT().StopSmeshing(gomock.Any()).Return(nil) + c.postSupervisor.EXPECT().Stop(false).Return(nil) + res, err := c.StopSmeshing(ctx, &pb.StopSmeshingRequest{}) + require.NoError(t, err) + require.Equal(t, int32(code.Code_OK), res.Status.Code) + }) + + t.Run("SmesherIDs", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + nodeId := types.RandomNodeID() + c.smeshingProvider.EXPECT().SmesherIDs().Return([]types.NodeID{nodeId}) + res, err := c.SmesherIDs(ctx, &emptypb.Empty{}) + require.NoError(t, err) + require.Len(t, res.PublicKeys, 1) + require.Equal(t, nodeId.Bytes(), res.PublicKeys[0]) + }) + + t.Run("SetCoinbaseMissingArgs", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + _, err := c.SetCoinbase(ctx, &pb.SetCoinbaseRequest{}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }) + + t.Run("SetCoinbase", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + c.smeshingProvider.EXPECT().SetCoinbase(addr1) + res, err := c.SetCoinbase(ctx, &pb.SetCoinbaseRequest{ + Id: &pb.AccountId{Address: addr1.String()}, + }) + require.NoError(t, err) + require.Equal(t, int32(code.Code_OK), res.Status.Code) + }) + + t.Run("Coinbase", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + c.smeshingProvider.EXPECT().Coinbase().Return(addr1) + res, err := c.Coinbase(ctx, &emptypb.Empty{}) + require.NoError(t, err) + addr, err := 
types.StringToAddress(res.AccountId.Address) + require.NoError(t, err) + require.Equal(t, addr1, addr) + }) + + t.Run("PostSetupComputeProviders", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + c.postSupervisor.EXPECT().Providers().Return(nil, nil) + _, err := c.PostSetupProviders(ctx, &pb.PostSetupProvidersRequest{Benchmark: false}) + require.NoError(t, err) + }) + + t.Run("PostSetupStatusStream", func(t *testing.T) { + t.Parallel() + c, ctx := setupSmesherService(t, nil) + c.postSupervisor.EXPECT().Status().Return(&activation.PostSetupStatus{}).AnyTimes() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := c.PostSetupStatusStream(ctx, &emptypb.Empty{}) + require.NoError(t, err) + + // Expecting the stream to return updates before closing. + for range 3 { + _, err = stream.Recv() + require.NoError(t, err) + } + + cancel() + require.Eventually(t, func() bool { + _, err = stream.Recv() + return status.Code(err) == codes.Canceled + }, time.Second, time.Millisecond*10) + }) +} + +func TestMeshService(t *testing.T) { + ctrl := gomock.NewController(t) + genTime := NewMockgenesisTimeAPI(ctrl) + genesis := time.Unix(genTimeUnix, 0) + genTime.EXPECT().GenesisTime().Return(genesis) + genTime.EXPECT().CurrentLayer().Return(layerCurrent).AnyTimes() + cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) + t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) + svc := NewMeshService( + cdb, + meshAPIMock, + conStateAPI, + genTime, + layersPerEpoch, + types.Hash20{}, + layerDuration, + layerAvgSize, + txsPerProposal, + ) + require.NoError( + t, + activesets.Add( + cdb, + ballot1.EpochData.ActiveSetHash, + &types.EpochActiveSet{Set: types.ATXIDList{globalAtx.ID(), globalAtx2.ID()}}, + ), + ) + cfg, cleanup := launchServer(t, svc) + t.Cleanup(cleanup) + + conn := dialGrpc(t, cfg) + c := pb.NewMeshServiceClient(conn) + + // Construct an array of test cases to test each endpoint in turn + testCases := 
[]struct { + name string + run func(*testing.T) + }{ + {"GenesisTime", func(t *testing.T) { + response, err := c.GenesisTime(context.Background(), &pb.GenesisTimeRequest{}) + require.NoError(t, err) + require.Equal(t, uint64(genesis.Unix()), response.Unixtime.Value) + }}, + {"CurrentLayer", func(t *testing.T) { + response, err := c.CurrentLayer(context.Background(), &pb.CurrentLayerRequest{}) + require.NoError(t, err) + require.Equal(t, layerCurrent.Uint32(), response.Layernum.Number) + }}, + {"CurrentEpoch", func(t *testing.T) { + response, err := c.CurrentEpoch(context.Background(), &pb.CurrentEpochRequest{}) + require.NoError(t, err) + require.Equal(t, layerCurrent.GetEpoch().Uint32(), response.Epochnum.Number) + }}, + {"GenesisID", func(t *testing.T) { + response, err := c.GenesisID(context.Background(), &pb.GenesisIDRequest{}) + require.NoError(t, err) + require.Equal(t, genesisID.Bytes(), response.GenesisId) + }}, + {"LayerDuration", func(t *testing.T) { + response, err := c.LayerDuration(context.Background(), &pb.LayerDurationRequest{}) + require.NoError(t, err) + require.Equal(t, layerDuration, time.Duration(response.Duration.Value)*time.Second) + }}, + {"MaxTransactionsPerSecond", func(t *testing.T) { + response, err := c.MaxTransactionsPerSecond(context.Background(), &pb.MaxTransactionsPerSecondRequest{}) + require.NoError(t, err) + require.Equal( + t, + uint64(layerAvgSize*txsPerProposal/layerDuration.Seconds()), + response.MaxTxsPerSecond.Value, + ) + }}, + {"AccountMeshDataQuery", func(t *testing.T) { + subtests := []struct { + name string + run func(*testing.T) + }{ + { + // all inputs default to zero, no filter + // query is valid but MaxResults is 0 so expect no results + name: "no_inputs", + run: func(t *testing.T) { + _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{}) + require.ErrorContains(t, err, "`Filter` must be provided") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }, + }, + { + name: 
"MinLayer_too_high", + run: func(t *testing.T) { + _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MinLayer: &pb.LayerNumber{Number: layerCurrent.Add(1).Uint32()}, + }) + require.ErrorContains(t, err, "`LatestLayer` must be less than") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }, + }, + { + // This does not produce an error but we expect no results + name: "Offset_too_high", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{ + Address: types.GenerateAddress(make([]byte, types.AddressLength)).String(), + }, + AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), + }, + Offset: math.MaxUint32, + }) + require.NoError(t, err) + require.Equal(t, uint32(0), res.TotalResults) + require.Empty(t, res.Data) + }, + }, + { + name: "no_filter", + run: func(t *testing.T) { + _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + }) + require.ErrorContains(t, err, "`Filter` must be provided") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }, + }, + { + name: "empty_filter", + run: func(t *testing.T) { + _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{}, + }) + require.ErrorContains(t, err, "`Filter.AccountId` must be provided") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }, + }, + { + name: "filter_with_empty_AccountId", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{ + Address: types.GenerateAddress(make([]byte, types.AddressLength)).String(), + }, + AccountMeshDataFlags: 
uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(0), res.TotalResults) + require.Empty(t, res.Data) + }, + }, + { + name: "filter_with_valid_AccountId", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{ + AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), + AccountId: &pb.AccountId{Address: addr1.String()}, + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(0), res.TotalResults) + require.Empty(t, res.Data) + }, + }, + { + name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_zero", + run: func(t *testing.T) { + _, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_UNSPECIFIED), + }, + }) + require.ErrorContains(t, err, "`Filter.AccountMeshDataFlags` must set at least one bitfield") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }, + }, + { + name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_tx_only", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS, + ), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(1), res.TotalResults) + require.Len(t, res.Data, 1) + checkAccountMeshDataItemTx(t, res.Data[0].Datum) + }, + }, + { + name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_activations_only", + run: func(t *testing.T) { + res, err := 
c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(10), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32(pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(0), res.TotalResults) + require.Empty(t, res.Data) + }, + }, + { + name: "filter_with_valid_AccountId_and_AccountMeshDataFlags_all", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + // Zero means unlimited + MaxResults: uint32(0), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(1), res.TotalResults) + require.Len(t, res.Data, 1) + checkAccountMeshDataItemTx(t, res.Data[0].Datum) + }, + }, + { + name: "max_results", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(1), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(1), res.TotalResults) + require.Len(t, res.Data, 1) + checkAccountMeshDataItemTx(t, res.Data[0].Datum) + }, + }, + { + name: "max_results_page_2", + run: func(t *testing.T) { + res, err := c.AccountMeshDataQuery(context.Background(), &pb.AccountMeshDataQueryRequest{ + MaxResults: uint32(1), + Offset: uint32(1), + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32( + 
pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }) + require.NoError(t, err) + require.Equal(t, uint32(1), res.TotalResults) + require.Empty(t, res.Data) + }, + }, + } + + // Run sub-subtests + for _, r := range subtests { + t.Run(r.name, r.run) + } + }}, + {name: "AccountMeshDataStream", run: func(t *testing.T) { + // common testing framework + generateRunFn := func(req *pb.AccountMeshDataStreamRequest) func(*testing.T) { + return func(*testing.T) { + // Just try opening and immediately closing the stream + stream, err := c.AccountMeshDataStream(context.Background(), req) + require.NoError(t, err, "unexpected error opening stream") + + // Do we need this? It doesn't seem to cause any harm + stream.Context().Done() + } + } + generateRunFnError := func(msg string, req *pb.AccountMeshDataStreamRequest) func(*testing.T) { + return func(t *testing.T) { + // there should be no error opening the stream + stream, err := c.AccountMeshDataStream(context.Background(), req) + require.NoError(t, err, "unexpected error opening stream") + + // sending a request should generate an error + _, err = stream.Recv() + require.ErrorContains(t, err, msg, "received unexpected error") + require.Equal(t, codes.InvalidArgument, status.Code(err)) + + // Do we need this? 
It doesn't seem to cause any harm + stream.Context().Done() + } + } + subtests := []struct { + name string + run func(*testing.T) + }{ + // ERROR INPUTS + // We expect these to produce errors + { + name: "missing_filter", + run: generateRunFnError("`Filter` must be provided", &pb.AccountMeshDataStreamRequest{}), + }, + { + name: "empty_filter", + run: generateRunFnError("`Filter.AccountId` must be provided", &pb.AccountMeshDataStreamRequest{ + Filter: &pb.AccountMeshDataFilter{}, + }), + }, + { + name: "missing_address", + run: generateRunFnError("`Filter.AccountId` must be provided", &pb.AccountMeshDataStreamRequest{ + Filter: &pb.AccountMeshDataFilter{ + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }), + }, + { + name: "filter_with_zero_flags", + run: generateRunFnError( + "`Filter.AccountMeshDataFlags` must set at least one bitfield", + &pb.AccountMeshDataStreamRequest{ + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountMeshDataFlags: uint32(0), + }, + }, + ), + }, + + // SUCCESS + { + name: "empty_address", + run: generateRunFn(&pb.AccountMeshDataStreamRequest{ + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{}, + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }), + }, + { + name: "invalid_address", + run: generateRunFn(&pb.AccountMeshDataStreamRequest{ + Filter: &pb.AccountMeshDataFilter{ + AccountId: &pb.AccountId{Address: types.GenerateAddress([]byte{'A'}).String()}, + AccountMeshDataFlags: uint32( + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS | + pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS), + }, + }), + }, + } + + // Run sub-subtests + for _, r := range subtests { + t.Run(r.name, r.run) + } + }}, + {"LayersQuery", func(t *testing.T) { + 
generateRunFn := func(numResults int, req *pb.LayersQueryRequest) func(*testing.T) { + return func(t *testing.T) { + res, err := c.LayersQuery(context.Background(), req) + require.NoError(t, err, "query returned an unexpected error") + require.Len(t, res.Layer, numResults, "unexpected number of layer results") + } + } + generateRunFnError := func(msg string, req *pb.LayersQueryRequest) func(*testing.T) { + return func(t *testing.T) { + _, err := c.LayersQuery(context.Background(), req) + require.ErrorContains(t, err, msg, "expected error to contain string") + } + } + requests := []struct { + name string + run func(*testing.T) + }{ + // ERROR INPUTS + // We expect these to produce errors + + // end layer after current layer + { + name: "end_layer_after_current_layer", + run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerCurrent.Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerCurrent.Add(2).Uint32()}, + }), + }, + + // start layer after current layer + { + name: "start_layer_after_current_layer", + run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerCurrent.Add(2).Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerCurrent.Add(3).Uint32()}, + }), + }, + + // layer after last received + { + name: "layer_after_last_received", + run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerLatest.Add(1).Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerLatest.Add(2).Uint32()}, + }), + }, + + // very very large range + { + name: "very_very_large_range", + run: generateRunFnError("error retrieving layer data", &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: 0}, + EndLayer: &pb.LayerNumber{Number: uint32(math.MaxUint32)}, + }), + }, + + // GOOD INPUTS + + // nil inputs + // not an error since these default to zero, see + // 
https://github.com/spacemeshos/api/issues/87 + { + name: "nil_inputs", + run: generateRunFn(1, &pb.LayersQueryRequest{}), + }, + + // start layer after end layer: expect no error, zero results + { + name: "start_layer_after_end_layer", + run: generateRunFn(0, &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerCurrent.Add(1).Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerCurrent.Uint32()}, + }), + }, + + // same start/end layer: expect no error, one result + { + name: "same_start_end_layer", + run: generateRunFn(1, &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerVerified.Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerVerified.Uint32()}, + }), + }, + + // start layer after last approved/confirmed layer (but before current layer) + { + name: "start_layer_after_last_approved_confirmed_layer", + run: generateRunFn(2, &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerVerified.Add(1).Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerVerified.Add(2).Uint32()}, + }), + }, + + // end layer after last approved/confirmed layer (but before current layer) + { + name: "end_layer_after_last_approved_confirmed_layer", + // expect difference + 1 return layers + run: generateRunFn( + int(layerVerified.Add(2).Sub(layerFirst.Uint32()).Add(1).Uint32()), + &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerFirst.Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerVerified.Add(2).Uint32()}, + }, + ), + }, + + // comprehensive valid test + { + name: "comprehensive", + run: func(t *testing.T) { + req := &pb.LayersQueryRequest{ + StartLayer: &pb.LayerNumber{Number: layerFirst.Uint32()}, + EndLayer: &pb.LayerNumber{Number: layerLatest.Uint32()}, + } + + res, err := c.LayersQuery(context.Background(), req) + require.NoError(t, err, "query returned unexpected error") + + // endpoint inclusive so add one + numLayers := layerLatest.Difference(layerFirst) + 1 + require.Len(t, res.Layer, int(numLayers)) + checkLayer(t, 
					res.Layer[0])

				resLayerNine := res.Layer[9]
				require.Equal(t, uint32(9), resLayerNine.Number.Number, "layer nine is ninth")
				require.NotEmpty(t, resLayerNine.Hash)
				require.Equal(
					t,
					pb.Layer_LAYER_STATUS_UNSPECIFIED,
					resLayerNine.Status,
					"later layer is unconfirmed",
				)
			},
		},
	}

	// Run sub-subtests
	for _, r := range requests {
		t.Run(r.name, r.run)
	}
	}},
	// NOTE: There are no simple error tests for LayerStream, as it does not take any arguments.
	// See TestLayerStream_comprehensive test, below.
	}

	// Run subtests
	for _, tc := range testCases {
		t.Run(tc.name, tc.run)
	}
}

// TestTransactionServiceSubmitUnsync verifies that SubmitTransaction is rejected with
// FailedPrecondition while the node reports itself as not synced, and accepted once the
// syncer reports synced. Sync state is driven by the ordered gomock expectations below.
func TestTransactionServiceSubmitUnsync(t *testing.T) {
	req := require.New(t)

	ctrl := gomock.NewController(t)
	syncer := NewMocksyncer(ctrl)
	// First SubmitTransaction call sees an unsynced node.
	syncer.EXPECT().IsSynced(gomock.Any()).Return(false)
	publisher := pubsubmocks.NewMockPublisher(ctrl)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
	txHandler := NewMocktxValidator(ctrl)
	txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(nil)

	svc := NewTransactionService(statesql.InMemoryTest(t), publisher, meshAPIMock, conStateAPI, syncer, txHandler)
	cfg, cleanup := launchServer(t, svc)
	t.Cleanup(cleanup)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn := dialGrpc(t, cfg)
	c := pb.NewTransactionServiceClient(conn)

	serializedTx, err := codec.Encode(globalTx)
	req.NoError(err, "error serializing tx")

	// This time, we expect an error, since isSynced is false (by default)
	// The node should not allow tx submission when not synced
	res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx})
	req.Error(err)
	grpcStatus, ok := status.FromError(err)
	req.True(ok)
	req.Equal(codes.FailedPrecondition, grpcStatus.Code())
	req.Equal("Cannot submit transaction, node is not in sync yet, try again later", grpcStatus.Message())
	req.Nil(res)

	// Second SubmitTransaction call sees a synced node and must succeed.
	syncer.EXPECT().IsSynced(gomock.Any()).Return(true)

	// This time, we expect no error, since isSynced is now true
	_, err = c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx})
	req.NoError(err)
}

// TestTransactionServiceSubmitInvalidTx verifies that a transaction failing
// VerifyAndCacheTx is rejected with InvalidArgument and is never published.
func TestTransactionServiceSubmitInvalidTx(t *testing.T) {
	req := require.New(t)

	ctrl := gomock.NewController(t)
	syncer := NewMocksyncer(ctrl)
	syncer.EXPECT().IsSynced(gomock.Any()).Return(true)
	publisher := pubsubmocks.NewMockPublisher(ctrl) // publish is not called
	txHandler := NewMocktxValidator(ctrl)
	// Validation failure is the behavior under test.
	txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(errors.New("failed validation"))

	db := statesql.InMemoryTest(t)
	grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler)
	cfg, cleanup := launchServer(t, grpcService)
	t.Cleanup(cleanup)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn := dialGrpc(t, cfg)
	c := pb.NewTransactionServiceClient(conn)

	serializedTx, err := codec.Encode(globalTx)
	req.NoError(err, "error serializing tx")

	// When verifying and caching the transaction fails we expect an error
	res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{Transaction: serializedTx})
	req.Error(err)
	grpcStatus, ok := status.FromError(err)
	req.True(ok)
	req.Equal(codes.InvalidArgument, grpcStatus.Code())
	req.Contains(grpcStatus.Message(), "Failed to verify transaction")
	req.Nil(res)
}

// TestTransactionService_SubmitNoConcurrency submits numTxs transactions sequentially;
// the mock expectations pin the exact number of sync checks, validations, and publishes.
func TestTransactionService_SubmitNoConcurrency(t *testing.T) {
	numTxs := 20

	ctrl := gomock.NewController(t)
	syncer := NewMocksyncer(ctrl)
	syncer.EXPECT().IsSynced(gomock.Any()).Return(true).Times(numTxs)
	publisher := pubsubmocks.NewMockPublisher(ctrl)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(numTxs)
	txHandler := NewMocktxValidator(ctrl)
	txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(),
		gomock.Any()).Return(nil).Times(numTxs)

	db := statesql.InMemoryTest(t)
	grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler)
	cfg, cleanup := launchServer(t, grpcService)
	t.Cleanup(cleanup)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn := dialGrpc(t, cfg)
	c := pb.NewTransactionServiceClient(conn)
	// Sequential submissions: every one must land in the mempool state.
	for range numTxs {
		res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{
			Transaction: globalTx.Raw,
		})
		require.NoError(t, err)
		require.Equal(t, int32(code.Code_OK), res.Status.Code)
		require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id)
		require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State)
	}
}

// TestTransactionService exercises the TransactionService RPCs end-to-end against a
// real gRPC server backed by mocks: submission, state queries, and state streams.
func TestTransactionService(t *testing.T) {
	ctrl := gomock.NewController(t)
	syncer := NewMocksyncer(ctrl)
	syncer.EXPECT().IsSynced(gomock.Any()).Return(true).AnyTimes()
	publisher := pubsubmocks.NewMockPublisher(ctrl)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
	txHandler := NewMocktxValidator(ctrl)
	txHandler.EXPECT().VerifyAndCacheTx(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

	db := statesql.InMemoryTest(t)
	grpcService := NewTransactionService(db, publisher, meshAPIMock, conStateAPI, syncer, txHandler)
	cfg, cleanup := launchServer(t, grpcService)
	t.Cleanup(cleanup)

	conn := dialGrpc(t, cfg)
	c := pb.NewTransactionServiceClient(conn)

	// Construct an array of test cases to test each endpoint in turn
	testCases := []struct {
		name string
		run  func(*testing.T)
	}{
		{"SubmitSpawnTransaction", func(t *testing.T) {
			res, err := c.SubmitTransaction(context.Background(), &pb.SubmitTransactionRequest{
				Transaction: globalTx.Raw,
			})
			require.NoError(t, err)
			require.Equal(t, int32(code.Code_OK), res.Status.Code)
			require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id)
			require.Equal(t,
				pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State)
		}},
		{"TransactionsState_MissingTransactionId", func(t *testing.T) {
			_, err := c.TransactionsState(context.Background(), &pb.TransactionsStateRequest{})
			require.Equal(t, codes.InvalidArgument, status.Code(err))
			require.ErrorContains(t, err, "`TransactionId` must include")
		}},
		{"TransactionsState_TransactionIdZeroLen", func(t *testing.T) {
			_, err := c.TransactionsState(context.Background(), &pb.TransactionsStateRequest{
				TransactionId: []*pb.TransactionId{},
			})
			require.Equal(t, codes.InvalidArgument, status.Code(err))
			require.ErrorContains(t, err, "`TransactionId` must include")
		}},
		{"TransactionsState_StateOnly", func(t *testing.T) {
			req := &pb.TransactionsStateRequest{}
			req.TransactionId = append(req.TransactionId, &pb.TransactionId{
				Id: globalTx.ID.Bytes(),
			})
			res, err := c.TransactionsState(context.Background(), req)
			require.NoError(t, err)
			require.Len(t, res.TransactionsState, 1)
			require.Empty(t, res.Transactions)
			require.Equal(t, globalTx.ID.Bytes(), res.TransactionsState[0].Id.Id)
			require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionsState[0].State)
		}},
		{"TransactionsState_All", func(t *testing.T) {
			req := &pb.TransactionsStateRequest{}
			req.IncludeTransactions = true
			req.TransactionId = append(req.TransactionId, &pb.TransactionId{
				Id: globalTx.ID.Bytes(),
			})
			res, err := c.TransactionsState(context.Background(), req)
			require.NoError(t, err)
			require.Len(t, res.TransactionsState, 1)
			require.Len(t, res.Transactions, 1)
			require.Equal(t, globalTx.ID.Bytes(), res.TransactionsState[0].Id.Id)
			require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionsState[0].State)

			checkTransaction(t, res.Transactions[0])
		}},
		{"TransactionsStateStream_MissingTransactionId", func(t *testing.T) {
			req := &pb.TransactionsStateStreamRequest{}
			stream, err :=
				c.TransactionsStateStream(context.Background(), req)
			require.NoError(t, err)
			// Validation errors surface on the first Recv, not on stream open.
			_, err = stream.Recv()
			require.Equal(t, codes.InvalidArgument, status.Code(err))
			require.ErrorContains(t, err, "`TransactionId` must include")
		}},
		{"TransactionsStateStream_TransactionIdZeroLen", func(t *testing.T) {
			req := &pb.TransactionsStateStreamRequest{
				TransactionId: []*pb.TransactionId{},
			}
			stream, err := c.TransactionsStateStream(context.Background(), req)
			require.NoError(t, err)
			_, err = stream.Recv()
			require.Equal(t, codes.InvalidArgument, status.Code(err))
			require.ErrorContains(t, err, "`TransactionId` must include")
		}},
		{"TransactionsStateStream_StateOnly", func(t *testing.T) {
			// Set up the reporter
			req := &pb.TransactionsStateStreamRequest{}
			req.TransactionId = append(req.TransactionId, &pb.TransactionId{
				Id: globalTx.ID.Bytes(),
			})

			events.CloseEventReporter()
			events.InitializeReporter()

			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			stream, err := c.TransactionsStateStream(ctx, req)
			require.NoError(t, err)
			// Give the server-side time to subscribe to events
			time.Sleep(time.Millisecond * 50)

			require.NoError(t, events.ReportNewTx(0, globalTx))
			res, err := stream.Recv()
			require.NoError(t, err)
			// IncludeTransactions is unset, so only state is returned.
			require.Nil(t, res.Transaction)
			require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id)
			require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State)
		}},
		{"TransactionsStateStream_All", func(t *testing.T) {
			events.CloseEventReporter()
			events.InitializeReporter()
			t.Cleanup(events.CloseEventReporter)

			req := &pb.TransactionsStateStreamRequest{}
			req.TransactionId = append(req.TransactionId, &pb.TransactionId{
				Id: globalTx.ID.Bytes(),
			})
			req.IncludeTransactions = true

			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			stream, err := c.TransactionsStateStream(ctx, req)
+ require.NoError(t, err) + // Give the server-side time to subscribe to events + time.Sleep(time.Millisecond * 50) + + require.NoError(t, events.ReportNewTx(0, globalTx)) + + // Verify + res, err := stream.Recv() + require.NoError(t, err) + require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id) + require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State) + checkTransaction(t, res.Transaction) + }}, + // Submit a tx, then receive it over the stream + {"TransactionsState_SubmitThenStream", func(t *testing.T) { + events.CloseEventReporter() + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + // Remove the tx from the mesh so it only appears in the mempool + delete(conStateAPI.returnTx, globalTx.ID) + defer func() { conStateAPI.returnTx[globalTx.ID] = globalTx }() + + // STREAM + // Open the stream first and listen for new transactions + req := &pb.TransactionsStateStreamRequest{} + req.TransactionId = append(req.TransactionId, &pb.TransactionId{ + Id: globalTx.ID.Bytes(), + }) + req.IncludeTransactions = true + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Simulate the process by which a newly-broadcast tx lands in the mempool + broadcastSignal := make(chan struct{}) + var eg errgroup.Group + + eg.Go(func() error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-broadcastSignal: + // We assume the data is valid here, and put it directly into the txpool + conStateAPI.put(globalTx.ID, globalTx) + return nil + } + }) + + stream, err := c.TransactionsStateStream(ctx, req) + require.NoError(t, err) + // Give the server-side time to subscribe to events + time.Sleep(time.Millisecond * 50) + + res, err := c.SubmitTransaction(ctx, &pb.SubmitTransactionRequest{ + Transaction: globalTx.Raw, + }) + require.NoError(t, err) + require.Equal(t, int32(code.Code_OK), res.Status.Code) + require.Equal(t, globalTx.ID.Bytes(), res.Txstate.Id.Id) + 
require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, res.Txstate.State) + close(broadcastSignal) + require.NoError(t, eg.Wait()) + + response, err := stream.Recv() + require.NoError(t, err) + require.Equal(t, globalTx.ID.Bytes(), response.TransactionState.Id.Id) + // We expect the tx to go to the mempool + require.Equal(t, pb.TransactionState_TRANSACTION_STATE_MEMPOOL, response.TransactionState.State) + checkTransaction(t, response.Transaction) + }}, + {"TransactionsStateStream_ManySubscribers", func(t *testing.T) { + events.CloseEventReporter() + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + req := &pb.TransactionsStateStreamRequest{} + req.TransactionId = append(req.TransactionId, &pb.TransactionId{ + Id: globalTx.ID.Bytes(), + }) + req.IncludeTransactions = true + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + const subscriberCount = 10 + streams := make([]pb.TransactionService_TransactionsStateStreamClient, 0, subscriberCount) + for range subscriberCount { + stream, err := c.TransactionsStateStream(ctx, req) + require.NoError(t, err) + streams = append(streams, stream) + } + // Give the server-side time to subscribe to events + time.Sleep(time.Millisecond * 50) + + require.NoError(t, events.ReportNewTx(0, globalTx)) + + for _, stream := range streams { + res, err := stream.Recv() + require.NoError(t, err) + require.Equal(t, globalTx.ID.Bytes(), res.TransactionState.Id.Id) + require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, res.TransactionState.State) + checkTransaction(t, res.Transaction) + } + }}, + {"TransactionsStateStream_NoEventReceiving", func(t *testing.T) { + events.CloseEventReporter() + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + req := &pb.TransactionsStateStreamRequest{} + req.TransactionId = append(req.TransactionId, &pb.TransactionId{ + Id: globalTx.ID.Bytes(), + }) + req.IncludeTransactions = true + + ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + stream, err := c.TransactionsStateStream(ctx, req) + require.NoError(t, err) + // Give the server-side time to subscribe to events + time.Sleep(time.Millisecond * 50) + + for range subscriptionChanBufSize * 2 { + require.NoError(t, events.ReportNewTx(0, globalTx)) + } + + for range subscriptionChanBufSize { + _, err := stream.Recv() + if err != nil { + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, st.Message(), errTxBufferFull) + } + } + }}, + } + + // Run subtests + for _, tc := range testCases { + t.Run(tc.name, tc.run) + } +} + +func checkTransaction(tb testing.TB, tx *pb.Transaction) { + require.Equal(tb, globalTx.ID.Bytes(), tx.Id) + require.Equal(tb, globalTx.Principal.String(), tx.Principal.Address) + require.Equal(tb, globalTx.GasPrice, tx.GasPrice) + require.Equal(tb, globalTx.MaxGas, tx.MaxGas) + require.Equal(tb, globalTx.MaxSpend, tx.MaxSpend) + require.Equal(tb, globalTx.Nonce, tx.Nonce.Counter) +} + +func checkLayer(tb testing.TB, l *pb.Layer) { + require.Equal(tb, uint32(0), l.Number.Number, "first layer is zero") + require.Equal(tb, pb.Layer_LAYER_STATUS_CONFIRMED, l.Status, "first layer is confirmed") + + require.Empty(tb, l.Activations, "unexpected number of activations in layer") + require.Len(tb, l.Blocks, 1, "unexpected number of blocks in layer") + require.Equal(tb, stateRoot.Bytes(), l.RootStateHash, "unexpected state root") + + resBlock := l.Blocks[0] + + resTxIDs := make([]types.TransactionID, 0, len(resBlock.Transactions)) + for _, tx := range resBlock.Transactions { + resTxIDs = append(resTxIDs, types.TransactionID(types.BytesToHash(tx.Id))) + } + require.ElementsMatch(tb, block1.TxIDs, resTxIDs) + require.Equal(tb, types.Hash20(block1.ID()).Bytes(), resBlock.Id) + + // Check the tx as well + resTx := resBlock.Transactions[0] + require.Equal(tb, globalTx.ID.Bytes(), resTx.Id) + require.Equal(tb, globalTx.Principal.String(), 
		resTx.Principal.Address)
	require.Equal(tb, globalTx.GasPrice, resTx.GasPrice)
	require.Equal(tb, globalTx.MaxGas, resTx.MaxGas)
	require.Equal(tb, globalTx.MaxSpend, resTx.MaxSpend)
	require.Equal(tb, globalTx.Nonce, resTx.Nonce.Counter)
}

// TestAccountMeshDataStream_comprehensive streams mesh data for addr1 and checks
// that a matching tx event is delivered while events for other accounts
// (globalTx2, globalAtx2) are filtered out — the stream then times out.
func TestAccountMeshDataStream_comprehensive(t *testing.T) {
	events.CloseEventReporter()
	events.InitializeReporter()
	t.Cleanup(events.CloseEventReporter)

	ctrl := gomock.NewController(t)
	genTime := NewMockgenesisTimeAPI(ctrl)
	cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t))
	t.Cleanup(func() { assert.NoError(t, cdb.Close()) })
	grpcService := NewMeshService(
		cdb,
		meshAPIMock,
		conStateAPI,
		genTime,
		layersPerEpoch,
		types.Hash20{},
		layerDuration,
		layerAvgSize,
		txsPerProposal,
	)
	cfg, cleanup := launchServer(t, grpcService)
	t.Cleanup(cleanup)

	conn := dialGrpc(t, cfg)
	c := pb.NewMeshServiceClient(conn)

	// set up the grpc listener stream
	req := &pb.AccountMeshDataStreamRequest{
		Filter: &pb.AccountMeshDataFilter{
			AccountId: &pb.AccountId{Address: addr1.String()},
			AccountMeshDataFlags: uint32(
				pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_ACTIVATIONS |
					pb.AccountMeshDataFlag_ACCOUNT_MESH_DATA_FLAG_TRANSACTIONS),
		},
	}

	// Short deadline on purpose: the final Recv is expected to fail once the
	// filtered-out events never arrive.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	stream, err := c.AccountMeshDataStream(ctx, req)
	require.NoError(t, err, "stream request returned unexpected error")
	// Give the server-side time to subscribe to events
	time.Sleep(time.Millisecond * 50)

	// publish a tx
	require.NoError(t, events.ReportNewTx(0, globalTx))
	res, err := stream.Recv()
	require.NoError(t, err, "got error from stream")
	checkAccountMeshDataItemTx(t, res.Datum.Datum)

	// test streaming a tx and an atx that are filtered out
	// these should not be received
	require.NoError(t, events.ReportNewTx(0, globalTx2))
	require.NoError(t, events.ReportNewActivation(globalAtx2))

	_, err =
		stream.Recv()
	require.Error(t, err)
	require.Contains(t, []codes.Code{codes.Unknown, codes.DeadlineExceeded}, status.Convert(err).Code())
}

// TestAccountDataStream_comprehensive streams global-state account data for addr1:
// a reward event and an account update must arrive in order, while events for
// addr2 are filtered out and the stream errors on the deadline.
func TestAccountDataStream_comprehensive(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	events.CloseEventReporter()
	events.InitializeReporter()
	t.Cleanup(events.CloseEventReporter)

	svc := NewGlobalStateService(meshAPIMock, conStateAPI)
	cfg, cleanup := launchServer(t, svc)
	t.Cleanup(cleanup)

	conn := dialGrpc(t, cfg)
	c := pb.NewGlobalStateServiceClient(conn)

	// set up the grpc listener stream
	req := &pb.AccountDataStreamRequest{
		Filter: &pb.AccountDataFilter{
			AccountId: &pb.AccountId{Address: addr1.String()},
			AccountDataFlags: uint32(
				pb.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD |
					pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT |
					pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT),
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	stream, err := c.AccountDataStream(ctx, req)
	require.NoError(t, err, "stream request returned unexpected error")
	// Give the server-side time to subscribe to events
	time.Sleep(time.Millisecond * 50)

	require.NoError(t, events.ReportRewardReceived(types.Reward{
		Layer:       layerFirst,
		TotalReward: rewardAmount,
		LayerReward: rewardAmount * 2,
		Coinbase:    addr1,
		SmesherID:   rewardSmesherID,
	}))

	res, err := stream.Recv()
	require.NoError(t, err)
	checkAccountDataItemReward(t, res.Datum.Datum)

	// publish an account data update
	require.NoError(t, events.ReportAccountUpdate(addr1))

	res, err = stream.Recv()
	require.NoError(t, err)
	checkAccountDataItemAccount(t, res.Datum.Datum)

	// test streaming a reward and account update that should be filtered out
	// these should not be received
	require.NoError(t, events.ReportAccountUpdate(addr2))
	require.NoError(t, events.ReportRewardReceived(types.Reward{Coinbase: addr2}))

	_, err = stream.Recv()
	require.Error(t, err)
}

// TestGlobalStateStream_comprehensive subscribes to account, global-state-hash,
// and reward updates and verifies one event of each kind arrives in order.
func TestGlobalStateStream_comprehensive(t *testing.T) {
	events.CloseEventReporter()
	events.InitializeReporter()
	t.Cleanup(events.CloseEventReporter)

	svc := NewGlobalStateService(meshAPIMock, conStateAPI)
	cfg, cleanup := launchServer(t, svc)
	t.Cleanup(cleanup)

	conn := dialGrpc(t, cfg)
	c := pb.NewGlobalStateServiceClient(conn)

	// set up the grpc listener stream
	req := &pb.GlobalStateStreamRequest{
		GlobalStateDataFlags: uint32(
			pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_ACCOUNT |
				pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_GLOBAL_STATE_HASH |
				pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_REWARD),
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := c.GlobalStateStream(ctx, req)
	require.NoError(t, err, "stream request returned unexpected error")
	// Give the server-side time to subscribe to events
	time.Sleep(time.Millisecond * 50)

	// publish a reward
	require.NoError(t, events.ReportRewardReceived(types.Reward{
		Layer:       layerFirst,
		TotalReward: rewardAmount,
		LayerReward: rewardAmount * 2,
		Coinbase:    addr1,
		SmesherID:   rewardSmesherID,
	}))
	res, err := stream.Recv()
	require.NoError(t, err, "got error from stream")
	checkGlobalStateDataReward(t, res.Datum.Datum)

	// publish an account data update
	require.NoError(t, events.ReportAccountUpdate(addr1))
	res, err = stream.Recv()
	require.NoError(t, err, "got error from stream")
	checkGlobalStateDataAccountWrapper(t, res.Datum.Datum)

	// publish a new layer
	layer, err := meshAPIMock.GetLayer(layerFirst)
	require.NoError(t, err)

	require.NoError(t, events.ReportLayerUpdate(events.LayerUpdate{
		LayerID: layer.Index(),
		Status:  events.LayerStatusTypeApplied,
	}))
	res, err = stream.Recv()
	require.NoError(t, err, "got error from stream")
	checkGlobalStateDataGlobalState(t, res.Datum.Datum)
}

// TestLayerStream_comprehensive opens a LayerStream, reports a confirmed layer
// update, and verifies the streamed layer matches the fixture (see checkLayer).
func TestLayerStream_comprehensive(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	events.CloseEventReporter()
	events.InitializeReporter()
	t.Cleanup(events.CloseEventReporter)

	ctrl := gomock.NewController(t)
	genTime := NewMockgenesisTimeAPI(ctrl)
	cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t))
	t.Cleanup(func() { assert.NoError(t, cdb.Close()) })

	grpcService := NewMeshService(
		cdb,
		meshAPIMock,
		conStateAPI,
		genTime,
		layersPerEpoch,
		types.Hash20{},
		layerDuration,
		layerAvgSize,
		txsPerProposal,
	)
	cfg, cleanup := launchServer(t, grpcService)
	t.Cleanup(cleanup)

	conn := dialGrpc(t, cfg)

	// set up the grpc listener stream
	c := pb.NewMeshServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stream, err := c.LayerStream(ctx, &pb.LayerStreamRequest{})
	require.NoError(t, err, "stream request returned unexpected error")
	// Give the server-side time to subscribe to events
	time.Sleep(time.Millisecond * 50)

	layer, err := meshAPIMock.GetLayer(layerFirst)
	require.NoError(t, err)

	// Act
	require.NoError(t, events.ReportLayerUpdate(events.LayerUpdate{
		LayerID: layer.Index(),
		Status:  events.LayerStatusTypeConfirmed,
	}))

	// Verify
	res, err := stream.Recv()
	require.NoError(t, err, "got error from stream")
	require.Equal(t, uint32(0), res.Layer.Number.Number)
	// NOTE(review): this compares the events.LayerStatusType constant against the
	// pb.Layer status enum numerically; it relies on the two enums having matching
	// values — confirm this is intentional.
	require.Equal(t, events.LayerStatusTypeConfirmed, int(res.Layer.Status))
	require.NotEmpty(t, res.Layer.Hash)
	checkLayer(t, res.Layer)
}

// checkAccountMeshDataItemTx asserts the streamed datum is a mesh transaction
// whose principal matches the globalTx fixture.
func checkAccountMeshDataItemTx(tb testing.TB, dataItem any) {
	tb.Helper()
	require.IsType(tb, &pb.AccountMeshData_MeshTransaction{}, dataItem)
	x := dataItem.(*pb.AccountMeshData_MeshTransaction)
	// Check the sender
	require.Equal(tb, globalTx.Principal.String(), x.MeshTransaction.Transaction.Principal.Address)
}

// checkAccountDataItemReward asserts the streamed datum is the reward fixture
// reported for addr1 in layerFirst.
func checkAccountDataItemReward(tb testing.TB, dataItem any) {
	tb.Helper()
	require.IsType(tb, &pb.AccountData_Reward{}, dataItem)
	x :=
dataItem.(*pb.AccountData_Reward) + require.Equal(tb, uint64(rewardAmount), x.Reward.Total.Value) + require.Equal(tb, layerFirst.Uint32(), x.Reward.Layer.Number) + require.Equal(tb, uint64(rewardAmount*2), x.Reward.LayerReward.Value) + require.Equal(tb, addr1.String(), x.Reward.Coinbase.Address) + require.Equal(tb, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) +} + +func checkAccountDataItemAccount(tb testing.TB, dataItem any) { + tb.Helper() + require.IsType(tb, &pb.AccountData_AccountWrapper{}, dataItem) + x := dataItem.(*pb.AccountData_AccountWrapper) + require.Equal(tb, addr1.String(), x.AccountWrapper.AccountId.Address) + require.Equal(tb, uint64(accountBalance), x.AccountWrapper.StateCurrent.Balance.Value) + require.Equal(tb, uint64(accountCounter), x.AccountWrapper.StateCurrent.Counter) + require.Equal(tb, uint64(accountBalance+1), x.AccountWrapper.StateProjected.Balance.Value) + require.Equal(tb, uint64(accountCounter+1), x.AccountWrapper.StateProjected.Counter) +} + +func checkGlobalStateDataReward(tb testing.TB, dataItem any) { + tb.Helper() + require.IsType(tb, &pb.GlobalStateData_Reward{}, dataItem) + x := dataItem.(*pb.GlobalStateData_Reward) + require.Equal(tb, uint64(rewardAmount), x.Reward.Total.Value) + require.Equal(tb, layerFirst.Uint32(), x.Reward.Layer.Number) + require.Equal(tb, uint64(rewardAmount*2), x.Reward.LayerReward.Value) + require.Equal(tb, addr1.String(), x.Reward.Coinbase.Address) + require.Equal(tb, rewardSmesherID.Bytes(), x.Reward.Smesher.Id) +} + +func checkGlobalStateDataAccountWrapper(tb testing.TB, dataItem any) { + tb.Helper() + require.IsType(tb, &pb.GlobalStateData_AccountWrapper{}, dataItem) + x := dataItem.(*pb.GlobalStateData_AccountWrapper) + require.Equal(tb, addr1.String(), x.AccountWrapper.AccountId.Address) + require.Equal(tb, uint64(accountBalance), x.AccountWrapper.StateCurrent.Balance.Value) + require.Equal(tb, uint64(accountCounter), x.AccountWrapper.StateCurrent.Counter) + require.Equal(tb, 
uint64(accountBalance+1), x.AccountWrapper.StateProjected.Balance.Value) + require.Equal(tb, uint64(accountCounter+1), x.AccountWrapper.StateProjected.Counter) +} + +func checkGlobalStateDataGlobalState(tb testing.TB, dataItem any) { + tb.Helper() + require.IsType(tb, &pb.GlobalStateData_GlobalState{}, dataItem) + x := dataItem.(*pb.GlobalStateData_GlobalState) + require.Equal(tb, layerFirst.Uint32(), x.GlobalState.Layer.Number) + require.Equal(tb, stateRoot.Bytes(), x.GlobalState.RootHash) +} + +func TestMultiService(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ctrl, ctx := gomock.WithContext(ctx, t) + syncer := NewMocksyncer(ctrl) + syncer.EXPECT().IsSynced(gomock.Any()).Return(false).AnyTimes() + peerCounter := NewMockpeerCounter(ctrl) + genTime := NewMockgenesisTimeAPI(ctrl) + genesis := time.Unix(genTimeUnix, 0) + genTime.EXPECT().GenesisTime().Return(genesis) + + cdb := datastore.NewCachedDB(statesql.InMemoryTest(t), zaptest.NewLogger(t)) + t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) + svc1 := NewNodeService(peerCounter, meshAPIMock, genTime, syncer, "v0.0.0", "cafebabe") + svc2 := NewMeshService( + cdb, + meshAPIMock, + conStateAPI, + genTime, + layersPerEpoch, + types.Hash20{}, + layerDuration, + layerAvgSize, + txsPerProposal, + ) + cfg, shutDown := launchServer(t, svc1, svc2) + t.Cleanup(shutDown) + + c1 := pb.NewNodeServiceClient(dialGrpc(t, cfg)) + c2 := pb.NewMeshServiceClient(dialGrpc(t, cfg)) + + // call endpoints and validate results + const message = "Hello World" + res1, err1 := c1.Echo(ctx, &pb.EchoRequest{ + Msg: &pb.SimpleString{Value: message}, + }) + require.NoError(t, err1) + require.Equal(t, message, res1.Msg.Value) + res2, err2 := c2.GenesisTime(ctx, &pb.GenesisTimeRequest{}) + require.NoError(t, err2) + require.Equal(t, uint64(genesis.Unix()), res2.Unixtime.Value) + + // Make sure that shutting down the grpc service shuts them both down + shutDown() + + // Make 
sure NodeService is off + _, err1 = c1.Echo(ctx, &pb.EchoRequest{ + Msg: &pb.SimpleString{Value: message}, + }) + require.Equal(t, codes.Unavailable, status.Code(err1)) + + // Make sure MeshService is off + _, err2 = c2.GenesisTime(ctx, &pb.GenesisTimeRequest{}) + require.Equal(t, codes.Unavailable, status.Code(err2)) +} + +func TestDebugService(t *testing.T) { + ctrl := gomock.NewController(t) + netInfo := NewMocknetworkInfo(ctrl) + mOracle := NewMockoracle(ctrl) + db := statesql.InMemoryTest(t) + + testLog := zap.NewAtomicLevel() + loggers := map[string]*zap.AtomicLevel{ + "test": &testLog, + } + + svc := NewDebugService(db, conStateAPI, netInfo, mOracle, loggers) + cfg, cleanup := launchServer(t, svc) + t.Cleanup(cleanup) + + conn := dialGrpc(t, cfg) + c := pb.NewDebugServiceClient(conn) + + t.Run("Accounts", func(t *testing.T) { + res, err := c.Accounts(context.Background(), &pb.AccountsRequest{}) + require.NoError(t, err) + require.Len(t, res.AccountWrapper, 2) + + // Get the list of addresses and compare them regardless of order + var addresses []string + for _, a := range res.AccountWrapper { + addresses = append(addresses, a.AccountId.Address) + } + require.Contains(t, addresses, globalTx.Principal.String()) + require.Contains(t, addresses, addr1.String()) + }) + + t.Run("Accounts at layer", func(t *testing.T) { + lid := types.LayerID(11) + for address, balance := range conStateAPI.balances { + accounts.Update(db, &types.Account{ + Address: address, + Balance: balance.Uint64(), + NextNonce: conStateAPI.nonces[address], + Layer: lid, + }) + } + res, err := c.Accounts(context.Background(), &pb.AccountsRequest{Layer: lid.Uint32()}) + require.NoError(t, err) + require.Len(t, res.AccountWrapper, 2) + + // Get the list of addresses and compare them regardless of order + var addresses []string + for _, a := range res.AccountWrapper { + addresses = append(addresses, a.AccountId.Address) + } + require.Contains(t, addresses, globalTx.Principal.String()) + 
require.Contains(t, addresses, addr1.String()) + + _, err = c.Accounts(context.Background(), &pb.AccountsRequest{Layer: lid.Uint32() - 1}) + require.Error(t, err) + }) + + t.Run("networkID", func(t *testing.T) { + id := p2p.Peer("test") + netInfo.EXPECT().ID().Return(id) + netInfo.EXPECT().ListenAddresses().Return([]ma.Multiaddr{ + mustParseMultiaddr("/ip4/0.0.0.0/tcp/5000"), + mustParseMultiaddr("/ip4/0.0.0.0/udp/5001/quic-v1"), + }) + netInfo.EXPECT().KnownAddresses().Return([]ma.Multiaddr{ + mustParseMultiaddr("/ip4/10.36.0.221/tcp/5000"), + mustParseMultiaddr("/ip4/10.36.0.221/udp/5001/quic-v1"), + }) + netInfo.EXPECT().NATDeviceType().Return(network.NATDeviceTypeCone, network.NATDeviceTypeSymmetric) + netInfo.EXPECT().Reachability().Return(network.ReachabilityPrivate) + netInfo.EXPECT().DHTServerEnabled().Return(true) + peerInfo := peerinfomocks.NewMockPeerInfo(ctrl) + peerInfo.EXPECT().Protocols().Return([]protocol.ID{"foo"}) + peerInfo.EXPECT().EnsureProtoStats(protocol.ID("foo")). 
+ DoAndReturn(func(protocol.ID) *peerinfo.DataStats { + var ds peerinfo.DataStats + ds.RecordReceived(6000) + ds.RecordSent(3000) + ds.Tick(1) + ds.Tick(2) + return &ds + }) + netInfo.EXPECT().PeerInfo().Return(peerInfo).AnyTimes() + + response, err := c.NetworkInfo(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + require.NotNil(t, response) + require.Equal(t, id.String(), response.Id) + require.Equal(t, []string{"/ip4/0.0.0.0/tcp/5000", "/ip4/0.0.0.0/udp/5001/quic-v1"}, + response.ListenAddresses) + require.Equal(t, []string{"/ip4/10.36.0.221/tcp/5000", "/ip4/10.36.0.221/udp/5001/quic-v1"}, + response.KnownAddresses) + require.Equal(t, pb.NetworkInfoResponse_Cone, response.NatTypeUdp) + require.Equal(t, pb.NetworkInfoResponse_Symmetric, response.NatTypeTcp) + require.Equal(t, pb.NetworkInfoResponse_Private, response.Reachability) + require.True(t, response.DhtServerEnabled) + require.Equal(t, map[string]*pb.DataStats{ + "foo": { + BytesSent: 3000, + BytesReceived: 6000, + SendRate: []uint64{300, 10}, + RecvRate: []uint64{600, 20}, + }, + }, response.Stats) + }) + + t.Run("ActiveSet", func(t *testing.T) { + epoch := types.EpochID(3) + activeSet := types.RandomActiveSet(11) + mOracle.EXPECT().ActiveSet(gomock.Any(), epoch).Return(activeSet, nil) + res, err := c.ActiveSet(context.Background(), &pb.ActiveSetRequest{ + Epoch: epoch.Uint32(), + }) + require.NoError(t, err) + require.Equal(t, len(activeSet), len(res.GetIds())) + + var ids []types.ATXID + for _, a := range res.GetIds() { + ids = append(ids, types.ATXID(types.BytesToHash(a.GetId()))) + } + require.ElementsMatch(t, activeSet, ids) + }) + t.Run("ProposalsStream", func(t *testing.T) { + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + stream, err := c.ProposalsStream(ctx, &emptypb.Empty{}) + require.NoError(t, err) + + _, err = stream.Header() + require.NoError(t, err) + 
events.ReportProposal(events.ProposalCreated, &types.Proposal{}) + events.ReportProposal(events.ProposalIncluded, &types.Proposal{}) + + msg, err := stream.Recv() + require.NoError(t, err) + require.Equal(t, pb.Proposal_Created, msg.Status) + + msg, err = stream.Recv() + require.NoError(t, err) + require.Equal(t, pb.Proposal_Included, msg.Status) + }) + + t.Run("ChangeLogLevel module debug", func(t *testing.T) { + _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ + Module: "test", + Level: "DEBUG", + }) + require.NoError(t, err) + + require.Equal(t, zapcore.DebugLevel, testLog.Level()) + }) + + t.Run("ChangeLogLevel module not found", func(t *testing.T) { + _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ + Module: "unknown-module", + Level: "DEBUG", + }) + require.Error(t, err) + + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, "cannot find logger unknown-module", s.Message()) + }) + + t.Run("ChangeLogLevel unknown level", func(t *testing.T) { + _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ + Module: "test", + Level: "unknown-level", + }) + require.Error(t, err) + + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, "parse level: unrecognized level: \"unknown-level\"", s.Message()) + }) + + t.Run("ChangeLogLevel '*' to debug", func(t *testing.T) { + _, err := c.ChangeLogLevel(context.Background(), &pb.ChangeLogLevelRequest{ + Module: "*", + Level: "DEBUG", + }) + require.NoError(t, err) + + require.Equal(t, zapcore.DebugLevel, testLog.Level()) + }) +} + +func TestEventsReceived(t *testing.T) { + events.CloseEventReporter() + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + txService := NewTransactionService(statesql.InMemoryTest(t), nil, meshAPIMock, conStateAPI, nil, nil) + gsService := NewGlobalStateService(meshAPIMock, conStateAPI) + cfg, cleanup := launchServer(t, txService, gsService) + t.Cleanup(cleanup) + + 
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + conn1 := dialGrpc(t, cfg) + conn2 := dialGrpc(t, cfg) + + txClient := pb.NewTransactionServiceClient(conn1) + accountClient := pb.NewGlobalStateServiceClient(conn2) + + txReq := &pb.TransactionsStateStreamRequest{} + txReq.TransactionId = append(txReq.TransactionId, &pb.TransactionId{ + Id: globalTx.ID.Bytes(), + }) + + principalReq := &pb.AccountDataStreamRequest{ + Filter: &pb.AccountDataFilter{ + AccountId: &pb.AccountId{Address: addr1.String()}, + AccountDataFlags: uint32( + pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT | + pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT), + }, + } + + receiverReq := &pb.AccountDataStreamRequest{ + Filter: &pb.AccountDataFilter{ + AccountId: &pb.AccountId{Address: addr2.String()}, + AccountDataFlags: uint32( + pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT | + pb.AccountDataFlag_ACCOUNT_DATA_FLAG_TRANSACTION_RECEIPT), + }, + } + + txStream, err := txClient.TransactionsStateStream(ctx, txReq) + require.NoError(t, err) + + principalStream, err := accountClient.AccountDataStream(ctx, principalReq) + require.NoError(t, err, "stream request returned unexpected error") + + receiverStream, err := accountClient.AccountDataStream(ctx, receiverReq) + require.NoError(t, err, "receiver stream") + + // Give the server-side time to subscribe to events + time.Sleep(time.Millisecond * 50) + + lg := zaptest.NewLogger(t) + db := statesql.InMemoryTest(t) + svm := vm.New(db, vm.WithLogger(lg)) + conState := txs.NewConservativeState(svm, db, txs.WithLogger(lg.Named("conState"))) + conState.AddToCache(context.Background(), globalTx, time.Now()) + + weight := new(big.Rat).SetFloat64(18.7) + require.NoError(t, err) + rewards := []types.CoinbaseReward{ + {Coinbase: addr2, Weight: types.RatNum{Num: weight.Num().Uint64(), Denom: weight.Denom().Uint64()}}, + } + svm.Apply(types.GetEffectiveGenesis(), []types.Transaction{*globalTx}, rewards) + + txRes, err 
:= txStream.Recv() + require.NoError(t, err) + require.Nil(t, txRes.Transaction) + require.Equal(t, globalTx.ID.Bytes(), txRes.TransactionState.Id.Id) + require.Equal(t, pb.TransactionState_TRANSACTION_STATE_PROCESSED, txRes.TransactionState.State) + + acc1Res, err := principalStream.Recv() + require.NoError(t, err) + require.Equal( + t, + addr1.String(), + acc1Res.Datum.Datum.(*pb.AccountData_AccountWrapper).AccountWrapper.AccountId.Address, + ) + + receiverRes, err := receiverStream.Recv() + require.NoError(t, err) + require.Equal( + t, + addr2.String(), + receiverRes.Datum.Datum.(*pb.AccountData_AccountWrapper).AccountWrapper.AccountId.Address, + ) +} + +func TestTransactionsRewards(t *testing.T) { + req := require.New(t) + events.CloseEventReporter() + events.InitializeReporter() + t.Cleanup(events.CloseEventReporter) + + cfg, cleanup := launchServer(t, NewGlobalStateService(meshAPIMock, conStateAPI)) + t.Cleanup(cleanup) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + client := pb.NewGlobalStateServiceClient(dialGrpc(t, cfg)) + + address := wallet.Address(types.RandomNodeID().Bytes()) + weight := new(big.Rat).SetFloat64(18.7) + rewards := []types.CoinbaseReward{{Coinbase: address, Weight: types.RatNumFromBigRat(weight)}} + + t.Run("Get rewards from AccountDataStream", func(t *testing.T) { + t.Parallel() + request := &pb.AccountDataStreamRequest{ + Filter: &pb.AccountDataFilter{ + AccountId: &pb.AccountId{Address: address.String()}, + AccountDataFlags: uint32(pb.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD), + }, + } + stream, err := client.AccountDataStream(ctx, request) + req.NoError(err, "stream request returned unexpected error") + time.Sleep(50 * time.Millisecond) + + svm := vm.New(statesql.InMemoryTest(t), vm.WithLogger(zaptest.NewLogger(t))) + _, _, err = svm.Apply(types.LayerID(17), []types.Transaction{*globalTx}, rewards) + req.NoError(err) + + data, err := stream.Recv() + req.NoError(err) + 
req.IsType(&pb.AccountData_Reward{}, data.Datum.Datum) + reward := data.Datum.GetReward() + req.Equal(address.String(), reward.Coinbase.Address) + req.EqualValues(17, reward.Layer.GetNumber()) + }) + t.Run("Get rewards from GlobalStateStream", func(t *testing.T) { + t.Parallel() + request := &pb.GlobalStateStreamRequest{ + GlobalStateDataFlags: uint32(pb.GlobalStateDataFlag_GLOBAL_STATE_DATA_FLAG_REWARD), + } + stream, err := client.GlobalStateStream(ctx, request) + req.NoError(err, "stream request returned unexpected error") + time.Sleep(50 * time.Millisecond) + + svm := vm.New(statesql.InMemoryTest(t), vm.WithLogger(zaptest.NewLogger(t))) + _, _, err = svm.Apply(types.LayerID(17), []types.Transaction{*globalTx}, rewards) + req.NoError(err) + + data, err := stream.Recv() + req.NoError(err) + req.IsType(&pb.GlobalStateData_Reward{}, data.Datum.Datum) + reward := data.Datum.GetReward() + req.Equal(address.String(), reward.Coinbase.Address) + req.EqualValues(17, reward.Layer.GetNumber()) + }) +} + +func TestVMAccountUpdates(t *testing.T) { + events.CloseEventReporter() + events.InitializeReporter() + + // in memory database doesn't allow reads while writer locked db + db, err := statesql.Open("file:" + filepath.Join(t.TempDir(), "test.sql")) + require.NoError(t, err) + t.Cleanup(func() { db.Close() }) + svm := vm.New(db, vm.WithLogger(zaptest.NewLogger(t))) + cfg, cleanup := launchServer(t, NewGlobalStateService(nil, txs.NewConservativeState(svm, db))) + t.Cleanup(cleanup) + + keys := make([]*signing.EdSigner, 10) + accounts := make([]types.Account, len(keys)) + const initial = 100_000_000 + for i := range keys { + signer, err := signing.NewEdSigner() + require.NoError(t, err) + keys[i] = signer + accounts[i] = types.Account{ + Address: wallet.Address(signer.NodeID().Bytes()), + Balance: initial, + } + } + require.NoError(t, svm.ApplyGenesis(accounts)) + spawns := []types.Transaction{} + for _, key := range keys { + spawns = append(spawns, types.Transaction{ + RawTx: 
types.NewRawTx(wallet.SelfSpawn(key.PrivateKey(), 0)), + }) + } + lid := types.GetEffectiveGenesis().Add(1) + _, _, err = svm.Apply(lid, spawns, nil) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + client := pb.NewGlobalStateServiceClient(dialGrpc(t, cfg)) + eg, ctx := errgroup.WithContext(ctx) + states := make(chan *pb.AccountState, len(accounts)) + for _, account := range accounts { + stream, err := client.AccountDataStream(ctx, &pb.AccountDataStreamRequest{ + Filter: &pb.AccountDataFilter{ + AccountId: &pb.AccountId{Address: account.Address.String()}, + AccountDataFlags: uint32(pb.AccountDataFlag_ACCOUNT_DATA_FLAG_ACCOUNT), + }, + }) + require.NoError(t, err) + _, err = stream.Header() + require.NoError(t, err) + eg.Go(func() error { + response, err := stream.Recv() + if err != nil { + return err + } + states <- response.Datum.GetAccountWrapper().StateCurrent + return nil + }) + } + + spends := []types.Transaction{} + const amount = 100_000 + for _, key := range keys { + spends = append(spends, types.Transaction{ + RawTx: types.NewRawTx(wallet.Spend( + key.PrivateKey(), types.Address{1}, amount, 1, + )), + }) + } + _, _, err = svm.Apply(lid.Add(1), spends, nil) + require.NoError(t, err) + require.NoError(t, eg.Wait()) + close(states) + i := 0 + for state := range states { + i++ + require.Equal(t, 2, int(state.Counter)) + require.Less(t, int(state.Balance.Value), initial-amount) + } + require.Equal(t, len(accounts), i) +} + +func createAtxs(tb testing.TB, epoch types.EpochID, atxids []types.ATXID) []*types.ActivationTx { + all := make([]*types.ActivationTx, 0, len(atxids)) + for _, id := range atxids { + atx := &types.ActivationTx{ + PublishEpoch: epoch, + NumUnits: 1, + TickCount: 1, + SmesherID: types.RandomNodeID(), + } + atx.SetID(id) + atx.SetReceived(time.Now()) + all = append(all, atx) + } + return all +} + +func TestMeshService_EpochStream(t *testing.T) { + ctrl := 
gomock.NewController(t) + genTime := NewMockgenesisTimeAPI(ctrl) + db := statesql.InMemoryTest(t) + + cdb := datastore.NewCachedDB(db, zaptest.NewLogger(t)) + t.Cleanup(func() { assert.NoError(t, cdb.Close()) }) + srv := NewMeshService( + cdb, + meshAPIMock, + conStateAPI, + genTime, + layersPerEpoch, + types.Hash20{}, + layerDuration, + layerAvgSize, + txsPerProposal, + ) + cfg, cleanup := launchServer(t, srv) + t.Cleanup(cleanup) + + epoch := types.EpochID(3) + atxids := types.RandomActiveSet(100) + all := createAtxs(t, epoch, atxids) + var expected, got []types.ATXID + for i, vatx := range all { + require.NoError(t, atxs.Add(db, vatx, types.AtxBlob{})) + if i%2 == 0 { + require.NoError(t, identities.SetMalicious(db, vatx.SmesherID, []byte("bad"), time.Now())) + } else { + expected = append(expected, vatx.ID()) + } + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + conn := dialGrpc(t, cfg) + client := pb.NewMeshServiceClient(conn) + + stream, err := client.EpochStream(ctx, &pb.EpochStreamRequest{Epoch: epoch.Uint32()}) + require.NoError(t, err) + for { + resp, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + got = append(got, types.ATXID(types.BytesToHash(resp.GetId().GetId()))) + } + require.ElementsMatch(t, expected, got) +} diff --git a/api/grpcserver/interface.go b/api/grpcserver/v1/interface.go similarity index 96% rename from api/grpcserver/interface.go rename to api/grpcserver/v1/interface.go index bfa88836b9..b5b841525b 100644 --- a/api/grpcserver/interface.go +++ b/api/grpcserver/v1/interface.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" @@ -15,7 +15,7 @@ import ( "github.com/spacemeshos/go-spacemesh/system" ) -//go:generate mockgen -typed -package=grpcserver -destination=./mocks.go -source=./interface.go +//go:generate mockgen -typed -package=v1 -destination=./mocks.go -source=./interface.go // networkInfo interface. 
type networkInfo interface { diff --git a/api/grpcserver/mesh_service.go b/api/grpcserver/v1/mesh_service.go similarity index 99% rename from api/grpcserver/mesh_service.go rename to api/grpcserver/v1/mesh_service.go index e9ad22b00e..a703e9c676 100644 --- a/api/grpcserver/mesh_service.go +++ b/api/grpcserver/v1/mesh_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/mesh_service_test.go b/api/grpcserver/v1/mesh_service_test.go similarity index 99% rename from api/grpcserver/mesh_service_test.go rename to api/grpcserver/v1/mesh_service_test.go index 988750b7cc..c50c163cfe 100644 --- a/api/grpcserver/mesh_service_test.go +++ b/api/grpcserver/v1/mesh_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/mocks.go b/api/grpcserver/v1/mocks.go similarity index 99% rename from api/grpcserver/mocks.go rename to api/grpcserver/v1/mocks.go index 3867be40c6..234ea8d057 100644 --- a/api/grpcserver/mocks.go +++ b/api/grpcserver/v1/mocks.go @@ -3,11 +3,11 @@ // // Generated by this command: // -// mockgen -typed -package=grpcserver -destination=./mocks.go -source=./interface.go +// mockgen -typed -package=v1 -destination=./mocks.go -source=./interface.go // -// Package grpcserver is a generated GoMock package. -package grpcserver +// Package v1 is a generated GoMock package. 
+package v1 import ( context "context" diff --git a/api/grpcserver/node_service.go b/api/grpcserver/v1/node_service.go similarity index 99% rename from api/grpcserver/node_service.go rename to api/grpcserver/v1/node_service.go index f429cc9283..e14003cdca 100644 --- a/api/grpcserver/node_service.go +++ b/api/grpcserver/v1/node_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/node_service_test.go b/api/grpcserver/v1/node_service_test.go similarity index 97% rename from api/grpcserver/node_service_test.go rename to api/grpcserver/v1/node_service_test.go index d9b937868f..378e330feb 100644 --- a/api/grpcserver/node_service_test.go +++ b/api/grpcserver/v1/node_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" @@ -100,7 +100,7 @@ func TestNodeService(t *testing.T) { // During genesis all layers should be set to current layer layerLatest := types.LayerID(10) c.meshAPI.EXPECT().LatestLayer().Return(layerLatest) - layerCurrent := types.LayerID(layersPerEpoch) // end of first epoch + layerCurrent := types.LayerID(types.GetLayersPerEpoch()) // end of first epoch c.genTime.EXPECT().CurrentLayer().Return(layerCurrent) c.peerCounter.EXPECT().PeerCount().Return(0) c.syncer.EXPECT().IsSynced(gomock.Any()).Return(false) @@ -120,7 +120,7 @@ func TestNodeService(t *testing.T) { // Now do a mock check post-genesis layerLatest := types.LayerID(10) c.meshAPI.EXPECT().LatestLayer().Return(layerLatest) - layerCurrent = types.LayerID(12) + layerCurrent := types.LayerID(12) c.genTime.EXPECT().CurrentLayer().Return(layerCurrent) layerVerified := types.LayerID(8) c.meshAPI.EXPECT().LatestLayerInState().Return(layerVerified) diff --git a/api/grpcserver/post_client.go b/api/grpcserver/v1/post_client.go similarity index 99% rename from api/grpcserver/post_client.go rename to api/grpcserver/v1/post_client.go index 4810ad8c92..c19fc0c9f8 100644 --- a/api/grpcserver/post_client.go +++ 
b/api/grpcserver/v1/post_client.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "bytes" diff --git a/api/grpcserver/post_info_service.go b/api/grpcserver/v1/post_info_service.go similarity index 98% rename from api/grpcserver/post_info_service.go rename to api/grpcserver/v1/post_info_service.go index 284a459f90..41b99a85ff 100644 --- a/api/grpcserver/post_info_service.go +++ b/api/grpcserver/v1/post_info_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/post_info_service_test.go b/api/grpcserver/v1/post_info_service_test.go similarity index 98% rename from api/grpcserver/post_info_service_test.go rename to api/grpcserver/v1/post_info_service_test.go index 319e5b413b..12b4737e6a 100644 --- a/api/grpcserver/post_info_service_test.go +++ b/api/grpcserver/v1/post_info_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/post_service.go b/api/grpcserver/v1/post_service.go similarity index 99% rename from api/grpcserver/post_service.go rename to api/grpcserver/v1/post_service.go index 102e366fc2..d1591dc7e9 100644 --- a/api/grpcserver/post_service.go +++ b/api/grpcserver/v1/post_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/post_service_test.go b/api/grpcserver/v1/post_service_test.go similarity index 99% rename from api/grpcserver/post_service_test.go rename to api/grpcserver/v1/post_service_test.go index 02f1c9a29e..e8f7fc24b5 100644 --- a/api/grpcserver/post_service_test.go +++ b/api/grpcserver/v1/post_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" @@ -20,6 +20,7 @@ import ( "google.golang.org/grpc/status" "github.com/spacemeshos/go-spacemesh/activation" + "github.com/spacemeshos/go-spacemesh/api/grpcserver" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" 
@@ -29,7 +30,7 @@ import ( func launchPostSupervisor( tb testing.TB, log *zap.Logger, - cfg Config, + cfg grpcserver.Config, serviceCfg activation.PostSupervisorConfig, postOpts activation.PostSetupOpts, ) (types.NodeID, func()) { @@ -74,7 +75,7 @@ func launchPostSupervisor( func launchPostSupervisorTLS( tb testing.TB, log *zap.Logger, - cfg Config, + cfg grpcserver.Config, serviceCfg activation.PostSupervisorConfig, postOpts activation.PostSetupOpts, ) (types.NodeID, func()) { diff --git a/api/grpcserver/smesher_service.go b/api/grpcserver/v1/smesher_service.go similarity index 99% rename from api/grpcserver/smesher_service.go rename to api/grpcserver/v1/smesher_service.go index 66381ab5db..e88c442553 100644 --- a/api/grpcserver/smesher_service.go +++ b/api/grpcserver/v1/smesher_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/smesher_service_test.go b/api/grpcserver/v1/smesher_service_test.go similarity index 87% rename from api/grpcserver/smesher_service_test.go rename to api/grpcserver/v1/smesher_service_test.go index c7b5cddfae..f600c8598f 100644 --- a/api/grpcserver/smesher_service_test.go +++ b/api/grpcserver/v1/smesher_service_test.go @@ -1,4 +1,4 @@ -package grpcserver_test +package v1_test import ( "context" @@ -15,7 +15,7 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/spacemeshos/go-spacemesh/activation" - "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" ) @@ -23,9 +23,9 @@ import ( func TestPostConfig(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := 
v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -56,12 +56,12 @@ func TestPostConfig(t *testing.T) { func TestStartSmeshingPassesCorrectSmeshingOpts(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) sig, err := signing.NewEdSigner() require.NoError(t, err) cmdCfg := activation.DefaultTestPostServiceConfig(t) - svc := grpcserver.NewSmesherService( + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -104,11 +104,11 @@ func TestStartSmeshingPassesCorrectSmeshingOpts(t *testing.T) { func TestStartSmeshing_ErrorOnMissingPostServiceConfig(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) sig, err := signing.NewEdSigner() require.NoError(t, err) - svc := grpcserver.NewSmesherService( + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -145,9 +145,9 @@ func TestStartSmeshing_ErrorOnMissingPostServiceConfig(t *testing.T) { func TestStartSmeshing_ErrorOnMultiSmeshingSetup(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) 
+ svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -186,9 +186,9 @@ func TestStartSmeshing_ErrorOnMultiSmeshingSetup(t *testing.T) { func TestSmesherService_PostSetupProviders(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -235,9 +235,9 @@ func TestSmesherService_PostSetupStatus(t *testing.T) { t.Run("completed", func(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -261,9 +261,9 @@ func TestSmesherService_PostSetupStatus(t *testing.T) { t.Run("completed with last Opts", func(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, @@ -300,9 +300,9 @@ func TestSmesherService_PostSetupStatus(t *testing.T) { t.Run("in progress", func(t *testing.T) { ctrl := gomock.NewController(t) smeshingProvider := activation.NewMockSmeshingProvider(ctrl) - 
postSupervisor := grpcserver.NewMockpostSupervisor(ctrl) - grpcPostService := grpcserver.NewMockgrpcPostService(ctrl) - svc := grpcserver.NewSmesherService( + postSupervisor := v1.NewMockpostSupervisor(ctrl) + grpcPostService := v1.NewMockgrpcPostService(ctrl) + svc := v1.NewSmesherService( smeshingProvider, postSupervisor, grpcPostService, diff --git a/api/grpcserver/transaction_service.go b/api/grpcserver/v1/transaction_service.go similarity index 99% rename from api/grpcserver/transaction_service.go rename to api/grpcserver/v1/transaction_service.go index a47a9fb151..c2ec621c95 100644 --- a/api/grpcserver/transaction_service.go +++ b/api/grpcserver/v1/transaction_service.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "bytes" diff --git a/api/grpcserver/transaction_service_test.go b/api/grpcserver/v1/transaction_service_test.go similarity index 99% rename from api/grpcserver/transaction_service_test.go rename to api/grpcserver/v1/transaction_service_test.go index f6a6786154..12bfcb3ce9 100644 --- a/api/grpcserver/transaction_service_test.go +++ b/api/grpcserver/v1/transaction_service_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "context" diff --git a/api/grpcserver/grpcserver_tls_test.go b/api/grpcserver/v1/v1_test.go similarity index 77% rename from api/grpcserver/grpcserver_tls_test.go rename to api/grpcserver/v1/v1_test.go index c4fc4be4db..e4bc683d09 100644 --- a/api/grpcserver/grpcserver_tls_test.go +++ b/api/grpcserver/v1/v1_test.go @@ -1,4 +1,4 @@ -package grpcserver +package v1 import ( "crypto/rand" @@ -14,9 +14,14 @@ import ( "testing" "time" + ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/spacemeshos/go-spacemesh/api/grpcserver" ) const ( @@ -28,6 +33,25 @@ const ( clientKeyName = "client.key" ) +func launchServer(tb testing.TB, 
services ...grpcserver.ServiceAPI) (grpcserver.Config, func()) { + cfg := grpcserver.DefaultTestConfig(tb) + grpcService, err := grpcserver.NewWithServices( + cfg.PublicListener, + zaptest.NewLogger(tb).Named("grpc"), + cfg, + services, + ) + require.NoError(tb, err) + + // start gRPC server + require.NoError(tb, grpcService.Start()) + + // update config with bound addresses + cfg.PublicListener = grpcService.BoundAddress + + return cfg, func() { assert.NoError(tb, grpcService.Close()) } +} + func genPrivateKey(tb testing.TB, path string) *rsa.PrivateKey { caKey, err := rsa.GenerateKey(rand.Reader, 4096) require.NoError(tb, err) @@ -142,18 +166,18 @@ func genKeys(tb testing.TB) string { return dir } -func launchTLSServer(tb testing.TB, certDir string, services ...ServiceAPI) (Config, func()) { +func launchTLSServer(tb testing.TB, certDir string, services ...grpcserver.ServiceAPI) (grpcserver.Config, func()) { caCert := filepath.Join(certDir, caCertName) serverCert := filepath.Join(certDir, serverCertName) serverKey := filepath.Join(certDir, serverKeyName) - cfg := DefaultTestConfig(tb) + cfg := grpcserver.DefaultTestConfig(tb) cfg.TLSListener = "127.0.0.1:0" cfg.TLSCACert = caCert cfg.TLSCert = serverCert cfg.TLSKey = serverKey - grpcService, err := NewTLS(zaptest.NewLogger(tb).Named("grpc.TLS"), cfg, services) + grpcService, err := grpcserver.NewTLS(zaptest.NewLogger(tb).Named("grpc.TLS"), cfg, services) require.NoError(tb, err) // start gRPC server @@ -164,3 +188,22 @@ func launchTLSServer(tb testing.TB, certDir string, services ...ServiceAPI) (Con return cfg, func() { assert.NoError(tb, grpcService.Close()) } } + +func dialGrpc(tb testing.TB, cfg grpcserver.Config) *grpc.ClientConn { + tb.Helper() + conn, err := grpc.NewClient( + cfg.PublicListener, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + require.NoError(tb, err) + tb.Cleanup(func() { require.NoError(tb, conn.Close()) }) + return conn +} + +func mustParseMultiaddr(s string) ma.Multiaddr 
{ + maddr, err := ma.NewMultiaddr(s) + if err != nil { + panic("can't parse multiaddr: " + err.Error()) + } + return maddr +} diff --git a/api/grpcserver/v2alpha1/account.go b/api/grpcserver/v2alpha1/account.go index 68aab55698..5f91d25af9 100644 --- a/api/grpcserver/v2alpha1/account.go +++ b/api/grpcserver/v2alpha1/account.go @@ -15,10 +15,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/builder" ) -const ( - Account = "account_v2alpha1" -) - type accountConState interface { GetProjection(types.Address) (uint64, uint64) } diff --git a/api/grpcserver/v2alpha1/activation.go b/api/grpcserver/v2alpha1/activation.go index 6a455de255..e3782da448 100644 --- a/api/grpcserver/v2alpha1/activation.go +++ b/api/grpcserver/v2alpha1/activation.go @@ -24,11 +24,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/builder" ) -const ( - Activation = "activation_v2alpha1" - ActivationStream = "activation_stream_v2alpha1" -) - func NewActivationStreamService(db sql.Executor) *ActivationStreamService { return &ActivationStreamService{db: db} } diff --git a/api/grpcserver/v2alpha1/layer.go b/api/grpcserver/v2alpha1/layer.go index 5a143e0ba6..39470e50d1 100644 --- a/api/grpcserver/v2alpha1/layer.go +++ b/api/grpcserver/v2alpha1/layer.go @@ -20,11 +20,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/layers" ) -const ( - Layer = "layer_v2alpha1" - LayerStream = "layer_stream_v2alpha1" -) - func NewLayerStreamService(db sql.Executor) *LayerStreamService { return &LayerStreamService{db: db} } diff --git a/api/grpcserver/v2alpha1/malfeasance.go b/api/grpcserver/v2alpha1/malfeasance.go index 42502056cf..4df6f46e4b 100644 --- a/api/grpcserver/v2alpha1/malfeasance.go +++ b/api/grpcserver/v2alpha1/malfeasance.go @@ -26,11 +26,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/malfeasance" ) -const ( - Malfeasance = "malfeasance_v2alpha1" - MalfeasanceStream = "malfeasance_stream_v2alpha1" -) - func NewMalfeasanceService(db sql.StateDatabase, malHandler, legacyHandler 
malfeasanceInfo) *MalfeasanceService { return &MalfeasanceService{ db: db, diff --git a/api/grpcserver/v2alpha1/network.go b/api/grpcserver/v2alpha1/network.go index f9454394cc..a971804964 100644 --- a/api/grpcserver/v2alpha1/network.go +++ b/api/grpcserver/v2alpha1/network.go @@ -14,10 +14,6 @@ import ( "github.com/spacemeshos/go-spacemesh/config" ) -const ( - Network = "network_v2alpha1" -) - func NewNetworkService(genesisTime time.Time, config *config.Config) *NetworkService { return &NetworkService{ genesisTime: genesisTime, diff --git a/api/grpcserver/v2alpha1/node.go b/api/grpcserver/v2alpha1/node.go index 5ce526239f..a39edca931 100644 --- a/api/grpcserver/v2alpha1/node.go +++ b/api/grpcserver/v2alpha1/node.go @@ -11,10 +11,6 @@ import ( "github.com/spacemeshos/go-spacemesh/timesync" ) -const ( - Node = "node_v2alpha1" -) - // nodePeerCounter is an api to get current peer count. type nodePeerCounter interface { PeerCount() uint64 diff --git a/api/grpcserver/v2alpha1/reward.go b/api/grpcserver/v2alpha1/reward.go index 56e90d6753..27fca288cc 100644 --- a/api/grpcserver/v2alpha1/reward.go +++ b/api/grpcserver/v2alpha1/reward.go @@ -21,11 +21,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/rewards" ) -const ( - Reward = "reward_v2alpha1" - RewardStream = "reward_stream_v2alpha1" -) - func NewRewardStreamService(db sql.Executor) *RewardStreamService { return &RewardStreamService{db: db} } diff --git a/api/grpcserver/v2alpha1/transaction.go b/api/grpcserver/v2alpha1/transaction.go index d3ec47b041..9654cf6a16 100644 --- a/api/grpcserver/v2alpha1/transaction.go +++ b/api/grpcserver/v2alpha1/transaction.go @@ -31,11 +31,6 @@ import ( "github.com/spacemeshos/go-spacemesh/system" ) -const ( - Transaction = "transaction_v2alpha1" - TransactionStream = "transaction_stream_v2alpha1" -) - // transactionConState is an API to validate transaction. 
type transactionConState interface { Validation(raw types.RawTx) system.ValidationRequest diff --git a/api/grpcserver/v2beta1/account.go b/api/grpcserver/v2beta1/account.go index 3f00b1beb7..d9b38f035c 100644 --- a/api/grpcserver/v2beta1/account.go +++ b/api/grpcserver/v2beta1/account.go @@ -15,10 +15,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/builder" ) -const ( - Account = "account_v2beta1" -) - type accountConState interface { GetProjection(types.Address) (uint64, uint64) } diff --git a/api/grpcserver/v2beta1/activation.go b/api/grpcserver/v2beta1/activation.go index 54b44281dc..ea8d08b360 100644 --- a/api/grpcserver/v2beta1/activation.go +++ b/api/grpcserver/v2beta1/activation.go @@ -24,11 +24,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/builder" ) -const ( - Activation = "activation_v2beta1" - ActivationStream = "activation_stream_v2beta1" -) - func NewActivationStreamService(db sql.Executor) *ActivationStreamService { return &ActivationStreamService{db: db} } diff --git a/api/grpcserver/v2beta1/layer.go b/api/grpcserver/v2beta1/layer.go index 1af69d9b0c..62245152a2 100644 --- a/api/grpcserver/v2beta1/layer.go +++ b/api/grpcserver/v2beta1/layer.go @@ -20,11 +20,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/layers" ) -const ( - Layer = "layer_v2beta1" - LayerStream = "layer_stream_v2beta1" -) - func NewLayerStreamService(db sql.Executor) *LayerStreamService { return &LayerStreamService{db: db} } diff --git a/api/grpcserver/v2beta1/malfeasance.go b/api/grpcserver/v2beta1/malfeasance.go index 3a1150dd16..91df366dae 100644 --- a/api/grpcserver/v2beta1/malfeasance.go +++ b/api/grpcserver/v2beta1/malfeasance.go @@ -26,11 +26,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/malfeasance" ) -const ( - Malfeasance = "malfeasance_v2beta1" - MalfeasanceStream = "malfeasance_stream_v2beta1" -) - func NewMalfeasanceService(db sql.StateDatabase, malHandler, legacyHandler malfeasanceInfo) *MalfeasanceService { return &MalfeasanceService{ 
db: db, diff --git a/api/grpcserver/v2beta1/network.go b/api/grpcserver/v2beta1/network.go index 66821c74dc..01aed6615b 100644 --- a/api/grpcserver/v2beta1/network.go +++ b/api/grpcserver/v2beta1/network.go @@ -14,10 +14,6 @@ import ( "github.com/spacemeshos/go-spacemesh/config" ) -const ( - Network = "network_v2beta1" -) - func NewNetworkService(genesisTime time.Time, config *config.Config) *NetworkService { return &NetworkService{ genesisTime: genesisTime, diff --git a/api/grpcserver/v2beta1/node.go b/api/grpcserver/v2beta1/node.go index b69a21a3ba..30c5a28133 100644 --- a/api/grpcserver/v2beta1/node.go +++ b/api/grpcserver/v2beta1/node.go @@ -11,10 +11,6 @@ import ( "github.com/spacemeshos/go-spacemesh/timesync" ) -const ( - Node = "node_v2beta1" -) - // nodePeerCounter is an api to get current peer count. type nodePeerCounter interface { PeerCount() uint64 diff --git a/api/grpcserver/v2beta1/reward.go b/api/grpcserver/v2beta1/reward.go index e91686e7a8..0cfd5cf5b7 100644 --- a/api/grpcserver/v2beta1/reward.go +++ b/api/grpcserver/v2beta1/reward.go @@ -21,11 +21,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/rewards" ) -const ( - Reward = "reward_v2beta1" - RewardStream = "reward_stream_v2beta1" -) - func NewRewardStreamService(db sql.Executor) *RewardStreamService { return &RewardStreamService{db: db} } diff --git a/api/grpcserver/v2beta1/transaction.go b/api/grpcserver/v2beta1/transaction.go index cbe229e2fd..490f4d1cb0 100644 --- a/api/grpcserver/v2beta1/transaction.go +++ b/api/grpcserver/v2beta1/transaction.go @@ -31,11 +31,6 @@ import ( "github.com/spacemeshos/go-spacemesh/system" ) -const ( - Transaction = "transaction_v2beta1" - TransactionStream = "transaction_stream_v2beta1" -) - // transactionConState is an API to validate transaction. 
type transactionConState interface { Validation(raw types.RawTx) system.ValidationRequest diff --git a/cmd/bootstrapper/generator_test.go b/cmd/bootstrapper/generator_test.go index 19ad507fec..b7fd4a6560 100644 --- a/cmd/bootstrapper/generator_test.go +++ b/cmd/bootstrapper/generator_test.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap/zaptest" "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/bootstrap" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/datastore" @@ -61,8 +62,11 @@ func launchServer(tb testing.TB, db sql.StateDatabase) (grpcserver.Config, func( []string{}, false) cdb := datastore.NewCachedDB(db, zaptest.NewLogger(tb)) tb.Cleanup(func() { assert.NoError(tb, cdb.Close()) }) - s := grpcserver.NewMeshService(cdb, grpcserver.NewMockmeshAPI(gomock.NewController(tb)), nil, nil, - 0, types.Hash20{}, 0, 0, 0) + s := v1.NewMeshService( + cdb, + v1.NewMockmeshAPI(gomock.NewController(tb)), + nil, nil, 0, types.Hash20{}, 0, 0, 0, + ) pb.RegisterMeshServiceServer(grpcService.GrpcServer, s) // start gRPC and json servers diff --git a/node/node.go b/node/node.go index 50ca677628..6b3aaa16f8 100644 --- a/node/node.go +++ b/node/node.go @@ -38,6 +38,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/api/grpcserver/v2alpha1" "github.com/spacemeshos/go-spacemesh/api/grpcserver/v2beta1" "github.com/spacemeshos/go-spacemesh/atxsdata" @@ -1103,7 +1104,7 @@ func (app *App) initServices(ctx context.Context) error { nipostBuilder, err := activation.NewNIPostBuilder( app.localDB, - grpcPostService.(*grpcserver.PostService), + grpcPostService.(*v1.PostService), nipostLogger, app.Config.POET, app.clock, @@ -1498,15 +1499,15 @@ func (app *App) grpcService(svc grpcserver.Service, 
lg log.Log) (grpcserver.Serv switch svc { case grpcserver.Debug: - service := grpcserver.NewDebugService(app.db, app.conState, app.host, app.hOracle, app.loggers) + service := v1.NewDebugService(app.db, app.conState, app.host, app.hOracle, app.loggers) app.grpcServices[svc] = service return service, nil case grpcserver.GlobalState: - service := grpcserver.NewGlobalStateService(app.mesh, app.conState) + service := v1.NewGlobalStateService(app.mesh, app.conState) app.grpcServices[svc] = service return service, nil case grpcserver.Mesh: - service := grpcserver.NewMeshService( + service := v1.NewMeshService( app.cachedDB, app.mesh, app.conState, @@ -1520,7 +1521,7 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv app.grpcServices[svc] = service return service, nil case grpcserver.Node: - service := grpcserver.NewNodeService( + service := v1.NewNodeService( app.host, app.mesh, app.clock, @@ -1531,7 +1532,7 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv app.grpcServices[svc] = service return service, nil case grpcserver.Admin: - service := grpcserver.NewAdminService(app.db, app.Config.DataDir(), app.host) + service := v1.NewAdminService(app.db, app.Config.DataDir(), app.host) app.grpcServices[svc] = service return service, nil case grpcserver.Smesher: @@ -1544,10 +1545,10 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv if err != nil { return nil, err } - service := grpcserver.NewSmesherService( + service := v1.NewSmesherService( app.atxBuilder, app.postSupervisor, - postService.(*grpcserver.PostService), + postService.(*v1.PostService), app.Config.API.SmesherStreamInterval, app.Config.SMESHING.Opts, sig, @@ -1555,7 +1556,7 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv app.grpcServices[svc] = service return service, nil case grpcserver.Post: - service := grpcserver.NewPostService(app.addLogger(PostServiceLogger, lg).Zap()) + 
service := v1.NewPostService(app.addLogger(PostServiceLogger, lg).Zap()) isCoinbaseSet := app.Config.SMESHING.CoinbaseAccount != "" if !isCoinbaseSet { lg.Warning("coinbase account is not set, connections from remote post services will be rejected") @@ -1564,11 +1565,11 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv app.grpcServices[svc] = service return service, nil case grpcserver.PostInfo: - service := grpcserver.NewPostInfoService(app.atxBuilder) + service := v1.NewPostInfoService(app.atxBuilder) app.grpcServices[svc] = service return service, nil case grpcserver.Transaction: - service := grpcserver.NewTransactionService( + service := v1.NewTransactionService( app.db, app.host, app.mesh, @@ -1579,117 +1580,117 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv app.grpcServices[svc] = service return service, nil case grpcserver.Activation: - service := grpcserver.NewActivationService(app.cachedDB, types.ATXID(app.Config.Genesis.GoldenATX())) + service := v1.NewActivationService(app.cachedDB, types.ATXID(app.Config.Genesis.GoldenATX())) app.grpcServices[svc] = service return service, nil - case v2alpha1.Activation: + case grpcserver.ActivationV2Alpha1: service := v2alpha1.NewActivationService(app.apiDB, types.ATXID(app.Config.Genesis.GoldenATX())) app.grpcServices[svc] = service return service, nil - case v2alpha1.ActivationStream: + case grpcserver.ActivationStreamV2Alpha1: service := v2alpha1.NewActivationStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2alpha1.Reward: + case grpcserver.RewardV2Alpha1: service := v2alpha1.NewRewardService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2alpha1.RewardStream: + case grpcserver.RewardStreamV2Alpha1: service := v2alpha1.NewRewardStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2alpha1.Malfeasance: + case grpcserver.MalfeasanceV2Alpha1: service := 
v2alpha1.NewMalfeasanceService(app.apiDB, app.malfeasance2Handler, app.malfeasanceHandler) app.grpcServices[svc] = service return service, nil - case v2alpha1.MalfeasanceStream: + case grpcserver.MalfeasanceStreamV2Alpha1: service := v2alpha1.NewMalfeasanceStreamService(app.apiDB, app.malfeasance2Handler, app.malfeasanceHandler) app.grpcServices[svc] = service return service, nil - case v2alpha1.Network: + case grpcserver.NetworkV2Alpha1: service := v2alpha1.NewNetworkService( app.clock.GenesisTime(), app.Config, ) app.grpcServices[svc] = service return service, nil - case v2alpha1.Node: + case grpcserver.NodeV2Alpha1: service := v2alpha1.NewNodeService(app.host, app.mesh, app.clock, app.syncer) app.grpcServices[svc] = service return service, nil - case v2alpha1.Layer: + case grpcserver.LayerV2Alpha1: service := v2alpha1.NewLayerService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2alpha1.LayerStream: + case grpcserver.LayerStreamV2Alpha1: service := v2alpha1.NewLayerStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2alpha1.Transaction: + case grpcserver.TransactionV2Alpha1: service := v2alpha1.NewTransactionService(app.apiDB, app.conState, app.syncer, app.txHandler, app.host) app.grpcServices[svc] = service return service, nil - case v2alpha1.TransactionStream: + case grpcserver.TransactionStreamV2Alpha1: service := v2alpha1.NewTransactionStreamService() app.grpcServices[svc] = service return service, nil - case v2alpha1.Account: + case grpcserver.AccountV2Alpha1: service := v2alpha1.NewAccountService(app.apiDB, app.conState) app.grpcServices[svc] = service return service, nil // v2beta1 - case v2beta1.Activation: + case grpcserver.ActivationV2Beta1: service := v2beta1.NewActivationService(app.apiDB, types.ATXID(app.Config.Genesis.GoldenATX())) app.grpcServices[svc] = service return service, nil - case v2beta1.ActivationStream: + case grpcserver.ActivationStreamV2Beta1: service := 
v2beta1.NewActivationStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2beta1.Reward: + case grpcserver.RewardV2Beta1: service := v2beta1.NewRewardService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2beta1.RewardStream: + case grpcserver.RewardStreamV2Beta1: service := v2beta1.NewRewardStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2beta1.Malfeasance: + case grpcserver.MalfeasanceV2Beta1: service := v2beta1.NewMalfeasanceService(app.apiDB, app.malfeasance2Handler, app.malfeasanceHandler) app.grpcServices[svc] = service return service, nil - case v2beta1.MalfeasanceStream: + case grpcserver.MalfeasanceStreamV2Beta1: service := v2beta1.NewMalfeasanceStreamService(app.apiDB, app.malfeasance2Handler, app.malfeasanceHandler) app.grpcServices[svc] = service return service, nil - case v2beta1.Network: + case grpcserver.NetworkV2Beta1: service := v2beta1.NewNetworkService( app.clock.GenesisTime(), app.Config, ) app.grpcServices[svc] = service return service, nil - case v2beta1.Node: + case grpcserver.NodeV2Beta1: service := v2beta1.NewNodeService(app.host, app.mesh, app.clock, app.syncer) app.grpcServices[svc] = service return service, nil - case v2beta1.Layer: + case grpcserver.LayerV2Beta1: service := v2beta1.NewLayerService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2beta1.LayerStream: + case grpcserver.LayerStreamV2Beta1: service := v2beta1.NewLayerStreamService(app.apiDB) app.grpcServices[svc] = service return service, nil - case v2beta1.Transaction: + case grpcserver.TransactionV2Beta1: service := v2beta1.NewTransactionService(app.apiDB, app.conState, app.syncer, app.txHandler, app.host) app.grpcServices[svc] = service return service, nil - case v2beta1.TransactionStream: + case grpcserver.TransactionStreamV2Beta1: service := v2beta1.NewTransactionStreamService() app.grpcServices[svc] = service return service, nil - case v2beta1.Account: + 
case grpcserver.AccountV2Beta1: service := v2beta1.NewAccountService(app.apiDB, app.conState) app.grpcServices[svc] = service return service, nil @@ -1854,7 +1855,7 @@ func (app *App) startAPIServices(ctx context.Context) error { if err != nil { return err } - svc.(*grpcserver.SmesherService).SetPostServiceConfig(app.Config.POSTService) + svc.(*v1.SmesherService).SetPostServiceConfig(app.Config.POSTService) if app.Config.SMESHING.Start { if app.Config.SMESHING.CoinbaseAccount == "" { return errors.New("smeshing enabled but no coinbase account provided") diff --git a/systest/tests/distributed_post_verification_test.go b/systest/tests/distributed_post_verification_test.go index f396307922..2c88334675 100644 --- a/systest/tests/distributed_post_verification_test.go +++ b/systest/tests/distributed_post_verification_test.go @@ -27,6 +27,7 @@ import ( "github.com/spacemeshos/go-spacemesh/activation" "github.com/spacemeshos/go-spacemesh/activation/wire" "github.com/spacemeshos/go-spacemesh/api/grpcserver" + v1 "github.com/spacemeshos/go-spacemesh/api/grpcserver/v1" "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/codec" "github.com/spacemeshos/go-spacemesh/common/types" @@ -208,9 +209,9 @@ func testPostMalfeasance( defer postSupervisor.Stop(false) // 2. create ATX with invalid POST labels - grpcPostService := grpcserver.NewPostService( + grpcPostService := v1.NewPostService( logger.Named("grpc-post-service"), - grpcserver.PostServiceQueryInterval(500*time.Millisecond), + v1.PostServiceQueryInterval(500*time.Millisecond), ) grpcPostService.AllowConnections(true)