diff --git a/api/admin/service.go b/api/admin/service.go index cf57a28264e..cc352b3deb2 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -15,15 +15,14 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" - "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" ) @@ -40,14 +39,14 @@ var ( ) type Config struct { - Log logging.Logger - ProfileDir string - LogFactory logging.Factory - NodeConfig interface{} - ChainManager chains.Manager - HTTPServer server.PathAdderWithReadLock - VMRegistry registry.VMRegistry - VMManager vms.Manager + Log logging.Logger + ProfileDir string + LogFactory logging.Factory + NodeConfig interface{} + Aliaser ids.Aliaser + HTTPServer server.PathAdderWithReadLock + VMRegistry registry.VMRegistry + VMManager rpcchainvm.Manager } // Admin is the API service for node admin management @@ -165,7 +164,7 @@ func (a *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyRe if len(args.Alias) > maxAliasLength { return errAliasTooLong } - chainID, err := a.ChainManager.Lookup(args.Chain) + chainID, err := a.Aliaser.Lookup(args.Chain) if err != nil { return err } @@ -173,7 +172,7 @@ func (a *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyRe a.lock.Lock() defer a.lock.Unlock() - if err := a.ChainManager.Alias(chainID, args.Alias); err != nil { + if err := a.Aliaser.Alias(chainID, args.Alias); err != nil { return err } @@ -205,7 +204,7 @@ func (a *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs, repl return err } 
- reply.Aliases, err = a.ChainManager.Aliases(id) + reply.Aliases, err = a.Aliaser.Aliases(id) return err } diff --git a/api/admin/service_test.go b/api/admin/service_test.go index ea159c655c6..3ef798687b9 100644 --- a/api/admin/service_test.go +++ b/api/admin/service_test.go @@ -12,15 +12,15 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" ) type loadVMsTest struct { admin *Admin ctrl *gomock.Controller - mockVMManager *vms.MockManager + mockVMManager *rpcchainvm.MockManager mockVMRegistry *registry.MockVMRegistry } @@ -28,7 +28,7 @@ func initLoadVMsTest(t *testing.T) *loadVMsTest { ctrl := gomock.NewController(t) mockVMRegistry := registry.NewMockVMRegistry(ctrl) - mockVMManager := vms.NewMockManager(ctrl) + mockVMManager := rpcchainvm.NewMockManager(ctrl) return &loadVMsTest{ admin: &Admin{Config: Config{ diff --git a/api/info/service.go b/api/info/service.go index 9bdf245569a..1a1dfd9623d 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -12,10 +12,10 @@ import ( "go.uber.org/zap" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" @@ -24,8 +24,8 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/signer" 
"github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -36,13 +36,14 @@ var errNoChainProvided = errors.New("argument 'chain' not given") // Info is the API service for unprivileged info on a node type Info struct { Parameters - log logging.Logger - validators validators.Manager - myIP ips.DynamicIPPort - networking network.Network - chainManager chains.Manager - vmManager vms.Manager - benchlist benchlist.Manager + log logging.Logger + validators validators.Manager + myIP ips.DynamicIPPort + networking network.Network + aliaser ids.Aliaser + platformVM *platformvm.VM + vmManager rpcchainvm.Manager + benchlist benchlist.Manager } type Parameters struct { @@ -59,15 +60,16 @@ type Parameters struct { AddPrimaryNetworkDelegatorFee uint64 AddSubnetValidatorFee uint64 AddSubnetDelegatorFee uint64 - VMManager vms.Manager + VMManager rpcchainvm.Manager } func NewService( parameters Parameters, log logging.Logger, validators validators.Manager, - chainManager chains.Manager, - vmManager vms.Manager, + aliaser ids.Aliaser, + platformVM *platformvm.VM, + vmManager rpcchainvm.Manager, myIP ips.DynamicIPPort, network network.Network, benchlist benchlist.Manager, @@ -78,14 +80,15 @@ func NewService( server.RegisterCodec(codec, "application/json;charset=UTF-8") return server, server.RegisterService( &Info{ - Parameters: parameters, - log: log, - validators: validators, - chainManager: chainManager, - vmManager: vmManager, - myIP: myIP, - networking: network, - benchlist: benchlist, + Parameters: parameters, + log: log, + validators: validators, + aliaser: aliaser, + platformVM: platformVM, + vmManager: vmManager, + myIP: myIP, + networking: network, + benchlist: benchlist, }, "info", ) @@ -203,7 +206,7 @@ func (i *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply zap.String("method", "getBlockchainID"), ) - bID, err := i.chainManager.Lookup(args.Alias) + bID, err := i.aliaser.Lookup(args.Alias) 
reply.BlockchainID = bID return err } @@ -240,7 +243,7 @@ func (i *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error benchedIDs := i.benchlist.GetBenched(peer.ID) benchedAliases := make([]string, len(benchedIDs)) for idx, id := range benchedIDs { - alias, err := i.chainManager.PrimaryAlias(id) + alias, err := i.aliaser.PrimaryAlias(id) if err != nil { return fmt.Errorf("failed to get primary alias for chain ID %s: %w", id, err) } @@ -282,11 +285,11 @@ func (i *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply * if args.Chain == "" { return errNoChainProvided } - chainID, err := i.chainManager.Lookup(args.Chain) + chainID, err := i.aliaser.Lookup(args.Chain) if err != nil { return fmt.Errorf("there is no chain with alias/ID '%s'", args.Chain) } - reply.IsBootstrapped = i.chainManager.IsBootstrapped(chainID) + reply.IsBootstrapped = i.platformVM.IsBootstrapped(chainID) return nil } diff --git a/api/info/service_test.go b/api/info/service_test.go index b91f87354d1..651f4b9e39a 100644 --- a/api/info/service_test.go +++ b/api/info/service_test.go @@ -12,8 +12,8 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" ) var errTest = errors.New("non-nil error") @@ -21,12 +21,12 @@ var errTest = errors.New("non-nil error") type getVMsTest struct { info *Info ctrl *gomock.Controller - mockVMManager *vms.MockManager + mockVMManager *rpcchainvm.MockManager } func initGetVMsTest(t *testing.T) *getVMsTest { ctrl := gomock.NewController(t) - mockVMManager := vms.NewMockManager(ctrl) + mockVMManager := rpcchainvm.NewMockManager(ctrl) return &getVMsTest{ info: &Info{ Parameters: Parameters{ diff --git a/api/ipcs/service.go b/api/ipcs/service.go index efe6f2e7280..db8d392dbaf 100644 --- a/api/ipcs/service.go +++ b/api/ipcs/service.go @@ -12,7 +12,6 @@ import ( "go.uber.org/zap" 
"github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs" "github.com/ava-labs/avalanchego/utils/json" @@ -20,22 +19,22 @@ import ( ) type Service struct { - log logging.Logger - chainManager chains.Manager - lock sync.RWMutex - ipcs *ipcs.ChainIPCs + log logging.Logger + aliaser ids.Aliaser + lock sync.RWMutex + ipcs *ipcs.ChainIPCs } -func NewService(log logging.Logger, chainManager chains.Manager, ipcs *ipcs.ChainIPCs) (http.Handler, error) { +func NewService(log logging.Logger, aliaser ids.Aliaser, ipcs *ipcs.ChainIPCs) (http.Handler, error) { server := rpc.NewServer() codec := json.NewCodec() server.RegisterCodec(codec, "application/json") server.RegisterCodec(codec, "application/json;charset=UTF-8") return server, server.RegisterService( &Service{ - log: log, - chainManager: chainManager, - ipcs: ipcs, + log: log, + aliaser: aliaser, + ipcs: ipcs, }, "ipcs", ) @@ -59,7 +58,7 @@ func (s *Service) PublishBlockchain(_ *http.Request, args *PublishBlockchainArgs logging.UserString("blockchainID", args.BlockchainID), ) - chainID, err := s.chainManager.Lookup(args.BlockchainID) + chainID, err := s.aliaser.Lookup(args.BlockchainID) if err != nil { s.log.Error("chain lookup failed", logging.UserString("blockchainID", args.BlockchainID), @@ -98,7 +97,7 @@ func (s *Service) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockchain logging.UserString("blockchainID", args.BlockchainID), ) - chainID, err := s.chainManager.Lookup(args.BlockchainID) + chainID, err := s.aliaser.Lookup(args.BlockchainID) if err != nil { s.log.Error("chain lookup failed", logging.UserString("blockchainID", args.BlockchainID), diff --git a/chains/test_manager.go b/chains/test_manager.go deleted file mode 100644 index f7b98b29b58..00000000000 --- a/chains/test_manager.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package chains - -import "github.com/ava-labs/avalanchego/ids" - -// TestManager implements Manager but does nothing. Always returns nil error. -// To be used only in tests -var TestManager Manager = testManager{} - -type testManager struct{} - -func (testManager) QueueChainCreation(ChainParameters) {} - -func (testManager) ForceCreateChain(ChainParameters) {} - -func (testManager) AddRegistrant(Registrant) {} - -func (testManager) Aliases(ids.ID) ([]string, error) { - return nil, nil -} - -func (testManager) PrimaryAlias(ids.ID) (string, error) { - return "", nil -} - -func (testManager) PrimaryAliasOrDefault(ids.ID) string { - return "" -} - -func (testManager) Alias(ids.ID, string) error { - return nil -} - -func (testManager) RemoveAliases(ids.ID) {} - -func (testManager) Shutdown() {} - -func (testManager) StartChainCreator(ChainParameters) error { - return nil -} - -func (testManager) SubnetID(ids.ID) (ids.ID, error) { - return ids.ID{}, nil -} - -func (testManager) IsBootstrapped(ids.ID) bool { - return false -} - -func (testManager) Lookup(s string) (ids.ID, error) { - return ids.FromString(s) -} - -func (testManager) LookupVM(s string) (ids.ID, error) { - return ids.FromString(s) -} diff --git a/config/config.go b/config/config.go index 09cc32d286b..5d4d1d9028d 100644 --- a/config/config.go +++ b/config/config.go @@ -19,7 +19,6 @@ import ( "github.com/spf13/viper" "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs" @@ -46,6 +45,7 @@ import ( "github.com/ava-labs/avalanchego/utils/storage" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/proposervm" ) @@ -1015,35 +1015,35 @@ 
func getPathFromDirKey(v *viper.Viper, configKey string) (string, error) { return "", nil } -func getChainConfigsFromFlag(v *viper.Viper) (map[string]chains.ChainConfig, error) { +func getChainConfigsFromFlag(v *viper.Viper) (map[string]platformvm.ChainConfig, error) { chainConfigContentB64 := v.GetString(ChainConfigContentKey) chainConfigContent, err := base64.StdEncoding.DecodeString(chainConfigContentB64) if err != nil { return nil, fmt.Errorf("unable to decode base64 content: %w", err) } - chainConfigs := make(map[string]chains.ChainConfig) + chainConfigs := make(map[string]platformvm.ChainConfig) if err := json.Unmarshal(chainConfigContent, &chainConfigs); err != nil { return nil, fmt.Errorf("could not unmarshal JSON: %w", err) } return chainConfigs, nil } -func getChainConfigsFromDir(v *viper.Viper) (map[string]chains.ChainConfig, error) { +func getChainConfigsFromDir(v *viper.Viper) (map[string]platformvm.ChainConfig, error) { chainConfigPath, err := getPathFromDirKey(v, ChainConfigDirKey) if err != nil { return nil, err } if len(chainConfigPath) == 0 { - return make(map[string]chains.ChainConfig), nil + return make(map[string]platformvm.ChainConfig), nil } return readChainConfigPath(chainConfigPath) } // getChainConfigs reads & puts chainConfigs to node config -func getChainConfigs(v *viper.Viper) (map[string]chains.ChainConfig, error) { +func getChainConfigs(v *viper.Viper) (map[string]platformvm.ChainConfig, error) { if v.IsSet(ChainConfigContentKey) { return getChainConfigsFromFlag(v) } @@ -1052,12 +1052,12 @@ func getChainConfigs(v *viper.Viper) (map[string]chains.ChainConfig, error) { // ReadsChainConfigs reads chain config files from static directories and returns map with contents, // if successful. 
-func readChainConfigPath(chainConfigPath string) (map[string]chains.ChainConfig, error) { +func readChainConfigPath(chainConfigPath string) (map[string]platformvm.ChainConfig, error) { chainDirs, err := filepath.Glob(filepath.Join(chainConfigPath, "*")) if err != nil { return nil, err } - chainConfigMap := make(map[string]chains.ChainConfig) + chainConfigMap := make(map[string]platformvm.ChainConfig) for _, chainDir := range chainDirs { dirInfo, err := os.Stat(chainDir) if err != nil { @@ -1080,7 +1080,7 @@ func readChainConfigPath(chainConfigPath string) (map[string]chains.ChainConfig, return chainConfigMap, err } - chainConfigMap[dirInfo.Name()] = chains.ChainConfig{ + chainConfigMap[dirInfo.Name()] = platformvm.ChainConfig{ Config: configData, Upgrade: upgradeData, } diff --git a/config/config_test.go b/config/config_test.go index 4c64e448ac1..5593b77c9f5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -17,35 +17,35 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/vms/platformvm" ) func TestGetChainConfigsFromFiles(t *testing.T) { tests := map[string]struct { configs map[string]string upgrades map[string]string - expected map[string]chains.ChainConfig + expected map[string]platformvm.ChainConfig }{ "no chain configs": { configs: map[string]string{}, upgrades: map[string]string{}, - expected: map[string]chains.ChainConfig{}, + expected: map[string]platformvm.ChainConfig{}, }, "valid chain-id": { configs: map[string]string{"yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp": "hello", "2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm": "world"}, upgrades: map[string]string{"yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp": "helloUpgrades"}, - expected: func() map[string]chains.ChainConfig { - m := map[string]chains.ChainConfig{} 
+ expected: func() map[string]platformvm.ChainConfig { + m := map[string]platformvm.ChainConfig{} id1, err := ids.FromString("yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp") require.NoError(t, err) - m[id1.String()] = chains.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} + m[id1.String()] = platformvm.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} id2, err := ids.FromString("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") require.NoError(t, err) - m[id2.String()] = chains.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} + m[id2.String()] = platformvm.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} return m }(), @@ -53,10 +53,10 @@ func TestGetChainConfigsFromFiles(t *testing.T) { "valid alias": { configs: map[string]string{"C": "hello", "X": "world"}, upgrades: map[string]string{"C": "upgradess"}, - expected: func() map[string]chains.ChainConfig { - m := map[string]chains.ChainConfig{} - m["C"] = chains.ChainConfig{Config: []byte("hello"), Upgrade: []byte("upgradess")} - m["X"] = chains.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} + expected: func() map[string]platformvm.ChainConfig { + m := map[string]platformvm.ChainConfig{} + m["C"] = platformvm.ChainConfig{Config: []byte("hello"), Upgrade: []byte("upgradess")} + m["X"] = platformvm.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} return m }(), @@ -96,7 +96,7 @@ func TestGetChainConfigsDirNotExist(t *testing.T) { structure string file map[string]string expectedErr error - expected map[string]chains.ChainConfig + expected map[string]platformvm.ChainConfig }{ "cdir not exist": { structure: "/", @@ -114,13 +114,13 @@ func TestGetChainConfigsDirNotExist(t *testing.T) { structure: "/cdir/", file: map[string]string{"config.ex": "noeffect"}, expectedErr: nil, - expected: map[string]chains.ChainConfig{}, + expected: map[string]platformvm.ChainConfig{}, }, "full structure": { structure: "/cdir/C/", file: 
map[string]string{"config.ex": "hello"}, expectedErr: nil, - expected: map[string]chains.ChainConfig{"C": {Config: []byte("hello"), Upgrade: []byte(nil)}}, + expected: map[string]platformvm.ChainConfig{"C": {Config: []byte("hello"), Upgrade: []byte(nil)}}, }, } @@ -165,54 +165,54 @@ func TestSetChainConfigDefaultDir(t *testing.T) { setupFile(t, chainsDir, chainConfigFileName+".ex", "helloworld") chainConfigs, err := getChainConfigs(v) require.NoError(err) - expected := map[string]chains.ChainConfig{"C": {Config: []byte("helloworld"), Upgrade: []byte(nil)}} + expected := map[string]platformvm.ChainConfig{"C": {Config: []byte("helloworld"), Upgrade: []byte(nil)}} require.Equal(expected, chainConfigs) } func TestGetChainConfigsFromFlags(t *testing.T) { tests := map[string]struct { - fullConfigs map[string]chains.ChainConfig - expected map[string]chains.ChainConfig + fullConfigs map[string]platformvm.ChainConfig + expected map[string]platformvm.ChainConfig }{ "no chain configs": { - fullConfigs: map[string]chains.ChainConfig{}, - expected: map[string]chains.ChainConfig{}, + fullConfigs: map[string]platformvm.ChainConfig{}, + expected: map[string]platformvm.ChainConfig{}, }, "valid chain-id": { - fullConfigs: func() map[string]chains.ChainConfig { - m := map[string]chains.ChainConfig{} + fullConfigs: func() map[string]platformvm.ChainConfig { + m := map[string]platformvm.ChainConfig{} id1, err := ids.FromString("yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp") require.NoError(t, err) - m[id1.String()] = chains.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} + m[id1.String()] = platformvm.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} id2, err := ids.FromString("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") require.NoError(t, err) - m[id2.String()] = chains.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} + m[id2.String()] = platformvm.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} return m 
}(), - expected: func() map[string]chains.ChainConfig { - m := map[string]chains.ChainConfig{} + expected: func() map[string]platformvm.ChainConfig { + m := map[string]platformvm.ChainConfig{} id1, err := ids.FromString("yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp") require.NoError(t, err) - m[id1.String()] = chains.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} + m[id1.String()] = platformvm.ChainConfig{Config: []byte("hello"), Upgrade: []byte("helloUpgrades")} id2, err := ids.FromString("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") require.NoError(t, err) - m[id2.String()] = chains.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} + m[id2.String()] = platformvm.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} return m }(), }, "valid alias": { - fullConfigs: map[string]chains.ChainConfig{ + fullConfigs: map[string]platformvm.ChainConfig{ "C": {Config: []byte("hello"), Upgrade: []byte("upgradess")}, "X": {Config: []byte("world"), Upgrade: []byte(nil)}, }, - expected: func() map[string]chains.ChainConfig { - m := map[string]chains.ChainConfig{} - m["C"] = chains.ChainConfig{Config: []byte("hello"), Upgrade: []byte("upgradess")} - m["X"] = chains.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} + expected: func() map[string]platformvm.ChainConfig { + m := map[string]platformvm.ChainConfig{} + m["C"] = platformvm.ChainConfig{Config: []byte("hello"), Upgrade: []byte("upgradess")} + m["X"] = platformvm.ChainConfig{Config: []byte("world"), Upgrade: []byte(nil)} return m }(), diff --git a/go.mod b/go.mod index 8120a418171..3554f4e953f 100644 --- a/go.mod +++ b/go.mod @@ -34,8 +34,8 @@ require ( github.com/onsi/ginkgo/v2 v2.13.1 github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/client_model v0.3.0 + github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_model v0.5.0 github.com/rs/cors v1.7.0 
github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cast v1.5.0 @@ -118,14 +118,15 @@ github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sanity-io/litter v1.5.1 // indirect diff --git a/go.sum b/go.sum index dcbb39e2f29..25ae5ee3e1e 100644 --- a/go.sum +++ b/go.sum @@ -440,6 +440,8 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mediocregopher/radix/v3 v3.4.2/go.mod
h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -510,19 +512,27 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.45.0 
h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= diff --git a/node/config.go b/node/config.go index a26ec4806fc..036345b803d 100644 --- a/node/config.go +++ b/node/config.go @@ -8,7 +8,6 @@ import ( "time" "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network" @@ -23,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/vms/platformvm" ) type IPCConfig struct { @@ -186,8 +186,8 @@ type Config struct { SubnetConfigs map[ids.ID]subnets.Config `json:"subnetConfigs"` - ChainConfigs map[string]chains.ChainConfig `json:"-"` - ChainAliases map[ids.ID][]string `json:"chainAliases"` + ChainConfigs map[string]platformvm.ChainConfig `json:"-"` + ChainAliases map[ids.ID][]string `json:"chainAliases"` VMAliaser ids.Aliaser 
`json:"-"` diff --git a/node/node.go b/node/node.go index 7842259c8a9..79895b5700e 100644 --- a/node/node.go +++ b/node/node.go @@ -34,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" @@ -53,6 +52,7 @@ import ( "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" @@ -76,18 +76,15 @@ import ( "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms" - "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" - avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" - platformconfig "github.com/ava-labs/avalanchego/vms/platformvm/config" ) const ( @@ -125,6 +122,7 @@ func New( LogFactory: logFactory, ID: ids.NodeIDFromCert(stakingCert), Config: config, + aliaser: ids.NewAliaser(), } n.DoneShuttingDown.Add(1) @@ -145,7 +143,7 @@ func New( return nil, fmt.Errorf("problem creating vm logger: %w", err) } - n.VMManager = vms.NewManager(n.VMFactoryLog, 
config.VMAliaser) + n.VMManager = rpcchainvm.NewManager(n.VMFactoryLog, config.VMAliaser) if err := n.initBootstrappers(); err != nil { // Configure the bootstrappers return nil, fmt.Errorf("problem initializing node beacons: %w", err) @@ -182,7 +180,7 @@ func New( n.initSharedMemory() // Initialize shared memory // message.Creator is shared between networking, chainManager and the engine. - // It must be initiated before networking (initNetworking), chain manager (initChainManager) + // It must be initiated before networking (initNetworking), chain manager (initPlatformChain) // and the engine (initChains) but after the metrics (initMetricsAPI) // message.Creator currently record metrics under network namespace n.networkNamespace = "network" @@ -214,7 +212,7 @@ func New( n.initEventDispatchers() // Start the Health API - // Has to be initialized before chain manager + // Has to be initialized before platform chain // [n.Net] must already be set if err := n.initHealthAPI(); err != nil { return nil, fmt.Errorf("couldn't initialize health API: %w", err) @@ -222,8 +220,8 @@ func New( if err := n.addDefaultVMAliases(); err != nil { return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) } - if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager - return nil, fmt.Errorf("couldn't initialize chain manager: %w", err) + if err := n.initPlatformChain(); err != nil { + return nil, fmt.Errorf("couldn't initialize platform chain: %w", err) } if err := n.initVMs(); err != nil { // Initialize the VM registry. 
return nil, fmt.Errorf("couldn't initialize VM registry: %w", err) @@ -253,10 +251,6 @@ func New( n.health.Start(context.TODO(), n.Config.HealthCheckFreq) n.initProfiler() - // Start the Platform chain - if err := n.initChains(n.Config.GenesisBytes); err != nil { - return nil, fmt.Errorf("couldn't initialize chains: %w", err) - } return n, nil } @@ -299,7 +293,8 @@ type Node struct { timeoutManager timeout.Manager // Manages creation of blockchains and routing messages to them - chainManager chains.Manager + platformVM *platformvm.VM + aliaser ids.Aliaser // Manages validator benching benchlistManager benchlist.Manager @@ -359,7 +354,7 @@ type Node struct { MetricsRegisterer *prometheus.Registry MetricsGatherer metrics.MultiGatherer - VMManager vms.Manager + VMManager rpcchainvm.Manager // VM endpoint registry VMRegistry registry.VMRegistry @@ -872,29 +867,9 @@ func (n *Node) initIndexer() error { return fmt.Errorf("couldn't create index for txs: %w", err) } - // Chain manager will notify indexer when a chain is created - n.chainManager.AddRegistrant(n.indexer) - return nil } -// Initializes the Platform chain. -// Its genesis data specifies the other chains that should be created. 
-func (n *Node) initChains(genesisBytes []byte) error { - n.Log.Info("initializing chains") - - platformChain := chains.ChainParameters{ - ID: constants.PlatformChainID, - SubnetID: constants.PrimaryNetworkID, - GenesisData: genesisBytes, // Specifies other chains to create - VMID: constants.PlatformVMID, - CustomBeacons: n.bootstrappers, - } - - // Start the chain creator with the Platform Chain - return n.chainManager.StartChainCreator(platformChain) -} - func (n *Node) initMetrics() { n.MetricsRegisterer = prometheus.NewRegistry() n.MetricsGatherer = metrics.NewMultiGatherer() @@ -1053,10 +1028,11 @@ func (n *Node) addDefaultVMAliases() error { return nil } -// Create the chainManager and register the following VMs: -// AVM, Simple Payments DAG, Simple Payments Chain, and Platform VM +// Create the platform chain. // Assumes n.DBManager, n.vdrs all initialized (non-nil) -func (n *Node) initChainManager(avaxAssetID ids.ID) error { +func (n *Node) initPlatformChain() error { + n.Log.Info("initializing chains") + createAVMTx, err := genesis.VMGenesis(n.Config.GenesisBytes, constants.AVMID) if err != nil { return err @@ -1105,61 +1081,6 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { return fmt.Errorf("couldn't initialize chain router: %w", err) } - n.chainManager = chains.New(&chains.ManagerConfig{ - SybilProtectionEnabled: n.Config.SybilProtectionEnabled, - StakingTLSCert: n.Config.StakingTLSCert, - StakingBLSKey: n.Config.StakingSigningKey, - Log: n.Log, - LogFactory: n.LogFactory, - VMManager: n.VMManager, - BlockAcceptorGroup: n.BlockAcceptorGroup, - TxAcceptorGroup: n.TxAcceptorGroup, - VertexAcceptorGroup: n.VertexAcceptorGroup, - DB: n.DB, - MsgCreator: n.msgCreator, - Router: n.Config.ConsensusRouter, - Net: n.Net, - Validators: n.vdrs, - PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, - NodeID: n.ID, - NetworkID: n.Config.NetworkID, - Server: n.APIServer, - Keystore: n.keystore, - AtomicMemory: n.sharedMemory, - AVAXAssetID: 
avaxAssetID, - XChainID: xChainID, - CChainID: cChainID, - CriticalChains: criticalChains, - TimeoutManager: n.timeoutManager, - Health: n.health, - ShutdownNodeFunc: n.Shutdown, - MeterVMEnabled: n.Config.MeterVMEnabled, - Metrics: n.MetricsGatherer, - SubnetConfigs: n.Config.SubnetConfigs, - ChainConfigs: n.Config.ChainConfigs, - FrontierPollFrequency: n.Config.FrontierPollFrequency, - ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, - BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, - BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, - BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, - ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), - ApricotPhase4MinPChainHeight: version.ApricotPhase4MinPChainHeight[n.Config.NetworkID], - ResourceTracker: n.resourceTracker, - StateSyncBeacons: n.Config.StateSyncIDs, - TracingEnabled: n.Config.TraceConfig.Enabled, - Tracer: n.tracer, - ChainDataDir: n.Config.ChainDataDir, - }) - - // Notify the API server when new chains are created - n.chainManager.AddRegistrant(n.APIServer) - return nil -} - -// initVMs initializes the VMs Avalanche supports + any additional vms installed as plugins. 
-func (n *Node) initVMs() error { - n.Log.Info("initializing VMs") - vdrs := n.vdrs // If sybil protection is disabled, we provide the P-chain its own local @@ -1176,58 +1097,100 @@ func (n *Node) initVMs() error { if err := block.InitCodec(durangoTime); err != nil { return err } - if err := coreth.InitCodec(durangoTime); err != nil { - return err + //TODO register coreth codec + + platformvmConfig := config.Config{ + SybilProtectionEnabled: n.Config.SybilProtectionEnabled, + PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, + TrackedSubnets: n.Config.TrackedSubnets, + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + CreateSubnetTxFee: n.Config.CreateSubnetTxFee, + TransformSubnetTxFee: n.Config.TransformSubnetTxFee, + CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, + AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, + AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, + AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + UptimePercentage: n.Config.UptimeRequirement, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, + ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), + ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), + //ApricotPhase4MinPChainHeight: //TODO + ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), + BanffTime: version.GetBanffTime(n.Config.NetworkID), + CortinaTime: version.GetCortinaTime(n.Config.NetworkID), + DurangoTime: durangoTime, + UseCurrentHeight: n.Config.UseCurrentHeight, + TracingEnabled: n.Config.TraceConfig.Enabled, + NetworkID: n.Config.NetworkID, + AVAXAssetID: 
n.Config.AvaxAssetID, + MeterVMEnabled: n.Config.MeterVMEnabled, + FrontierPollFrequency: n.Config.FrontierPollFrequency, + ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, + BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, + BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, + BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, + StateSyncBeacons: n.Config.StateSyncIDs, + ChainDataDir: n.Config.ChainDataDir, } - // Register the VMs that Avalanche supports - err := utils.Err( - n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ - Config: platformconfig.Config{ - Chains: n.chainManager, - Validators: vdrs, - UptimeLockedCalculator: n.uptimeCalculator, - SybilProtectionEnabled: n.Config.SybilProtectionEnabled, - PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, - TrackedSubnets: n.Config.TrackedSubnets, - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, - ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), - ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), - BanffTime: 
version.GetBanffTime(n.Config.NetworkID), - CortinaTime: version.GetCortinaTime(n.Config.NetworkID), - DurangoTime: durangoTime, - UseCurrentHeight: n.Config.UseCurrentHeight, - }, - }), - n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ - Config: avmconfig.Config{ - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - DurangoTime: durangoTime, - }, - }), - n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), + platformvmFactory := platformvm.NewFactory( + platformvmConfig, + n.aliaser, + n.uptimeCalculator, + n.Config.StakingTLSCert, + n.Config.StakingSigningKey, + n.tracer, + n.LogFactory, + n.VMManager, + n.BlockAcceptorGroup, + n.TxAcceptorGroup, + n.VertexAcceptorGroup, + n.DB, + n.msgCreator, + n.Config.ConsensusRouter, + n.Net, + n.bootstrappers, + vdrs, + n.ID, + n.keystore, + n.sharedMemory, + xChainID, + cChainID, + criticalChains, + n.timeoutManager, + n.health, + n.Config.SubnetConfigs, + n.Config.ChainConfigs, + n.Shutdown, + n.MetricsGatherer, + n.resourceTracker, ) + vm, err := platformvmFactory.New(n.Log) if err != nil { return err } + n.platformVM = vm + return nil +} + +// initVMs initializes the VMs Avalanche supports + any additional vms installed as plugins. 
+func (n *Node) initVMs() error { + n.Log.Info("initializing VMs") + + durangoTime := version.GetDurangoTime(n.Config.NetworkID) + if err := coreth.InitCodec(durangoTime); err != nil { + return err + } + // initialize vm runtime manager n.runtimeManager = runtime.NewManager() @@ -1324,14 +1287,14 @@ func (n *Node) initAdminAPI() error { n.Log.Info("initializing admin API") service, err := admin.NewService( admin.Config{ - Log: n.Log, - ChainManager: n.chainManager, - HTTPServer: n.APIServer, - ProfileDir: n.Config.ProfilerConfig.Dir, - LogFactory: n.LogFactory, - NodeConfig: n.Config, - VMManager: n.VMManager, - VMRegistry: n.VMRegistry, + Log: n.Log, + Aliaser: n.aliaser, + HTTPServer: n.APIServer, + ProfileDir: n.Config.ProfilerConfig.Dir, + LogFactory: n.LogFactory, + NodeConfig: n.Config, + VMManager: n.VMManager, + VMRegistry: n.VMRegistry, }, ) if err != nil { @@ -1395,7 +1358,8 @@ func (n *Node) initInfoAPI() error { }, n.Log, n.vdrs, - n.chainManager, + n.aliaser, + n.platformVM, n.VMManager, n.Config.NetworkConfig.MyIPPort, n.Net, @@ -1516,7 +1480,7 @@ func (n *Node) initIPCAPI() error { return nil } n.Log.Warn("initializing deprecated ipc API") - service, err := ipcsapi.NewService(n.Log, n.chainManager, n.IPCs) + service, err := ipcsapi.NewService(n.Log, n.aliaser, n.IPCs) if err != nil { return err } @@ -1537,7 +1501,7 @@ func (n *Node) initChainAliases(genesisBytes []byte) error { for chainID, aliases := range chainAliases { for _, alias := range aliases { - if err := n.chainManager.Alias(chainID, alias); err != nil { + if err := n.aliaser.Alias(chainID, alias); err != nil { return err } } @@ -1545,7 +1509,7 @@ func (n *Node) initChainAliases(genesisBytes []byte) error { for chainID, aliases := range n.Config.ChainAliases { for _, alias := range aliases { - if err := n.chainManager.Alias(chainID, alias); err != nil { + if err := n.aliaser.Alias(chainID, alias); err != nil { return err } } @@ -1660,8 +1624,8 @@ func (n *Node) shutdown() { } } 
n.timeoutManager.Stop() - if n.chainManager != nil { - n.chainManager.Shutdown() + if n.platformVM != nil { + _ = n.platformVM.Shutdown(context.TODO()) } if n.profiler != nil { n.profiler.Shutdown() diff --git a/vms/manager.go b/node/rpcchainvm/manager.go similarity index 83% rename from vms/manager.go rename to node/rpcchainvm/manager.go index f4ae49e39cd..f25a6da4de3 100644 --- a/vms/manager.go +++ b/node/rpcchainvm/manager.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package vms +package rpcchainvm import ( "context" @@ -12,8 +12,9 @@ import ( "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/rpcchainvm" ) var ( @@ -22,11 +23,6 @@ var ( _ Manager = (*manager)(nil) ) -// A Factory creates new instances of a VM -type Factory interface { - New(logging.Logger) (interface{}, error) -} - // Manager tracks a collection of VM factories, their aliases, and their // versions. // It has the following functionality: @@ -43,11 +39,11 @@ type Manager interface { // Return a factory that can create new instances of the vm whose ID is // [vmID] - GetFactory(vmID ids.ID) (Factory, error) + GetFactory(vmID ids.ID) (vms.Factory[*rpcchainvm.VMClient], error) // Map [vmID] to [factory]. [factory] creates new instances of the vm whose // ID is [vmID] - RegisterFactory(ctx context.Context, vmID ids.ID, factory Factory) error + RegisterFactory(ctx context.Context, vmID ids.ID, factory vms.Factory[*rpcchainvm.VMClient]) error // ListFactories returns all the IDs that have had factories registered. 
ListFactories() ([]ids.ID, error) @@ -68,7 +64,7 @@ type manager struct { // Key: A VM's ID // Value: A factory that creates new instances of that VM - factories map[ids.ID]Factory + factories map[ids.ID]vms.Factory[*rpcchainvm.VMClient] // Key: A VM's ID // Value: version the VM returned @@ -80,12 +76,12 @@ func NewManager(log logging.Logger, aliaser ids.Aliaser) Manager { return &manager{ Aliaser: aliaser, log: log, - factories: make(map[ids.ID]Factory), + factories: make(map[ids.ID]vms.Factory[*rpcchainvm.VMClient]), versions: make(map[ids.ID]string), } } -func (m *manager) GetFactory(vmID ids.ID) (Factory, error) { +func (m *manager) GetFactory(vmID ids.ID) (vms.Factory[*rpcchainvm.VMClient], error) { m.lock.RLock() defer m.lock.RUnlock() @@ -95,7 +91,7 @@ func (m *manager) GetFactory(vmID ids.ID) (Factory, error) { return nil, fmt.Errorf("%q was %w", vmID, ErrNotFound) } -func (m *manager) RegisterFactory(ctx context.Context, vmID ids.ID, factory Factory) error { +func (m *manager) RegisterFactory(ctx context.Context, vmID ids.ID, factory vms.Factory[*rpcchainvm.VMClient]) error { m.lock.Lock() defer m.lock.Unlock() @@ -113,20 +109,15 @@ func (m *manager) RegisterFactory(ctx context.Context, vmID ids.ID, factory Fact return err } - commonVM, ok := vm.(common.VM) - if !ok { - return nil - } - - version, err := commonVM.Version(ctx) + version, err := vm.Version(ctx) if err != nil { // Drop the shutdown error to surface the original error - _ = commonVM.Shutdown(ctx) + _ = vm.Shutdown(ctx) return err } m.versions[vmID] = version - return commonVM.Shutdown(ctx) + return vm.Shutdown(ctx) } func (m *manager) ListFactories() ([]ids.ID, error) { diff --git a/node/rpcchainvm/mock_manager.go b/node/rpcchainvm/mock_manager.go new file mode 100644 index 00000000000..0f510b39bc4 --- /dev/null +++ b/node/rpcchainvm/mock_manager.go @@ -0,0 +1,225 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ava-labs/avalanchego/vms (interfaces: Factory,Manager) +// +// Generated by this command: +// +// mockgen -package=vms -destination=vms/mock_manager.go github.com/ava-labs/avalanchego/vms Factory,Manager +// + +// Package vms is a generated GoMock package. +package rpcchainvm +// +//import ( +// "context" +// "reflect" +// +// "go.uber.org/mock/gomock" +// +// "github.com/ava-labs/avalanchego/ids" +// "github.com/ava-labs/avalanchego/utils/logging" +//) +// +//// MockFactory is a mock of Factory interface. +//type MockFactory struct { +// ctrl *gomock.Controller +// recorder *MockFactoryMockRecorder +//} +// +//// MockFactoryMockRecorder is the mock recorder for MockFactory. +//type MockFactoryMockRecorder struct { +// mock *MockFactory +//} +// +//// NewMockFactory creates a new mock instance. +//func NewMockFactory(ctrl *gomock.Controller) *MockFactory { +// mock := &MockFactory{ctrl: ctrl} +// mock.recorder = &MockFactoryMockRecorder{mock} +// return mock +//} +// +//// EXPECT returns an object that allows the caller to indicate expected use. +//func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { +// return m.recorder +//} +// +//// New mocks base method. +//func (m *MockFactory) New(arg0 logging.Logger) (any, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "New", arg0) +// ret0, _ := ret[0].(any) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// New indicates an expected call of New. +//func (mr *MockFactoryMockRecorder) New(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFactory)(nil).New), arg0) +//} +// +//// MockManager is a mock of Manager interface. +//type MockManager struct { +// ctrl *gomock.Controller +// recorder *MockManagerMockRecorder +//} +// +//// MockManagerMockRecorder is the mock recorder for MockManager. 
+//type MockManagerMockRecorder struct { +// mock *MockManager +//} +// +//// NewMockManager creates a new mock instance. +//func NewMockManager(ctrl *gomock.Controller) *MockManager { +// mock := &MockManager{ctrl: ctrl} +// mock.recorder = &MockManagerMockRecorder{mock} +// return mock +//} +// +//// EXPECT returns an object that allows the caller to indicate expected use. +//func (m *MockManager) EXPECT() *MockManagerMockRecorder { +// return m.recorder +//} +// +//// Alias mocks base method. +//func (m *MockManager) Alias(arg0 ids.ID, arg1 string) error { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "Alias", arg0, arg1) +// ret0, _ := ret[0].(error) +// return ret0 +//} +// +//// Alias indicates an expected call of Alias. +//func (mr *MockManagerMockRecorder) Alias(arg0, arg1 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), arg0, arg1) +//} +// +//// Aliases mocks base method. +//func (m *MockManager) Aliases(arg0 ids.ID) ([]string, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "Aliases", arg0) +// ret0, _ := ret[0].([]string) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// Aliases indicates an expected call of Aliases. +//func (mr *MockManagerMockRecorder) Aliases(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), arg0) +//} +// +//// GetFactory mocks base method. +//func (m *MockManager) GetFactory(arg0 ids.ID) (Factory, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "GetFactory", arg0) +// ret0, _ := ret[0].(Factory) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// GetFactory indicates an expected call of GetFactory. 
+//func (mr *MockManagerMockRecorder) GetFactory(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), arg0) +//} +// +//// ListFactories mocks base method. +//func (m *MockManager) ListFactories() ([]ids.ID, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "ListFactories") +// ret0, _ := ret[0].([]ids.ID) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// ListFactories indicates an expected call of ListFactories. +//func (mr *MockManagerMockRecorder) ListFactories() *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFactories", reflect.TypeOf((*MockManager)(nil).ListFactories)) +//} +// +//// Lookup mocks base method. +//func (m *MockManager) Lookup(arg0 string) (ids.ID, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "Lookup", arg0) +// ret0, _ := ret[0].(ids.ID) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// Lookup indicates an expected call of Lookup. +//func (mr *MockManagerMockRecorder) Lookup(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), arg0) +//} +// +//// PrimaryAlias mocks base method. +//func (m *MockManager) PrimaryAlias(arg0 ids.ID) (string, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "PrimaryAlias", arg0) +// ret0, _ := ret[0].(string) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// PrimaryAlias indicates an expected call of PrimaryAlias. +//func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), arg0) +//} +// +//// PrimaryAliasOrDefault mocks base method. 
+//func (m *MockManager) PrimaryAliasOrDefault(arg0 ids.ID) string { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "PrimaryAliasOrDefault", arg0) +// ret0, _ := ret[0].(string) +// return ret0 +//} +// +//// PrimaryAliasOrDefault indicates an expected call of PrimaryAliasOrDefault. +//func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), arg0) +//} +// +//// RegisterFactory mocks base method. +//func (m *MockManager) RegisterFactory(arg0 context.Context, arg1 ids.ID, arg2 Factory) error { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "RegisterFactory", arg0, arg1, arg2) +// ret0, _ := ret[0].(error) +// return ret0 +//} +// +//// RegisterFactory indicates an expected call of RegisterFactory. +//func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), arg0, arg1, arg2) +//} +// +//// RemoveAliases mocks base method. +//func (m *MockManager) RemoveAliases(arg0 ids.ID) { +// m.ctrl.T.Helper() +// m.ctrl.Call(m, "RemoveAliases", arg0) +//} +// +//// RemoveAliases indicates an expected call of RemoveAliases. +//func (mr *MockManagerMockRecorder) RemoveAliases(arg0 any) *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), arg0) +//} +// +//// Versions mocks base method. +//func (m *MockManager) Versions() (map[string]string, error) { +// m.ctrl.T.Helper() +// ret := m.ctrl.Call(m, "Versions") +// ret0, _ := ret[0].(map[string]string) +// ret1, _ := ret[1].(error) +// return ret0, ret1 +//} +// +//// Versions indicates an expected call of Versions. 
+//func (mr *MockManagerMockRecorder) Versions() *gomock.Call { +// mr.mock.ctrl.T.Helper() +// return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Versions", reflect.TypeOf((*MockManager)(nil).Versions)) +//} diff --git a/version/constants.go b/version/constants.go index 35320d4ccf3..410c13a04d1 100644 --- a/version/constants.go +++ b/version/constants.go @@ -4,11 +4,10 @@ package version import ( + _ "embed" "encoding/json" "time" - _ "embed" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" ) diff --git a/vms/avm/factory.go b/vms/avm/factory.go index ee71cac0346..8dc8ff52fba 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -9,12 +9,12 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" ) -var _ vms.Factory = (*Factory)(nil) +var _ vms.Factory[*VM] = (*Factory)(nil) type Factory struct { config.Config } -func (f *Factory) New(logging.Logger) (interface{}, error) { +func (f *Factory) New(logging.Logger) (*VM, error) { return &VM{Config: f.Config}, nil } diff --git a/vms/example/xsvm/factory.go b/vms/example/xsvm/factory.go index 99d33b8290d..928be57ab1b 100644 --- a/vms/example/xsvm/factory.go +++ b/vms/example/xsvm/factory.go @@ -4,11 +4,11 @@ package xsvm import ( + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" ) -var _ vms.Factory = (*Factory)(nil) +var _ rpcchainvm.Factory = (*Factory)(nil) type Factory struct{} diff --git a/vms/interface.go b/vms/interface.go new file mode 100644 index 00000000000..93a6335677c --- /dev/null +++ b/vms/interface.go @@ -0,0 +1,11 @@ +package vms + +import ( + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" +) + +// A Factory creates new instances of a VM +type Factory[T common.VM] interface { + New(logging.Logger) (T, error) +} diff --git a/vms/mock_manager.go b/vms/mock_manager.go deleted file mode 100644 index 
cea232ba2a7..00000000000 --- a/vms/mock_manager.go +++ /dev/null @@ -1,224 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms (interfaces: Factory,Manager) -// -// Generated by this command: -// -// mockgen -package=vms -destination=vms/mock_manager.go github.com/ava-labs/avalanchego/vms Factory,Manager -// - -// Package vms is a generated GoMock package. -package vms - -import ( - context "context" - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - logging "github.com/ava-labs/avalanchego/utils/logging" - gomock "go.uber.org/mock/gomock" -) - -// MockFactory is a mock of Factory interface. -type MockFactory struct { - ctrl *gomock.Controller - recorder *MockFactoryMockRecorder -} - -// MockFactoryMockRecorder is the mock recorder for MockFactory. -type MockFactoryMockRecorder struct { - mock *MockFactory -} - -// NewMockFactory creates a new mock instance. -func NewMockFactory(ctrl *gomock.Controller) *MockFactory { - mock := &MockFactory{ctrl: ctrl} - mock.recorder = &MockFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { - return m.recorder -} - -// New mocks base method. -func (m *MockFactory) New(arg0 logging.Logger) (any, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "New", arg0) - ret0, _ := ret[0].(any) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// New indicates an expected call of New. -func (mr *MockFactoryMockRecorder) New(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFactory)(nil).New), arg0) -} - -// MockManager is a mock of Manager interface. -type MockManager struct { - ctrl *gomock.Controller - recorder *MockManagerMockRecorder -} - -// MockManagerMockRecorder is the mock recorder for MockManager. 
-type MockManagerMockRecorder struct { - mock *MockManager -} - -// NewMockManager creates a new mock instance. -func NewMockManager(ctrl *gomock.Controller) *MockManager { - mock := &MockManager{ctrl: ctrl} - mock.recorder = &MockManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManager) EXPECT() *MockManagerMockRecorder { - return m.recorder -} - -// Alias mocks base method. -func (m *MockManager) Alias(arg0 ids.ID, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Alias", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Alias indicates an expected call of Alias. -func (mr *MockManagerMockRecorder) Alias(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), arg0, arg1) -} - -// Aliases mocks base method. -func (m *MockManager) Aliases(arg0 ids.ID) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aliases", arg0) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Aliases indicates an expected call of Aliases. -func (mr *MockManagerMockRecorder) Aliases(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), arg0) -} - -// GetFactory mocks base method. -func (m *MockManager) GetFactory(arg0 ids.ID) (Factory, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFactory", arg0) - ret0, _ := ret[0].(Factory) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFactory indicates an expected call of GetFactory. -func (mr *MockManagerMockRecorder) GetFactory(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), arg0) -} - -// ListFactories mocks base method. 
-func (m *MockManager) ListFactories() ([]ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListFactories") - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListFactories indicates an expected call of ListFactories. -func (mr *MockManagerMockRecorder) ListFactories() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFactories", reflect.TypeOf((*MockManager)(nil).ListFactories)) -} - -// Lookup mocks base method. -func (m *MockManager) Lookup(arg0 string) (ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Lookup", arg0) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Lookup indicates an expected call of Lookup. -func (mr *MockManagerMockRecorder) Lookup(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), arg0) -} - -// PrimaryAlias mocks base method. -func (m *MockManager) PrimaryAlias(arg0 ids.ID) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrimaryAlias", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PrimaryAlias indicates an expected call of PrimaryAlias. -func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), arg0) -} - -// PrimaryAliasOrDefault mocks base method. -func (m *MockManager) PrimaryAliasOrDefault(arg0 ids.ID) string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrimaryAliasOrDefault", arg0) - ret0, _ := ret[0].(string) - return ret0 -} - -// PrimaryAliasOrDefault indicates an expected call of PrimaryAliasOrDefault. 
-func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), arg0) -} - -// RegisterFactory mocks base method. -func (m *MockManager) RegisterFactory(arg0 context.Context, arg1 ids.ID, arg2 Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterFactory", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// RegisterFactory indicates an expected call of RegisterFactory. -func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), arg0, arg1, arg2) -} - -// RemoveAliases mocks base method. -func (m *MockManager) RemoveAliases(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveAliases", arg0) -} - -// RemoveAliases indicates an expected call of RemoveAliases. -func (mr *MockManagerMockRecorder) RemoveAliases(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), arg0) -} - -// Versions mocks base method. -func (m *MockManager) Versions() (map[string]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Versions") - ret0, _ := ret[0].(map[string]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Versions indicates an expected call of Versions. 
-func (mr *MockManagerMockRecorder) Versions() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Versions", reflect.TypeOf((*MockManager)(nil).Versions)) -} diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index f2d15195ad0..b26c38e3ed2 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -25,7 +24,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" diff --git a/chains/manager.go b/vms/platformvm/chain_manager.go similarity index 68% rename from chains/manager.go rename to vms/platformvm/chain_manager.go index 50b27836166..3eea53e6e1b 100644 --- a/chains/manager.go +++ b/vms/platformvm/chain_manager.go @@ -1,27 +1,27 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package chains +package platformvm import ( "context" "crypto" - "crypto/tls" "errors" "fmt" "os" "path/filepath" "sync" - "time" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + coreth "github.com/ava-labs/coreth/plugin/evm" + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/api/server" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/meterdb" @@ -29,19 +29,28 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network" + rpcchainvm2 "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" + smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" + aveng "github.com/ava-labs/avalanchego/snow/engine/avalanche" + avbootstrap "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" + avagetter "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" "github.com/ava-labs/avalanchego/snow/engine/avalanche/state" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + smeng "github.com/ava-labs/avalanchego/snow/engine/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + smbootstrap "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap" + snowgetter "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/engine/snowman/syncer" "github.com/ava-labs/avalanchego/snow/networking/handler" "github.com/ava-labs/avalanchego/snow/networking/router" 
"github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" + timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" @@ -55,25 +64,16 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" - "github.com/ava-labs/avalanchego/vms/fx" + "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/metervm" "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/proposervm" + "github.com/ava-labs/avalanchego/vms/rpcchainvm" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/vms/tracedvm" - - timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" - - aveng "github.com/ava-labs/avalanchego/snow/engine/avalanche" - avbootstrap "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" - avagetter "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" - - smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" - smeng "github.com/ava-labs/avalanchego/snow/engine/snowman" - smbootstrap "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap" - snowgetter "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" ) const ( @@ -100,52 +100,12 @@ var ( errNoPrimaryNetworkConfig = errors.New("no subnet config for primary network found") errPartialSyncAsAValidator = errors.New("partial sync should not be configured for a validator") - fxs = map[ids.ID]fx.Factory{ - secp256k1fx.ID: &secp256k1fx.Factory{}, - nftfx.ID: &nftfx.Factory{}, - propertyfx.ID: &propertyfx.Factory{}, - } - - _ Manager = (*manager)(nil) + _ 
consensusBuilder[*rpcchainvm.VMClient] = (*snowmanBuilder[*rpcchainvm.VMClient])(nil) + //_ consensusBuilder[*avm.VM] = (*avalancheBuilder)(nil) ) -// Manager manages the chains running on this node. -// It can: -// - Create a chain -// - Add a registrant. When a chain is created, each registrant calls -// RegisterChain with the new chain as the argument. -// - Manage the aliases of chains -type Manager interface { - ids.Aliaser - - // Queues a chain to be created in the future after chain creator is unblocked. - // This is only called from the P-chain thread to create other chains - // Queued chains are created only after P-chain is bootstrapped. - // This assumes only chains in tracked subnets are queued. - QueueChainCreation(ChainParameters) - - // Add a registrant [r]. Every time a chain is - // created, [r].RegisterChain([new chain]) is called. - AddRegistrant(Registrant) - - // Given an alias, return the ID of the chain associated with that alias - Lookup(string) (ids.ID, error) - - // Given an alias, return the ID of the VM associated with that alias - LookupVM(string) (ids.ID, error) - - // Returns true iff the chain with the given ID exists and is finished bootstrapping - IsBootstrapped(ids.ID) bool - - // Starts the chain creator with the initial platform chain parameters, must - // be called once. - StartChainCreator(platformChain ChainParameters) error - - Shutdown() -} - -// ChainParameters defines the chain being created -type ChainParameters struct { +// chainParameters defines the chain being created +type chainParameters struct { // The ID of the chain being created. ID ids.ID // ID of the subnet that validates this chain. @@ -154,18 +114,15 @@ type ChainParameters struct { GenesisData []byte // The ID of the vm this chain is running. VMID ids.ID - // The IDs of the feature extensions this chain is running. - FxIDs []ids.ID - // Invariant: Only used when [ID] is the P-chain ID. 
- CustomBeacons validators.Manager } -type chain struct { - Name string - Context *snow.ConsensusContext - VM common.VM - Handler handler.Handler - Beacons validators.Manager +type chain[T common.VM] struct { + Name string + Context *snow.ConsensusContext + VM T + WrappedVM common.VM + Handler handler.Handler + Beacons validators.Manager } // ChainConfig is configuration settings for the current execution. @@ -176,80 +133,54 @@ type ChainConfig struct { Upgrade []byte } -type ManagerConfig struct { - SybilProtectionEnabled bool - StakingTLSCert tls.Certificate // needed to sign snowman++ blocks - StakingBLSKey *bls.SecretKey - TracingEnabled bool - // Must not be used unless [TracingEnabled] is true as this may be nil. - Tracer trace.Tracer - Log logging.Logger - LogFactory logging.Factory - VMManager vms.Manager // Manage mappings from vm ID --> vm - BlockAcceptorGroup snow.AcceptorGroup - TxAcceptorGroup snow.AcceptorGroup - VertexAcceptorGroup snow.AcceptorGroup - DB database.Database - MsgCreator message.OutboundMsgBuilder // message creator, shared with network - Router router.Router // Routes incoming messages to the appropriate chain - Net network.Network // Sends consensus messages to other validators - Validators validators.Manager // Validators validating on this chain - NodeID ids.NodeID // The ID of this node - NetworkID uint32 // ID of the network this node is connected to - PartialSyncPrimaryNetwork bool - Server server.Server // Handles HTTP API calls - Keystore keystore.Keystore - AtomicMemory *atomic.Memory - AVAXAssetID ids.ID - XChainID ids.ID // ID of the X-Chain, - CChainID ids.ID // ID of the C-Chain, - CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully - TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators - Health health.Registerer - SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig - ChainConfigs map[string]ChainConfig // alias -> ChainConfig - // ShutdownNodeFunc 
allows the chain manager to issue a request to shutdown the node - ShutdownNodeFunc func(exitCode int) - MeterVMEnabled bool // Should each VM be wrapped with a MeterVM - Metrics metrics.MultiGatherer - - FrontierPollFrequency time.Duration - ConsensusAppConcurrency int - - // Max Time to spend fetching a container and its - // ancestors when responding to a GetAncestors - BootstrapMaxTimeGetAncestors time.Duration - // Max number of containers in an ancestors message sent by this node. - BootstrapAncestorsMaxContainersSent int - // This node will only consider the first [AncestorsMaxContainersReceived] - // containers in an ancestors message it receives. - BootstrapAncestorsMaxContainersReceived int - - ApricotPhase4Time time.Time - ApricotPhase4MinPChainHeight uint64 - - // Tracks CPU/disk usage caused by each peer. - ResourceTracker timetracker.ResourceTracker +type chainManager struct { + config.Config - StateSyncBeacons []ids.NodeID + platformVMFactory vms.Factory[*VM] + avmFactory vms.Factory[*avm.VM] + corethFactory vms.Factory[*coreth.VM] - ChainDataDir string -} + aliaser ids.Aliaser + stakingBLSKey *bls.SecretKey + // Must not be used unless [TracingEnabled] is true as this may be nil. 
+ tracer trace.Tracer + log logging.Logger + logFactory logging.Factory + vmManager rpcchainvm2.Manager // Manage mappings from vm ID --> vm + blockAcceptorGroup snow.AcceptorGroup + txAcceptorGroup snow.AcceptorGroup + vertexAcceptorGroup snow.AcceptorGroup + db database.Database + msgCreator message.OutboundMsgBuilder // message creator, shared with network + router router.Router // Routes incoming messages to the appropriate chain + net network.Network // Sends consensus messages to other validators + bootstrappers validators.Manager + validators validators.Manager // Validators validating on this chain + nodeID ids.NodeID // The ID of this node + keystore keystore.Keystore + atomicMemory *atomic.Memory + xChainID ids.ID // ID of the X-Chain, + cChainID ids.ID // ID of the C-Chain, + criticalChains set.Set[ids.ID] // Chains that can't exit gracefully + timeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators + health health.Registerer + subnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig + chainConfigs map[string]ChainConfig // alias -> ChainConfig + // ShutdownNodeFunc allows the chainManager to issue a request to shutdown the node + shutdownNodeFunc func(exitCode int) + metrics metrics.MultiGatherer -type manager struct { - // Note: The string representation of a chain's ID is also considered to be an alias of the chain - // That is, [chainID].String() is an alias for the chain, too - ids.Aliaser - ManagerConfig + // Tracks CPU/disk usage caused by each peer. 
+ resourceTracker timetracker.ResourceTracker stakingSigner crypto.Signer stakingCert *staking.Certificate // Those notified when a chain is created - registrants []Registrant + registrants []chains.Registrant // queue that holds chain create requests - chainsQueue buffer.BlockingDeque[ChainParameters] + chainsQueue buffer.BlockingDeque[chainParameters] // unblocks chain creator to start processing the queue unblockChainCreatorCh chan struct{} // shutdown the chain creator goroutine if the queue hasn't started to be @@ -271,65 +202,268 @@ type manager struct { validatorState validators.State } -// New returns a new Manager -func New(config *ManagerConfig) Manager { - return &manager{ - Aliaser: ids.NewAliaser(), - ManagerConfig: *config, - stakingSigner: config.StakingTLSCert.PrivateKey.(crypto.Signer), - stakingCert: staking.CertificateFromX509(config.StakingTLSCert.Leaf), - subnets: make(map[ids.ID]subnets.Subnet), - chains: make(map[ids.ID]handler.Handler), - chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), - unblockChainCreatorCh: make(chan struct{}), - chainCreatorShutdownCh: make(chan struct{}), +// Create the blockchain if this node is a member of the subnet that validates +// the chain +// +// TODO verify Note: it is expected for the subnet to already have the chain registered as +// bootstrapping before this function is called +func (m *chainManager) QueueChainCreation(chainID ids.ID, subnetID ids.ID, genesis []byte, vmID ids.ID) { + if m.SybilProtectionEnabled && // Sybil protection is enabled, so nodes might not validate all chains + constants.PrimaryNetworkID != subnetID && // All nodes must validate the primary network + !m.TrackedSubnets.Contains(subnetID) { // This node doesn't validate this blockchain + return } -} -// QueueChainCreation queues a chain creation request -// Invariant: Tracked Subnet must be checked before calling this function -func (m *manager) QueueChainCreation(chainParams ChainParameters) { 
m.subnetsLock.Lock() - subnetID := chainParams.SubnetID sb, exists := m.subnets[subnetID] if !exists { - sbConfig, ok := m.SubnetConfigs[subnetID] + sbConfig, ok := m.subnetConfigs[subnetID] if !ok { // default to primary subnet config - sbConfig = m.SubnetConfigs[constants.PrimaryNetworkID] + sbConfig = m.subnetConfigs[constants.PrimaryNetworkID] } - sb = subnets.New(m.NodeID, sbConfig) - m.subnets[chainParams.SubnetID] = sb + sb = subnets.New(m.nodeID, sbConfig) + m.subnets[subnetID] = sb } - addedChain := sb.AddChain(chainParams.ID) + addedChain := sb.AddChain(chainID) m.subnetsLock.Unlock() if !addedChain { - m.Log.Debug("skipping chain creation", + m.log.Debug("skipping chain creation", zap.String("reason", "chain already staged"), zap.Stringer("subnetID", subnetID), - zap.Stringer("chainID", chainParams.ID), - zap.Stringer("vmID", chainParams.VMID), + zap.Stringer("chainID", chainID), + zap.Stringer("vmID", vmID), ) return } - if ok := m.chainsQueue.PushRight(chainParams); !ok { - m.Log.Warn("skipping chain creation", + if ok := m.chainsQueue.PushRight(chainParameters{ + ID: chainID, + SubnetID: subnetID, + GenesisData: genesis, + VMID: vmID, + }); !ok { + m.log.Warn("skipping chain creation", zap.String("reason", "couldn't enqueue chain"), zap.Stringer("subnetID", subnetID), - zap.Stringer("chainID", chainParams.ID), - zap.Stringer("vmID", chainParams.VMID), + zap.Stringer("chainID", chainID), + zap.Stringer("vmID", vmID), ) } } -// createChain creates and starts the chain -// -// Note: it is expected for the subnet to already have the chain registered as -// bootstrapping before this function is called -func (m *manager) createChain(chainParams ChainParameters) { - m.Log.Info("creating chain", +func (m *chainManager) AddRegistrant(r chains.Registrant) { + m.registrants = append(m.registrants, r) +} + +func (m *chainManager) IsBootstrapped(id ids.ID) bool { + m.chainsLock.Lock() + chain, exists := m.chains[id] + m.chainsLock.Unlock() + if !exists { + return 
false + } + + return chain.Context().State.Get().State == snow.NormalOp +} + +func (m *chainManager) subnetsNotBootstrapped() []ids.ID { + m.subnetsLock.RLock() + defer m.subnetsLock.RUnlock() + + subnetsBootstrapping := make([]ids.ID, 0, len(m.subnets)) + for subnetID, subnet := range m.subnets { + if !subnet.IsBootstrapped() { + subnetsBootstrapping = append(subnetsBootstrapping, subnetID) + } + } + return subnetsBootstrapping +} + +func (m *chainManager) registerBootstrappedHealthChecks() error { + bootstrappedCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { + subnetIDs := m.subnetsNotBootstrapped() + if len(subnetIDs) != 0 { + return subnetIDs, errNotBootstrapped + } + return []ids.ID{}, nil + }) + if err := m.health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { + return fmt.Errorf("couldn't register bootstrapped readiness check: %w", err) + } + if err := m.health.RegisterHealthCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { + return fmt.Errorf("couldn't register bootstrapped health check: %w", err) + } + + // We should only report unhealthy if the node is partially syncing the + // primary network and is a validator. + if !m.PartialSyncPrimaryNetwork { + return nil + } + + partialSyncCheck := health.CheckerFunc(func(ctx context.Context) (interface{}, error) { + // Note: The health check is skipped during bootstrapping to allow a + // node to sync the network even if it was previously a validator. 
+ if !m.IsBootstrapped(constants.PlatformChainID) { + return "node is currently bootstrapping", nil + } + if _, ok := m.validators.GetValidator(constants.PrimaryNetworkID, m.nodeID); !ok { + return "node is not a primary network validator", nil + } + + m.log.Warn("node is a primary network validator", + zap.Error(errPartialSyncAsAValidator), + ) + return "node is a primary network validator", errPartialSyncAsAValidator + }) + + if err := m.health.RegisterHealthCheck("validation", partialSyncCheck, health.ApplicationTag); err != nil { + return fmt.Errorf("couldn't register validation health check: %w", err) + } + return nil +} + +// Starts chain creation loop to process queued chains +func (m *chainManager) StartChainCreator(platformParams chainParameters) (*VM, error) { + // Get the Primary Network's subnet config. If it wasn't registered, then we + // throw a fatal error. + sbConfig, ok := m.subnetConfigs[constants.PrimaryNetworkID] + if !ok { + return nil, errNoPrimaryNetworkConfig + } + + sb := subnets.New(m.nodeID, sbConfig) + m.subnetsLock.Lock() + m.subnets[platformParams.SubnetID] = sb + sb.AddChain(platformParams.ID) + m.subnetsLock.Unlock() + + // The P-chain is created synchronously to ensure that `VM.Initialize` has + // finished before returning from this function. This is required because + // the P-chain initializes state that the rest of the node initialization + // depends on. + chainBuilder := chainCreator[*VM]{ + m: m, + beacons: m.bootstrappers, + factory: m.platformVMFactory, + consensusBuilder: platformBuilder{}, + } + chain, err := chainBuilder.createChain(platformParams) + if err != nil { + return nil, err + } + + m.log.Info("starting chain creator") + m.chainCreatorExited.Add(1) + go m.dispatchChainCreator() + return chain.VM, err +} + +func (m *chainManager) dispatchChainCreator() { + defer m.chainCreatorExited.Done() + + select { + // This channel will be closed when Shutdown is called on the manager. 
+ case <-m.chainCreatorShutdownCh: + return + case <-m.unblockChainCreatorCh: + } + + // Handle chain creations + for { + // Get the next chain we should create. + // Dequeue waits until an element is pushed, so this is not + // busy-looping. + chainParams, ok := m.chainsQueue.PopLeft() + if !ok { // queue is closed, return directly + return + } + + switch chainParams.ID { + case m.xChainID: + //chainCreator := chainCreator[*linearizeOnInitializeVM]{ + // m: m, + // beacons: m.validators, + // factory: m.avmFactory, + // consensusBuilder: avalancheBuilder{}, + //} + //_, _ = chainCreator.createChain(chainParams) + case m.cChainID: + chainCreator := chainCreator[*coreth.VM]{ + m: m, + beacons: m.validators, + factory: m.corethFactory, + consensusBuilder: snowmanBuilder[*coreth.VM]{}, + } + _, _ = chainCreator.createChain(chainParams) + default: + vmFactory, err := m.vmManager.GetFactory(chainParams.VMID) + if err != nil { + m.log.Error("failed to create chain", zap.Error(err)) + continue + } + + chainCreator := chainCreator[*rpcchainvm.VMClient]{ + m: m, + beacons: m.validators, + factory: vmFactory, + consensusBuilder: snowmanBuilder[*rpcchainvm.VMClient]{}, + } + _, _ = chainCreator.createChain(chainParams) + } + } +} + +// Shutdown stops all the chains +func (m *chainManager) Shutdown() { + m.log.Info("shutting down chain manager") + m.chainsQueue.Close() + close(m.chainCreatorShutdownCh) + m.chainCreatorExited.Wait() + m.router.Shutdown(context.TODO()) +} + +// Notify registrants [those who want to know about the creation of chains] +// that the specified chain has been created +func (m *chainManager) notifyRegistrants(name string, ctx *snow.ConsensusContext, created common.VM) { + for _, registrant := range m.registrants { + registrant.RegisterChain(name, ctx, created) + } +} + +// getChainConfig returns value of a entry by looking at ID key and alias key +// it first searches ID key, then falls back to it's corresponding primary alias +func (m *chainManager) 
getChainConfig(id ids.ID) (ChainConfig, error) { + if val, ok := m.chainConfigs[id.String()]; ok { + return val, nil + } + aliases, err := m.aliaser.Aliases(id) + if err != nil { + return ChainConfig{}, err + } + for _, alias := range aliases { + if val, ok := m.chainConfigs[alias]; ok { + return val, nil + } + } + + return ChainConfig{}, nil +} + +type chainCreator[T common.VM] struct { + m *chainManager + + beacons validators.Manager + factory vms.Factory[T] + consensusBuilder consensusBuilder[T] +} + +// Create a chain +func (b *chainCreator[T]) createChain(chainParams chainParameters) (*chain[T], error) { + m := b.m + + m.log.Info("creating chain", zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), @@ -344,22 +478,23 @@ func (m *manager) createChain(chainParams ChainParameters) { // issue some internal messages), is delayed until chain dispatching is started and // the chain is registered in the manager. This ensures that no message generated by handler // upon start is dropped. - chain, err := m.buildChain(chainParams, sb) + chain, err := b.buildChain(chainParams, sb) if err != nil { - if m.CriticalChains.Contains(chainParams.ID) { + if m.criticalChains.Contains(chainParams.ID) { // Shut down if we fail to create a required chain (i.e. 
X, P or C) - m.Log.Fatal("error creating required chain", + m.log.Fatal("error creating required chain", zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), zap.Error(err), ) - go m.ShutdownNodeFunc(1) - return + //TODO handle shutdown case in the caller of vmFactory.New() + go m.shutdownNodeFunc(1) + return nil, err } - chainAlias := m.PrimaryAliasOrDefault(chainParams.ID) - m.Log.Error("error creating chain", + chainAlias := m.aliaser.PrimaryAliasOrDefault(chainParams.ID) + m.log.Error("error creating chain", zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.String("chainAlias", chainAlias), @@ -372,7 +507,7 @@ func (m *manager) createChain(chainParams ChainParameters) { // node may not be properly validating the subnet they expect to be // validating. healthCheckErr := fmt.Errorf("failed to create chain on subnet %s: %w", chainParams.SubnetID, err) - err := m.Health.RegisterHealthCheck( + err := m.health.RegisterHealthCheck( chainAlias, health.CheckerFunc(func(context.Context) (interface{}, error) { return nil, healthCheckErr @@ -380,7 +515,7 @@ func (m *manager) createChain(chainParams ChainParameters) { chainParams.SubnetID.String(), ) if err != nil { - m.Log.Error("failed to register failing health check", + m.log.Error("failed to register failing health check", zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.String("chainAlias", chainAlias), @@ -388,7 +523,7 @@ func (m *manager) createChain(chainParams ChainParameters) { zap.Error(err), ) } - return + return nil, err } m.chainsLock.Lock() @@ -396,8 +531,8 @@ func (m *manager) createChain(chainParams ChainParameters) { m.chainsLock.Unlock() // Associate the newly created chain with its default alias - if err := m.Alias(chainParams.ID, chainParams.ID.String()); err != nil { - m.Log.Error("failed to alias the new chain with itself", + if err := 
m.aliaser.Alias(chainParams.ID, chainParams.ID.String()); err != nil { + m.log.Error("failed to alias the new chain with itself", zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), @@ -406,12 +541,12 @@ func (m *manager) createChain(chainParams ChainParameters) { } // Notify those that registered to be notified when a new chain is created - m.notifyRegistrants(chain.Name, chain.Context, chain.VM) + m.notifyRegistrants(chain.Name, chain.Context, chain.WrappedVM) // Allows messages to be routed to the new chain. If the handler hasn't been // started and a message is forwarded, then the message will block until the // handler is started. - m.ManagerConfig.Router.AddChain(context.TODO(), chain.Handler) + m.router.AddChain(context.TODO(), chain.Handler) // Register bootstrapped health checks after P chain has been added to // chains. @@ -427,15 +562,16 @@ func (m *manager) createChain(chainParams ChainParameters) { // Tell the chain to start processing messages. 
// If the X, P, or C Chain panics, do not attempt to recover - chain.Handler.Start(context.TODO(), !m.CriticalChains.Contains(chainParams.ID)) + chain.Handler.Start(context.TODO(), !m.criticalChains.Contains(chainParams.ID)) + return chain, nil } -// Create a chain -func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*chain, error) { +func (b *chainCreator[T]) buildChain(chainParams chainParameters, sb subnets.Subnet) (*chain[T], error) { + m := b.m if chainParams.ID != constants.PlatformChainID && chainParams.VMID == constants.PlatformVMID { return nil, errCreatePlatformVM } - primaryAlias := m.PrimaryAliasOrDefault(chainParams.ID) + primaryAlias := m.aliaser.PrimaryAliasOrDefault(chainParams.ID) // Create this chain's data directory chainDataDir := filepath.Join(m.ChainDataDir, chainParams.ID.String()) @@ -444,14 +580,14 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } // Create the log and context of the chain - chainLog, err := m.LogFactory.MakeChain(primaryAlias) + chainLog, err := m.logFactory.MakeChain(primaryAlias) if err != nil { return nil, fmt.Errorf("error while creating chain's log %w", err) } consensusMetrics := prometheus.NewRegistry() chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) - if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil { + if err := m.metrics.Register(chainNamespace, consensusMetrics); err != nil { return nil, fmt.Errorf("error while registering chain's metrics %w", err) } @@ -460,13 +596,13 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c // there are no conflicts when registering the Snowman consensus metrics. 
avalancheConsensusMetrics := prometheus.NewRegistry() avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") - if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { + if err := m.metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { return nil, fmt.Errorf("error while registering DAG metrics %w", err) } vmMetrics := metrics.NewOptionalGatherer() vmNamespace := metric.AppendNamespace(chainNamespace, "vm") - if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { + if err := m.metrics.Register(vmNamespace, vmMetrics); err != nil { return nil, fmt.Errorf("error while registering vm's metrics %w", err) } @@ -475,296 +611,199 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c NetworkID: m.NetworkID, SubnetID: chainParams.SubnetID, ChainID: chainParams.ID, - NodeID: m.NodeID, - PublicKey: bls.PublicFromSecretKey(m.StakingBLSKey), + NodeID: m.nodeID, + PublicKey: bls.PublicFromSecretKey(m.stakingBLSKey), - XChainID: m.XChainID, - CChainID: m.CChainID, + XChainID: m.xChainID, + CChainID: m.cChainID, AVAXAssetID: m.AVAXAssetID, Log: chainLog, - Keystore: m.Keystore.NewBlockchainKeyStore(chainParams.ID), - SharedMemory: m.AtomicMemory.NewSharedMemory(chainParams.ID), - BCLookup: m, + Keystore: m.keystore.NewBlockchainKeyStore(chainParams.ID), + SharedMemory: m.atomicMemory.NewSharedMemory(chainParams.ID), + BCLookup: m.aliaser, Metrics: vmMetrics, - WarpSigner: warp.NewSigner(m.StakingBLSKey, m.NetworkID, chainParams.ID), + WarpSigner: warp.NewSigner(m.stakingBLSKey, m.NetworkID, chainParams.ID), ValidatorState: m.validatorState, ChainDataDir: chainDataDir, }, - BlockAcceptor: m.BlockAcceptorGroup, - TxAcceptor: m.TxAcceptorGroup, - VertexAcceptor: m.VertexAcceptorGroup, + BlockAcceptor: m.blockAcceptorGroup, + TxAcceptor: m.txAcceptorGroup, + VertexAcceptor: m.vertexAcceptorGroup, Registerer: consensusMetrics, AvalancheRegisterer: 
avalancheConsensusMetrics, } - // Get a factory for the vm we want to use on our chain - vmFactory, err := m.VMManager.GetFactory(chainParams.VMID) - if err != nil { - return nil, fmt.Errorf("error while getting vmFactory: %w", err) - } - - // Create the chain - vm, err := vmFactory.New(chainLog) + chain, err := b.consensusBuilder.build(m, ctx, chainParams, b.beacons, m.validators, b.factory, sb) if err != nil { - return nil, fmt.Errorf("error while creating vm: %w", err) - } - // TODO: Shutdown VM if an error occurs - - chainFxs := make([]*common.Fx, len(chainParams.FxIDs)) - for i, fxID := range chainParams.FxIDs { - fxFactory, ok := fxs[fxID] - if !ok { - return nil, fmt.Errorf("fx %s not found", fxID) - } - - chainFxs[i] = &common.Fx{ - ID: fxID, - Fx: fxFactory.New(), - } - } - - var chain *chain - switch vm := vm.(type) { - case vertex.LinearizableVMWithEngine: - chain, err = m.createAvalancheChain( - ctx, - chainParams.GenesisData, - m.Validators, - vm, - chainFxs, - sb, - ) - if err != nil { - return nil, fmt.Errorf("error while creating new avalanche vm %w", err) - } - case block.ChainVM: - beacons := m.Validators - if chainParams.ID == constants.PlatformChainID { - beacons = chainParams.CustomBeacons - } - - chain, err = m.createSnowmanChain( - ctx, - chainParams.GenesisData, - m.Validators, - beacons, - vm, - chainFxs, - sb, - ) - if err != nil { - return nil, fmt.Errorf("error while creating new snowman vm %w", err) - } - default: - return nil, errUnknownVMType + return nil, err } - // Register the chain with the timeout manager - if err := m.TimeoutManager.RegisterChain(ctx); err != nil { + // Register the chain with the timeout ChainManager + if err := m.timeoutManager.RegisterChain(ctx); err != nil { return nil, err } return chain, nil } -func (m *manager) AddRegistrant(r Registrant) { - m.registrants = append(m.registrants, r) +type consensusBuilder[T common.VM] interface { + build( + m *chainManager, + ctx *snow.ConsensusContext, + params 
chainParameters, + vdrs, + beacons validators.Manager, + vmFactory vms.Factory[T], + sb subnets.Subnet, + ) (*chain[T], error) } -// Create a DAG-based blockchain that uses Avalanche -func (m *manager) createAvalancheChain( +type snowmanBuilder[T block.ChainVM] struct{} + +func (s snowmanBuilder[T]) build( + m *chainManager, ctx *snow.ConsensusContext, - genesisData []byte, - vdrs validators.Manager, - vm vertex.LinearizableVMWithEngine, - fxs []*common.Fx, + params chainParameters, + vdrs, + beacons validators.Manager, + vmFactory vms.Factory[T], sb subnets.Subnet, -) (*chain, error) { +) (*chain[T], error) { + vm, err := vmFactory.New(ctx.Log) + if err != nil { + return nil, fmt.Errorf("error while creating m: %w", err) + } + ctx.Lock.Lock() defer ctx.Lock.Unlock() ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + meterDB, err := meterdb.New("db", ctx.Registerer, m.db) if err != nil { return nil, err } prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) vmDB := prefixdb.New(vmDBPrefix, prefixDB) - vertexDB := prefixdb.New(vertexDBPrefix, prefixDB) - vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB) - txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB) - blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB) + bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB) - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) - if err != nil { - return nil, err - } - txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) - if err != nil { - return nil, err - } - blockBlocker, err := queue.NewWithMissing(blockBootstrappingDB, "block", ctx.Registerer) + blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) if err != nil { return nil, err } - // Passes messages 
from the avalanche engines to the network - avalancheMessageSender, err := sender.New( + // Passes messages from the consensus engine to the network + messageSender, err := sender.New( ctx, - m.MsgCreator, - m.Net, - m.ManagerConfig.Router, - m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_AVALANCHE, + m.msgCreator, + m.net, + m.router, + m.timeoutManager, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, sb, ) if err != nil { - return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) + return nil, fmt.Errorf("couldn't initialize sender: %w", err) } if m.TracingEnabled { - avalancheMessageSender = sender.Trace(avalancheMessageSender, m.Tracer) + messageSender = sender.Trace(messageSender, m.tracer) } - err = m.VertexAcceptorGroup.RegisterAcceptor( + err = m.blockAcceptorGroup.RegisterAcceptor( ctx.ChainID, "gossip", - avalancheMessageSender, + messageSender, false, ) if err != nil { // Set up the event dispatcher return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) } - // Passes messages from the snowman engines to the network - snowmanMessageSender, err := sender.New( - ctx, - m.MsgCreator, - m.Net, - m.ManagerConfig.Router, - m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - sb, + var ( + bootstrapFunc func() + subnetConnector = validators.UnhandledSubnetConnector + wrappedVM block.ChainVM = vm ) - if err != nil { - return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) - } - - if m.TracingEnabled { - snowmanMessageSender = sender.Trace(snowmanMessageSender, m.Tracer) - } + // If [m.validatorState] is nil then we are creating the P-Chain. 
Since the + // P-Chain is the first chain to be created, we can use it to initialize + // required interfaces for the other chains + if m.validatorState == nil { + valState, ok := wrappedVM.(validators.State) + if !ok { + return nil, fmt.Errorf("expected validators.State but got %T", m) + } - err = m.BlockAcceptorGroup.RegisterAcceptor( - ctx.ChainID, - "gossip", - snowmanMessageSender, - false, - ) - if err != nil { // Set up the event dispatcher - return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) - } + if m.TracingEnabled { + valState = validators.Trace(valState, "platformvm", m.tracer) + } - chainConfig, err := m.getChainConfig(ctx.ChainID) - if err != nil { - return nil, fmt.Errorf("error while fetching chain config: %w", err) - } + // Notice that this context is left unlocked. This is because the + // lock will already be held when accessing these values on the + // P-chain. + ctx.ValidatorState = valState - dagVM := vm - if m.MeterVMEnabled { - dagVM = metervm.NewVertexVM(dagVM) - } - if m.TracingEnabled { - dagVM = tracedvm.NewVertexVM(dagVM, m.Tracer) - } + // Initialize the validator state for future chains. 
+ m.validatorState = validators.NewLockedState(&ctx.Lock, valState) + if m.TracingEnabled { + m.validatorState = validators.Trace(m.validatorState, "lockedState", m.tracer) + } - // Handles serialization/deserialization of vertices and also the - // persistence of vertices - vtxManager := state.NewSerializer( - state.SerializerConfig{ - ChainID: ctx.ChainID, - VM: dagVM, - DB: vertexDB, - Log: ctx.Log, - CortinaTime: version.GetCortinaTime(ctx.NetworkID), - }, - ) + if !m.SybilProtectionEnabled { + m.validatorState = validators.NewNoValidatorsState(m.validatorState) + ctx.ValidatorState = validators.NewNoValidatorsState(ctx.ValidatorState) + } - avalancheRegisterer := metrics.NewOptionalGatherer() - snowmanRegisterer := metrics.NewOptionalGatherer() + // Set this func only for platform + // + // The snowman bootstrapper ensures this function is only executed once, so + // we don't need to be concerned about closing this channel multiple times. + bootstrapFunc = func() { + close(m.unblockChainCreatorCh) + } - registerer := metrics.NewMultiGatherer() - if err := registerer.Register("avalanche", avalancheRegisterer); err != nil { - return nil, err - } - if err := registerer.Register("", snowmanRegisterer); err != nil { - return nil, err - } - if err := ctx.Context.Metrics.Register(registerer); err != nil { - return nil, err + // Set up the subnet connector for the P-Chain + subnetConnector, ok = wrappedVM.(validators.SubnetConnector) + if !ok { + return nil, fmt.Errorf("expected validators.SubnetConnector but got %T", m) + } } - ctx.Context.Metrics = avalancheRegisterer - - // The channel through which a VM may send messages to the consensus engine - // VM uses this channel to notify engine that a block is ready to be made - msgChan := make(chan common.Message, defaultChannelSize) - - // The only difference between using avalancheMessageSender and - // snowmanMessageSender here is where the metrics will be placed. 
Because we - // end up using this sender after the linearization, we pass in - // snowmanMessageSender here. - err = dagVM.Initialize( - context.TODO(), - ctx.Context, - vmDB, - genesisData, - chainConfig.Upgrade, - chainConfig.Config, - msgChan, - fxs, - snowmanMessageSender, - ) + // Initialize the ProposerVM and the m wrapped inside it + chainConfig, err := m.getChainConfig(ctx.ChainID) if err != nil { - return nil, fmt.Errorf("error during vm's Initialize: %w", err) + return nil, fmt.Errorf("error while fetching chain config: %w", err) } - // Initialize the ProposerVM and the vm wrapped inside it var ( minBlockDelay = proposervm.DefaultMinBlockDelay numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks ) - if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { + if subnetCfg, ok := m.subnetConfigs[ctx.SubnetID]; ok { minBlockDelay = subnetCfg.ProposerMinBlockDelay numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks } - m.Log.Info("creating proposervm wrapper", + m.log.Info("creating proposervm wrapper", zap.Time("activationTime", m.ApricotPhase4Time), zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), zap.Duration("minBlockDelay", minBlockDelay), zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) - - // Note: this does not use [dagVM] to ensure we use the [vm]'s height index. - untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm) + chainAlias := m.aliaser.PrimaryAliasOrDefault(ctx.ChainID) - var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM if m.TracingEnabled { - vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, chainAlias, m.Tracer) + wrappedVM = tracedvm.NewBlockVM(vm, chainAlias, m.tracer) } - // Note: vmWrappingProposerVM is the VM that the Snowman engines should be - // using. 
- var vmWrappingProposerVM block.ChainVM = proposervm.New( - vmWrappedInsideProposerVM, + wrappedVM = proposervm.New( + wrappedVM, proposervm.Config{ ActivationTime: m.ApricotPhase4Time, DurangoTime: version.GetDurangoTime(m.NetworkID), @@ -777,31 +816,31 @@ func (m *manager) createAvalancheChain( ) if m.MeterVMEnabled { - vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM) + wrappedVM = metervm.NewBlockVM(vm) } if m.TracingEnabled { - vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.Tracer) + wrappedVM = tracedvm.NewBlockVM(vm, "proposervm", m.tracer) } - // Note: linearizableVM is the VM that the Avalanche engines should be - // using. - linearizableVM := &initializeOnLinearizeVM{ - DAGVM: dagVM, - vmToInitialize: vmWrappingProposerVM, - vmToLinearize: untracedVMWrappedInsideProposerVM, + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, defaultChannelSize) - registerer: snowmanRegisterer, - ctx: ctx.Context, - db: vmDB, - genesisBytes: genesisData, - upgradeBytes: chainConfig.Upgrade, - configBytes: chainConfig.Config, - toEngine: msgChan, - fxs: fxs, - appSender: snowmanMessageSender, + if err := vm.Initialize( + context.TODO(), + ctx.Context, + vmDB, + params.GenesisData, + chainConfig.Upgrade, + chainConfig.Config, + msgChan, + nil, + messageSender, + ); err != nil { + return nil, err } - bootstrapWeight, err := vdrs.TotalWeight(ctx.SubnetID) + bootstrapWeight, err := beacons.TotalWeight(ctx.SubnetID) if err != nil { return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err) } @@ -825,22 +864,22 @@ func (m *manager) createAvalancheChain( msgChan, m.FrontierPollFrequency, m.ConsensusAppConcurrency, - m.ResourceTracker, - validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector + m.resourceTracker, + subnetConnector, sb, 
connectedValidators, ) if err != nil { - return nil, fmt.Errorf("error initializing network handler: %w", err) + return nil, fmt.Errorf("couldn't initialize message handler: %w", err) } connectedBeacons := tracker.NewPeers() startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) - vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) + beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) snowGetHandler, err := snowgetter.New( - vmWrappingProposerVM, - snowmanMessageSender, + vm, + messageSender, ctx.Log, m.BootstrapMaxTimeGetAncestors, m.BootstrapAncestorsMaxContainersSent, @@ -850,267 +889,337 @@ func (m *manager) createAvalancheChain( return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } - var snowmanConsensus smcon.Consensus = &smcon.Topological{} + var consensus smcon.Consensus = &smcon.Topological{} if m.TracingEnabled { - snowmanConsensus = smcon.Trace(snowmanConsensus, m.Tracer) + consensus = smcon.Trace(consensus, m.tracer) } // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized - snowmanEngineConfig := smeng.Config{ + engineConfig := smeng.Config{ Ctx: ctx, AllGetsServer: snowGetHandler, - VM: vmWrappingProposerVM, - Sender: snowmanMessageSender, + VM: vm, + Sender: messageSender, Validators: vdrs, ConnectedValidators: connectedValidators, Params: consensusParams, - Consensus: snowmanConsensus, + Consensus: consensus, + PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } - snowmanEngine, err := smeng.New(snowmanEngineConfig) + engine, err := smeng.New(engineConfig) if err != nil { return nil, fmt.Errorf("error initializing snowman engine: %w", err) } if m.TracingEnabled { - snowmanEngine = smeng.TraceEngine(snowmanEngine, m.Tracer) + engine = smeng.TraceEngine(engine, m.tracer) } - // create bootstrap gear + // create smbootstrap gear bootstrapCfg := smbootstrap.Config{ AllGetsServer: 
snowGetHandler, Ctx: ctx, - Beacons: vdrs, + Beacons: beacons, SampleK: sampleK, StartupTracker: startupTracker, - Sender: snowmanMessageSender, + Sender: messageSender, BootstrapTracker: sb, Timer: h, AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - Blocked: blockBlocker, - VM: vmWrappingProposerVM, + Blocked: blocked, + VM: vm, + Bootstrapped: bootstrapFunc, } - var snowmanBootstrapper common.BootstrapableEngine - snowmanBootstrapper, err = smbootstrap.New( + var bootstrapper common.BootstrapableEngine + bootstrapper, err = smbootstrap.New( bootstrapCfg, - snowmanEngine.Start, + engine.Start, ) if err != nil { return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err) } if m.TracingEnabled { - snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer) + bootstrapper = common.TraceBootstrapableEngine(bootstrapper, m.tracer) } - avaGetHandler, err := avagetter.New( - vtxManager, - avalancheMessageSender, - ctx.Log, - m.BootstrapMaxTimeGetAncestors, - m.BootstrapAncestorsMaxContainersSent, - ctx.AvalancheRegisterer, + // create state sync gear + stateSyncCfg, err := syncer.NewConfig( + snowGetHandler, + ctx, + startupTracker, + messageSender, + beacons, + sampleK, + bootstrapWeight/2+1, // must be > 50% + m.StateSyncBeacons, + vm, ) if err != nil { - return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) - } - - // create engine gear - avalancheEngine := aveng.New(ctx, avaGetHandler, linearizableVM) - if m.TracingEnabled { - avalancheEngine = common.TraceEngine(avalancheEngine, m.Tracer) - } - - // create bootstrap gear - avalancheBootstrapperConfig := avbootstrap.Config{ - AllGetsServer: avaGetHandler, - Ctx: ctx, - Beacons: vdrs, - StartupTracker: startupTracker, - Sender: avalancheMessageSender, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: vtxManager, - VM: linearizableVM, 
- } - if ctx.ChainID == m.XChainID { - avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID] + return nil, fmt.Errorf("couldn't initialize state syncer configuration: %w", err) } - - avalancheBootstrapper, err := avbootstrap.New( - avalancheBootstrapperConfig, - snowmanBootstrapper.Start, + stateSyncer := syncer.New( + stateSyncCfg, + bootstrapper.Start, ) - if err != nil { - return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) - } if m.TracingEnabled { - avalancheBootstrapper = common.TraceBootstrapableEngine(avalancheBootstrapper, m.Tracer) + stateSyncer = common.TraceStateSyncer(stateSyncer, m.tracer) } h.SetEngineManager(&handler.EngineManager{ - Avalanche: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: avalancheBootstrapper, - Consensus: avalancheEngine, - }, + Avalanche: nil, Snowman: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: snowmanBootstrapper, - Consensus: snowmanEngine, + StateSyncer: stateSyncer, + Bootstrapper: bootstrapper, + Consensus: engine, }, }) - // Register health check for this chain - if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { + // Register health checks + if err := m.health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) } - return &chain{ - Name: chainAlias, - Context: ctx, - VM: dagVM, - Handler: h, + return &chain[T]{ + Name: chainAlias, + Context: ctx, + VM: vm, + WrappedVM: wrappedVM, + Handler: h, }, nil } -// Create a linear chain using the Snowman consensus engine -func (m *manager) createSnowmanChain( +type platformBuilder struct { + snowmanBuilder[*VM] +} + +func (p platformBuilder) build( + m *chainManager, ctx *snow.ConsensusContext, - genesisData []byte, - vdrs validators.Manager, + params chainParameters, + vdrs, beacons validators.Manager, - vm block.ChainVM, - fxs []*common.Fx, + vmFactory 
vms.Factory[*VM], + sb subnets.Subnet, +) (*chain[*VM], error) { + return p.snowmanBuilder.build(m, ctx, params, vdrs, beacons, vmFactory, sb) +} + +type avalancheBuilder struct{} + +func (x avalancheBuilder) build( + m *chainManager, + ctx *snow.ConsensusContext, + params chainParameters, + vdrs, + _ validators.Manager, + vmFactory vms.Factory[*avm.VM], sb subnets.Subnet, -) (*chain, error) { +) (*chain[*linearizeOnInitializeVM], error) { + vm, err := vmFactory.New(ctx.Log) + if err != nil { + return nil, err + } + ctx.Lock.Lock() defer ctx.Lock.Unlock() ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + meterDB, err := meterdb.New("db", ctx.Registerer, m.db) if err != nil { return nil, err } prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) vmDB := prefixdb.New(vmDBPrefix, prefixDB) - bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB) + vertexDB := prefixdb.New(vertexDBPrefix, prefixDB) + vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB) + txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB) + blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB) - blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) if err != nil { return nil, err } - - // Passes messages from the consensus engine to the network - messageSender, err := sender.New( - ctx, - m.MsgCreator, - m.Net, - m.ManagerConfig.Router, - m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - sb, - ) + txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) if err != nil { - return nil, fmt.Errorf("couldn't initialize sender: %w", err) + return nil, err + } + blockBlocker, err := queue.NewWithMissing(blockBootstrappingDB, "block", 
ctx.Registerer) + if err != nil { + return nil, err + } + + // Passes messages from the avalanche engines to the network + avalancheMessageSender, err := sender.New( + ctx, + m.msgCreator, + m.net, + m.router, + m.timeoutManager, + p2p.EngineType_ENGINE_TYPE_AVALANCHE, + sb, + ) + if err != nil { + return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) } if m.TracingEnabled { - messageSender = sender.Trace(messageSender, m.Tracer) + avalancheMessageSender = sender.Trace(avalancheMessageSender, m.tracer) } - err = m.BlockAcceptorGroup.RegisterAcceptor( + err = m.vertexAcceptorGroup.RegisterAcceptor( ctx.ChainID, "gossip", - messageSender, + avalancheMessageSender, false, ) if err != nil { // Set up the event dispatcher return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) } - var ( - bootstrapFunc func() - subnetConnector = validators.UnhandledSubnetConnector + // Passes messages from the snowman engines to the network + snowmanMessageSender, err := sender.New( + ctx, + m.msgCreator, + m.net, + m.router, + m.timeoutManager, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + sb, ) - // If [m.validatorState] is nil then we are creating the P-Chain. Since the - // P-Chain is the first chain to be created, we can use it to initialize - // required interfaces for the other chains - if m.validatorState == nil { - valState, ok := vm.(validators.State) - if !ok { - return nil, fmt.Errorf("expected validators.State but got %T", vm) - } + if err != nil { + return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) + } - if m.TracingEnabled { - valState = validators.Trace(valState, "platformvm", m.Tracer) - } + if m.TracingEnabled { + snowmanMessageSender = sender.Trace(snowmanMessageSender, m.tracer) + } - // Notice that this context is left unlocked. This is because the - // lock will already be held when accessing these values on the - // P-chain. 
- ctx.ValidatorState = valState + err = m.blockAcceptorGroup.RegisterAcceptor( + ctx.ChainID, + "gossip", + snowmanMessageSender, + false, + ) + if err != nil { // Set up the event dispatcher + return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) + } - // Initialize the validator state for future chains. - m.validatorState = validators.NewLockedState(&ctx.Lock, valState) - if m.TracingEnabled { - m.validatorState = validators.Trace(m.validatorState, "lockedState", m.Tracer) - } + chainConfig, err := m.getChainConfig(ctx.ChainID) + if err != nil { + return nil, fmt.Errorf("error while fetching chain config: %w", err) + } - if !m.ManagerConfig.SybilProtectionEnabled { - m.validatorState = validators.NewNoValidatorsState(m.validatorState) - ctx.ValidatorState = validators.NewNoValidatorsState(ctx.ValidatorState) - } + var wrappedVM vertex.LinearizableVMWithEngine + wrappedVM = vm + if m.MeterVMEnabled { + wrappedVM = metervm.NewVertexVM(vm) + } + if m.TracingEnabled { + wrappedVM = tracedvm.NewVertexVM(vm, m.tracer) + } - // Set this func only for platform - // - // The snowman bootstrapper ensures this function is only executed once, so - // we don't need to be concerned about closing this channel multiple times. 
- bootstrapFunc = func() { - close(m.unblockChainCreatorCh) - } + // Handles serialization/deserialization of vertices and also the + // persistence of vertices + vtxManager := state.NewSerializer( + state.SerializerConfig{ + ChainID: ctx.ChainID, + VM: wrappedVM, + DB: vertexDB, + Log: ctx.Log, + CortinaTime: version.GetCortinaTime(ctx.NetworkID), + }, + ) - // Set up the subnet connector for the P-Chain - subnetConnector, ok = vm.(validators.SubnetConnector) - if !ok { - return nil, fmt.Errorf("expected validators.SubnetConnector but got %T", vm) - } + avalancheRegisterer := metrics.NewOptionalGatherer() + snowmanRegisterer := metrics.NewOptionalGatherer() + + registerer := metrics.NewMultiGatherer() + if err := registerer.Register("avalanche", avalancheRegisterer); err != nil { + return nil, err + } + if err := registerer.Register("", snowmanRegisterer); err != nil { + return nil, err + } + if err := ctx.Context.Metrics.Register(registerer); err != nil { + return nil, err } - // Initialize the ProposerVM and the vm wrapped inside it - chainConfig, err := m.getChainConfig(ctx.ChainID) + ctx.Context.Metrics = avalancheRegisterer + + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, defaultChannelSize) + fxs := []*common.Fx{ + { + ID: secp256k1fx.ID, + Fx: (&secp256k1fx.Factory{}).New(), + }, + { + ID: nftfx.ID, + Fx: (&nftfx.Factory{}).New(), + }, + { + ID: propertyfx.ID, + Fx: (&propertyfx.Factory{}).New(), + }, + } + + // The only difference between using avalancheMessageSender and + // snowmanMessageSender here is where the metrics will be placed. Because we + // end up using this sender after the linearization, we pass in + // snowmanMessageSender here. 
+ err = wrappedVM.Initialize( + context.TODO(), + ctx.Context, + vmDB, + params.GenesisData, + chainConfig.Upgrade, + chainConfig.Config, + msgChan, + fxs, + snowmanMessageSender, + ) if err != nil { - return nil, fmt.Errorf("error while fetching chain config: %w", err) + return nil, fmt.Errorf("error during m's Initialize: %w", err) } + // Initialize the ProposerVM and the m wrapped inside it var ( minBlockDelay = proposervm.DefaultMinBlockDelay numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks ) - if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { + if subnetCfg, ok := m.subnetConfigs[ctx.SubnetID]; ok { minBlockDelay = subnetCfg.ProposerMinBlockDelay numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks } - m.Log.Info("creating proposervm wrapper", + m.log.Info("creating proposervm wrapper", zap.Time("activationTime", m.ApricotPhase4Time), zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), zap.Duration("minBlockDelay", minBlockDelay), zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + chainAlias := m.aliaser.PrimaryAliasOrDefault(ctx.ChainID) + + // Note: this does not use [dagVM] to ensure we use the [m]'s height index. + untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm) + + var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM if m.TracingEnabled { - vm = tracedvm.NewBlockVM(vm, chainAlias, m.Tracer) + vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, chainAlias, m.tracer) } - vm = proposervm.New( - vm, + // Note: vmWrappingProposerVM is the VM that the Snowman engines should be + // using. 
+ var vmWrappingProposerVM block.ChainVM = proposervm.New( + vmWrappedInsideProposerVM, proposervm.Config{ ActivationTime: m.ApricotPhase4Time, DurangoTime: version.GetDurangoTime(m.NetworkID), @@ -1123,31 +1232,31 @@ func (m *manager) createSnowmanChain( ) if m.MeterVMEnabled { - vm = metervm.NewBlockVM(vm) + vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM) } if m.TracingEnabled { - vm = tracedvm.NewBlockVM(vm, "proposervm", m.Tracer) + vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.tracer) } - // The channel through which a VM may send messages to the consensus engine - // VM uses this channel to notify engine that a block is ready to be made - msgChan := make(chan common.Message, defaultChannelSize) + // Note: linearizableVM is the VM that the Avalanche engines should be + // using. + linearizableVM := &initializeOnLinearizeVM{ + DAGVM: wrappedVM, + vmToInitialize: vmWrappingProposerVM, + vmToLinearize: untracedVMWrappedInsideProposerVM, - if err := vm.Initialize( - context.TODO(), - ctx.Context, - vmDB, - genesisData, - chainConfig.Upgrade, - chainConfig.Config, - msgChan, - fxs, - messageSender, - ); err != nil { - return nil, err + registerer: snowmanRegisterer, + ctx: ctx.Context, + db: vmDB, + genesisBytes: params.GenesisData, + upgradeBytes: chainConfig.Upgrade, + configBytes: chainConfig.Config, + toEngine: msgChan, + fxs: []*common.Fx{}, //TODO + appSender: snowmanMessageSender, } - bootstrapWeight, err := beacons.TotalWeight(ctx.SubnetID) + bootstrapWeight, err := vdrs.TotalWeight(ctx.SubnetID) if err != nil { return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err) } @@ -1171,22 +1280,22 @@ func (m *manager) createSnowmanChain( msgChan, m.FrontierPollFrequency, m.ConsensusAppConcurrency, - m.ResourceTracker, - subnetConnector, + m.resourceTracker, + validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector sb, connectedValidators, ) if err != 
nil { - return nil, fmt.Errorf("couldn't initialize message handler: %w", err) + return nil, fmt.Errorf("error initializing network handler: %w", err) } connectedBeacons := tracker.NewPeers() startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) - beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) + vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) snowGetHandler, err := snowgetter.New( - vm, - messageSender, + vmWrappingProposerVM, + snowmanMessageSender, ctx.Log, m.BootstrapMaxTimeGetAncestors, m.BootstrapAncestorsMaxContainersSent, @@ -1196,261 +1305,129 @@ func (m *manager) createSnowmanChain( return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } - var consensus smcon.Consensus = &smcon.Topological{} + var snowmanConsensus smcon.Consensus = &smcon.Topological{} if m.TracingEnabled { - consensus = smcon.Trace(consensus, m.Tracer) + snowmanConsensus = smcon.Trace(snowmanConsensus, m.tracer) } // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized - engineConfig := smeng.Config{ + snowmanEngineConfig := smeng.Config{ Ctx: ctx, AllGetsServer: snowGetHandler, - VM: vm, - Sender: messageSender, + VM: vmWrappingProposerVM, + Sender: snowmanMessageSender, Validators: vdrs, ConnectedValidators: connectedValidators, Params: consensusParams, - Consensus: consensus, - PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, + Consensus: snowmanConsensus, } - engine, err := smeng.New(engineConfig) + snowmanEngine, err := smeng.New(snowmanEngineConfig) if err != nil { return nil, fmt.Errorf("error initializing snowman engine: %w", err) } if m.TracingEnabled { - engine = smeng.TraceEngine(engine, m.Tracer) + snowmanEngine = smeng.TraceEngine(snowmanEngine, m.tracer) } - // create bootstrap gear + // create smbootstrap gear bootstrapCfg := smbootstrap.Config{ AllGetsServer: snowGetHandler, Ctx: ctx, - Beacons: 
beacons, + Beacons: vdrs, SampleK: sampleK, StartupTracker: startupTracker, - Sender: messageSender, + Sender: snowmanMessageSender, BootstrapTracker: sb, Timer: h, AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - Blocked: blocked, - VM: vm, - Bootstrapped: bootstrapFunc, + Blocked: blockBlocker, + VM: vmWrappingProposerVM, } - var bootstrapper common.BootstrapableEngine - bootstrapper, err = smbootstrap.New( + var snowmanBootstrapper common.BootstrapableEngine + snowmanBootstrapper, err = smbootstrap.New( bootstrapCfg, - engine.Start, + snowmanEngine.Start, ) if err != nil { return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err) } if m.TracingEnabled { - bootstrapper = common.TraceBootstrapableEngine(bootstrapper, m.Tracer) + snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.tracer) } - // create state sync gear - stateSyncCfg, err := syncer.NewConfig( - snowGetHandler, - ctx, - startupTracker, - messageSender, - beacons, - sampleK, - bootstrapWeight/2+1, // must be > 50% - m.StateSyncBeacons, - vm, + avaGetHandler, err := avagetter.New( + vtxManager, + avalancheMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.AvalancheRegisterer, ) if err != nil { - return nil, fmt.Errorf("couldn't initialize state syncer configuration: %w", err) + return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) } - stateSyncer := syncer.New( - stateSyncCfg, - bootstrapper.Start, - ) + // create engine gear + avalancheEngine := aveng.New(ctx, avaGetHandler, linearizableVM) if m.TracingEnabled { - stateSyncer = common.TraceStateSyncer(stateSyncer, m.Tracer) - } - - h.SetEngineManager(&handler.EngineManager{ - Avalanche: nil, - Snowman: &handler.Engine{ - StateSyncer: stateSyncer, - Bootstrapper: bootstrapper, - Consensus: engine, - }, - }) - - // Register health checks - if err := m.Health.RegisterHealthCheck(chainAlias, h, 
ctx.SubnetID.String()); err != nil { - return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) + avalancheEngine = common.TraceEngine(avalancheEngine, m.tracer) } - return &chain{ - Name: chainAlias, - Context: ctx, - VM: vm, - Handler: h, - }, nil -} - -func (m *manager) IsBootstrapped(id ids.ID) bool { - m.chainsLock.Lock() - chain, exists := m.chains[id] - m.chainsLock.Unlock() - if !exists { - return false + // create smbootstrap gear + avalancheBootstrapperConfig := avbootstrap.Config{ + AllGetsServer: avaGetHandler, + Ctx: ctx, + Beacons: vdrs, + StartupTracker: startupTracker, + Sender: avalancheMessageSender, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: vtxManager, + VM: linearizableVM, } - - return chain.Context().State.Get().State == snow.NormalOp -} - -func (m *manager) subnetsNotBootstrapped() []ids.ID { - m.subnetsLock.RLock() - defer m.subnetsLock.RUnlock() - - subnetsBootstrapping := make([]ids.ID, 0, len(m.subnets)) - for subnetID, subnet := range m.subnets { - if !subnet.IsBootstrapped() { - subnetsBootstrapping = append(subnetsBootstrapping, subnetID) - } + if ctx.ChainID == m.xChainID { + avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID] } - return subnetsBootstrapping -} -func (m *manager) registerBootstrappedHealthChecks() error { - bootstrappedCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { - subnetIDs := m.subnetsNotBootstrapped() - if len(subnetIDs) != 0 { - return subnetIDs, errNotBootstrapped - } - return []ids.ID{}, nil - }) - if err := m.Health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { - return fmt.Errorf("couldn't register bootstrapped readiness check: %w", err) - } - if err := m.Health.RegisterHealthCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { - return 
fmt.Errorf("couldn't register bootstrapped health check: %w", err) + avalancheBootstrapper, err := avbootstrap.New( + avalancheBootstrapperConfig, + snowmanBootstrapper.Start, + ) + if err != nil { + return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) } - // We should only report unhealthy if the node is partially syncing the - // primary network and is a validator. - if !m.PartialSyncPrimaryNetwork { - return nil + if m.TracingEnabled { + avalancheBootstrapper = common.TraceBootstrapableEngine(avalancheBootstrapper, m.tracer) } - partialSyncCheck := health.CheckerFunc(func(ctx context.Context) (interface{}, error) { - // Note: The health check is skipped during bootstrapping to allow a - // node to sync the network even if it was previously a validator. - if !m.IsBootstrapped(constants.PlatformChainID) { - return "node is currently bootstrapping", nil - } - if _, ok := m.Validators.GetValidator(constants.PrimaryNetworkID, m.NodeID); !ok { - return "node is not a primary network validator", nil - } - - m.Log.Warn("node is a primary network validator", - zap.Error(errPartialSyncAsAValidator), - ) - return "node is a primary network validator", errPartialSyncAsAValidator + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: avalancheBootstrapper, + Consensus: avalancheEngine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: snowmanBootstrapper, + Consensus: snowmanEngine, + }, }) - if err := m.Health.RegisterHealthCheck("validation", partialSyncCheck, health.ApplicationTag); err != nil { - return fmt.Errorf("couldn't register validation health check: %w", err) - } - return nil -} - -// Starts chain creation loop to process queued chains -func (m *manager) StartChainCreator(platformParams ChainParameters) error { - // Get the Primary Network's subnet config. If it wasn't registered, then we - // throw a fatal error. 
- sbConfig, ok := m.SubnetConfigs[constants.PrimaryNetworkID] - if !ok { - return errNoPrimaryNetworkConfig - } - - sb := subnets.New(m.NodeID, sbConfig) - m.subnetsLock.Lock() - m.subnets[platformParams.SubnetID] = sb - sb.AddChain(platformParams.ID) - m.subnetsLock.Unlock() - - // The P-chain is created synchronously to ensure that `VM.Initialize` has - // finished before returning from this function. This is required because - // the P-chain initializes state that the rest of the node initialization - // depends on. - m.createChain(platformParams) - - m.Log.Info("starting chain creator") - m.chainCreatorExited.Add(1) - go m.dispatchChainCreator() - return nil -} - -func (m *manager) dispatchChainCreator() { - defer m.chainCreatorExited.Done() - - select { - // This channel will be closed when Shutdown is called on the manager. - case <-m.chainCreatorShutdownCh: - return - case <-m.unblockChainCreatorCh: - } - - // Handle chain creations - for { - // Get the next chain we should create. - // Dequeue waits until an element is pushed, so this is not - // busy-looping. 
- chainParams, ok := m.chainsQueue.PopLeft() - if !ok { // queue is closed, return directly - return - } - m.createChain(chainParams) - } -} - -// Shutdown stops all the chains -func (m *manager) Shutdown() { - m.Log.Info("shutting down chain manager") - m.chainsQueue.Close() - close(m.chainCreatorShutdownCh) - m.chainCreatorExited.Wait() - m.ManagerConfig.Router.Shutdown(context.TODO()) -} - -// LookupVM returns the ID of the VM associated with an alias -func (m *manager) LookupVM(alias string) (ids.ID, error) { - return m.VMManager.Lookup(alias) -} - -// Notify registrants [those who want to know about the creation of chains] -// that the specified chain has been created -func (m *manager) notifyRegistrants(name string, ctx *snow.ConsensusContext, vm common.VM) { - for _, registrant := range m.registrants { - registrant.RegisterChain(name, ctx, vm) - } -} - -// getChainConfig returns value of a entry by looking at ID key and alias key -// it first searches ID key, then falls back to it's corresponding primary alias -func (m *manager) getChainConfig(id ids.ID) (ChainConfig, error) { - if val, ok := m.ManagerConfig.ChainConfigs[id.String()]; ok { - return val, nil - } - aliases, err := m.Aliases(id) - if err != nil { - return ChainConfig{}, err - } - for _, alias := range aliases { - if val, ok := m.ManagerConfig.ChainConfigs[alias]; ok { - return val, nil - } + // Register health check for this chain + if err := m.health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { + return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) } - return ChainConfig{}, nil + return &chain[*linearizeOnInitializeVM]{ + Name: chainAlias, + Context: ctx, + VM: untracedVMWrappedInsideProposerVM, + WrappedVM: wrappedVM, + Handler: h, + }, nil } diff --git a/vms/platformvm/config/config.go b/vms/platformvm/config/config.go index 50628c422af..d2c44cfdc02 100644 --- a/vms/platformvm/config/config.go +++ b/vms/platformvm/config/config.go 
@@ -6,32 +6,13 @@ package config import ( "time" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/uptime" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) // Struct collecting all foundational parameters of PlatformVM type Config struct { - // The node's chain manager - Chains chains.Manager - - // Node's validator set maps subnetID -> validators of the subnet - // - // Invariant: The primary network's validator set should have been added to - // the manager before calling VM.Initialize. - // Invariant: The primary network's validator set should be empty before - // calling VM.Initialize. - Validators validators.Manager - - // Provides access to the uptime manager as a thread safe data structure - UptimeLockedCalculator uptime.LockedCalculator - // True if the node is being run with staking enabled SybilProtectionEnabled bool @@ -93,8 +74,9 @@ type Config struct { RewardConfig reward.Config // Time of the AP3 network upgrade - ApricotPhase3Time time.Time - + ApricotPhase3Time time.Time + ApricotPhase4Time time.Time + ApricotPhase4MinPChainHeight uint64 // Time of the AP5 network upgrade ApricotPhase5Time time.Time @@ -114,7 +96,23 @@ type Config struct { // This config is particularly useful for triggering proposervm activation // on recently created subnets (without this, users need to wait for // [recentlyAcceptedWindowTTL] to pass for activation to occur). 
- UseCurrentHeight bool + UseCurrentHeight bool + TracingEnabled bool + NetworkID uint32 // ID of the network this node is connected to + AVAXAssetID ids.ID + MeterVMEnabled bool // Should each VM be wrapped with a MeterVM + FrontierPollFrequency time.Duration + ConsensusAppConcurrency int + // Max Time to spend fetching a container and its + // ancestors when responding to a GetAncestors + BootstrapMaxTimeGetAncestors time.Duration + // Max number of containers in an ancestors message sent by this node. + BootstrapAncestorsMaxContainersSent int + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. + BootstrapAncestorsMaxContainersReceived int + StateSyncBeacons []ids.NodeID + ChainDataDir string } func (c *Config) IsApricotPhase3Activated(timestamp time.Time) bool { @@ -150,23 +148,3 @@ func (c *Config) GetCreateSubnetTxFee(timestamp time.Time) uint64 { } return c.CreateAssetTxFee } - -// Create the blockchain described in [tx], but only if this node is a member of -// the subnet that validates the chain -func (c *Config) CreateChain(chainID ids.ID, tx *txs.CreateChainTx) { - if c.SybilProtectionEnabled && // Sybil protection is enabled, so nodes might not validate all chains - constants.PrimaryNetworkID != tx.SubnetID && // All nodes must validate the primary network - !c.TrackedSubnets.Contains(tx.SubnetID) { // This node doesn't validate this blockchain - return - } - - chainParams := chains.ChainParameters{ - ID: chainID, - SubnetID: tx.SubnetID, - GenesisData: tx.GenesisData, - VMID: tx.VMID, - FxIDs: tx.FxIDs, - } - - c.Chains.QueueChainCreation(chainParams) -} diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 834c9c8f245..14839d0e2b0 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -4,19 +4,217 @@ package platformvm import ( + "crypto" + "crypto/tls" + "sync" + + coreth "github.com/ava-labs/coreth/plugin/evm" + + 
"github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/api/keystore" + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/network" + noderpcchainvm "github.com/ava-labs/avalanchego/node/rpcchainvm" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/networking/handler" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/networking/timeout" + "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/buffer" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm/config" ) -var _ vms.Factory = (*Factory)(nil) +var _ vms.Factory[*VM] = (*Factory)(nil) + +func NewFactory( + config config.Config, + aliaser ids.Aliaser, + uptimeLockedCalculator uptime.LockedCalculator, + tlsCertificate tls.Certificate, + stakingBLSKey *bls.SecretKey, + tracer trace.Tracer, + logFactory logging.Factory, + vmManager noderpcchainvm.Manager, + blockAcceptorGroup snow.AcceptorGroup, + txAcceptorGroup snow.AcceptorGroup, + vertexAcceptorGroup snow.AcceptorGroup, + db database.Database, + msgCreator message.OutboundMsgBuilder, + router router.Router, + net network.Network, + bootstrappers validators.Manager, + validators validators.Manager, + nodeID 
ids.NodeID, + keystore keystore.Keystore, + atomicMemory *atomic.Memory, + xChainID ids.ID, + cChainID ids.ID, + criticalChains set.Set[ids.ID], + timeoutManager timeout.Manager, + health health.Registerer, + subnetConfigs map[ids.ID]subnets.Config, + chainConfigs map[string]ChainConfig, + shutdownNodeFunc func(exitCode int), + metrics metrics.MultiGatherer, + resourceTracker tracker.ResourceTracker, +) *Factory { + return &Factory{ + config: config, + aliaser: aliaser, + uptimeLockedCalculator: uptimeLockedCalculator, + tlsCertificate: tlsCertificate, + stakingBLSKey: stakingBLSKey, + tracer: tracer, + logFactory: logFactory, + avmFactory: &avm.Factory{}, + corethFactory: &coreth.Factory{}, + vmManager: vmManager, + blockAcceptorGroup: blockAcceptorGroup, + txAcceptorGroup: txAcceptorGroup, + vertexAcceptorGroup: vertexAcceptorGroup, + db: db, + msgCreator: msgCreator, + router: router, + net: net, + bootstrappers: bootstrappers, + validators: validators, + nodeID: nodeID, + keystore: keystore, + atomicMemory: atomicMemory, + xChainID: xChainID, + cChainID: cChainID, + criticalChains: criticalChains, + timeoutManager: timeoutManager, + health: health, + subnetConfigs: subnetConfigs, + chainConfigs: chainConfigs, + shutdownNodeFunc: shutdownNodeFunc, + metrics: metrics, + resourceTracker: resourceTracker, + } +} -// Factory can create new instances of the Platform Chain type Factory struct { - config.Config + config config.Config + genesis []byte + aliaser ids.Aliaser + uptimeLockedCalculator uptime.LockedCalculator + tlsCertificate tls.Certificate + stakingBLSKey *bls.SecretKey + tracer trace.Tracer + logFactory logging.Factory + avmFactory vms.Factory[*avm.VM] + corethFactory vms.Factory[*coreth.VM] + vmManager noderpcchainvm.Manager + blockAcceptorGroup snow.AcceptorGroup + txAcceptorGroup snow.AcceptorGroup + vertexAcceptorGroup snow.AcceptorGroup + db database.Database + msgCreator message.OutboundMsgBuilder + router router.Router + net network.Network + 
bootstrappers validators.Manager + validators validators.Manager + nodeID ids.NodeID + keystore keystore.Keystore + atomicMemory *atomic.Memory + xChainID ids.ID + cChainID ids.ID + criticalChains set.Set[ids.ID] + timeoutManager timeout.Manager + health health.Registerer + subnetConfigs map[ids.ID]subnets.Config + chainConfigs map[string]ChainConfig + shutdownNodeFunc func(exitCode int) + metrics metrics.MultiGatherer + resourceTracker tracker.ResourceTracker +} + +// New returns a new instance of the PlatformVM +func (f *Factory) New(log logging.Logger) (*VM, error) { + vm := &VM{ + Config: f.config, + aliaser: f.aliaser, + Validators: f.validators, + UptimeLockedCalculator: f.uptimeLockedCalculator, + } + + vm.chainManager = &chainManager{ + Config: f.config, + platformVMFactory: &factory{ + vm: vm, + }, + avmFactory: f.avmFactory, + corethFactory: f.corethFactory, + aliaser: f.aliaser, + stakingBLSKey: f.stakingBLSKey, + tracer: f.tracer, + log: log, + logFactory: f.logFactory, + vmManager: f.vmManager, + blockAcceptorGroup: f.blockAcceptorGroup, + txAcceptorGroup: f.txAcceptorGroup, + vertexAcceptorGroup: f.vertexAcceptorGroup, + db: f.db, + msgCreator: f.msgCreator, + router: f.router, + net: f.net, + bootstrappers: f.bootstrappers, + validators: f.validators, + nodeID: f.nodeID, + keystore: f.keystore, + atomicMemory: f.atomicMemory, + xChainID: f.xChainID, + cChainID: f.cChainID, + criticalChains: f.criticalChains, + timeoutManager: f.timeoutManager, + health: f.health, + subnetConfigs: f.subnetConfigs, + chainConfigs: f.chainConfigs, + shutdownNodeFunc: f.shutdownNodeFunc, + metrics: f.metrics, + resourceTracker: f.resourceTracker, + stakingSigner: f.tlsCertificate.PrivateKey.(crypto.Signer), + stakingCert: staking.CertificateFromX509(f.tlsCertificate.Leaf), + registrants: nil, + chainsQueue: buffer.NewUnboundedBlockingDeque[chainParameters](initialQueueSize), + unblockChainCreatorCh: make(chan struct{}), + chainCreatorShutdownCh: make(chan struct{}), + 
chainCreatorExited: sync.WaitGroup{}, + subnetsLock: sync.RWMutex{}, + subnets: make(map[ids.ID]subnets.Subnet), + chainsLock: sync.Mutex{}, + chains: make(map[ids.ID]handler.Handler), + validatorState: nil, + } + + return vm.chainManager.StartChainCreator(chainParameters{ + ID: constants.PlatformChainID, + SubnetID: constants.PrimaryNetworkID, + GenesisData: f.genesis, + VMID: constants.PlatformVMID, + }) +} + +type factory struct { + vm *VM } -// New returns a new instance of the Platform Chain -func (f *Factory) New(logging.Logger) (interface{}, error) { - return &VM{Config: f.Config}, nil +func (f *factory) New(logging.Logger) (*VM, error) { + return f.vm, nil } diff --git a/chains/linearizable_vm.go b/vms/platformvm/linearizable_vm.go similarity index 99% rename from chains/linearizable_vm.go rename to vms/platformvm/linearizable_vm.go index 97fe9eb4d1f..7e846dce0e1 100644 --- a/chains/linearizable_vm.go +++ b/vms/platformvm/linearizable_vm.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package chains +package platformvm import ( "context" diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 16e5b16844c..428e96f6c4d 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -5,14 +5,13 @@ package platformvm import ( "context" + stdjson "encoding/json" "errors" "fmt" "math" "net/http" "time" - stdjson "encoding/json" - "go.uber.org/zap" "golang.org/x/exp/maps" @@ -85,6 +84,7 @@ var ( // Service defines the API calls that can be made to the platform chain type Service struct { vm *VM + aliaser ids.Aliaser addrManager avax.AddressManager stakerAttributesCache *cache.LRU[ids.ID, *stakerAttributes] } @@ -1868,14 +1868,14 @@ func (s *Service) buildCreateBlockchainTx(args *CreateBlockchainArgs) (*txs.Tx, return nil, ids.ShortEmpty, fmt.Errorf("problem parsing genesis data: %w", err) } - vmID, err := s.vm.Chains.LookupVM(args.VMID) + vmID, err := s.aliaser.Lookup(args.VMID) if err != nil { return nil, ids.ShortEmpty, fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { - fxID, err := s.vm.Chains.LookupVM(fxIDStr) + fxID, err := s.aliaser.Lookup(fxIDStr) if err != nil { return nil, ids.ShortEmpty, fmt.Errorf("no FX with ID '%s' found", fxIDStr) } @@ -1969,7 +1969,7 @@ func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatus defer s.vm.ctx.Lock.Unlock() // if its aliased then vm created this chain. 
- if aliasedID, err := s.vm.Chains.Lookup(args.BlockchainID); err == nil { + if aliasedID, err := s.aliaser.Lookup(args.BlockchainID); err == nil { if s.nodeValidates(aliasedID) { reply.Status = status.Validating return nil diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 92e646ed697..50774bbdb0d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -453,6 +453,7 @@ func New( db database.Database, genesisBytes []byte, metricsReg prometheus.Registerer, + validators validators.Manager, cfg *config.Config, execCfg *config.ExecutionConfig, ctx *snow.Context, @@ -462,6 +463,7 @@ func New( s, err := newState( db, metrics, + validators, cfg, execCfg, ctx, @@ -506,6 +508,7 @@ func New( func newState( db database.Database, metrics metrics.Metrics, + validators validators.Manager, cfg *config.Config, execCfg *config.ExecutionConfig, ctx *snow.Context, @@ -629,7 +632,7 @@ func newState( return &state{ validatorState: newValidatorState(), - validators: cfg.Validators, + validators: validators, ctx: ctx, cfg: cfg, metrics: metrics, diff --git a/vms/platformvm/txs/executor/backend.go b/vms/platformvm/txs/executor/backend.go index 847aefc1649..8ba88d8ed0b 100644 --- a/vms/platformvm/txs/executor/backend.go +++ b/vms/platformvm/txs/executor/backend.go @@ -16,6 +16,7 @@ import ( type Backend struct { Config *config.Config + ChainCreator ChainCreator Ctx *snow.Context Clk *mockable.Clock Fx fx.Fx diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index aa3ea9a2aaf..964ebb3f0ae 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -29,6 +29,11 @@ var ( errMissingStartTimePreDurango = errors.New("staker transactions must have a StartTime pre-Durango") ) +type ChainCreator interface { + //TODO fxs? 
+ QueueChainCreation(chainID ids.ID, subnetID ids.ID, genesis []byte, vmID ids.ID) +} + type StandardTxExecutor struct { // inputs, to be filled before visitor methods are called *Backend @@ -94,7 +99,12 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { // If this proposal is committed and this node is a member of the subnet // that validates the blockchain, create the blockchain e.OnAccept = func() { - e.Config.CreateChain(txID, tx) + e.ChainCreator.QueueChainCreation( + txID, + tx.SubnetID, + tx.GenesisData, + tx.VMID, + ) } return nil } diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index 5ca5bfd6c24..fc3dc35003f 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -18,7 +18,6 @@ import ( "golang.org/x/exp/maps" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -27,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/snowtest" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -712,10 +710,7 @@ func TestTimestampListGenerator(t *testing.T) { func buildVM(t *testing.T) (*VM, ids.ID, error) { forkTime := defaultGenesisTime vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, - Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, TransformSubnetTxFee: 100 * defaultTxFee, diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 
2c8b025a128..18d08ca7777 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -87,17 +87,19 @@ type State interface { func NewManager( log logging.Logger, cfg config.Config, + validatorManager validators.Manager, state State, metrics metrics.Metrics, clk *mockable.Clock, ) Manager { return &manager{ - log: log, - cfg: cfg, - state: state, - metrics: metrics, - clk: clk, - caches: make(map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput]), + log: log, + cfg: cfg, + validators: validatorManager, + state: state, + metrics: metrics, + clk: clk, + caches: make(map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput]), recentlyAccepted: window.New[ids.ID]( window.Config{ Clock: clk, @@ -112,11 +114,12 @@ func NewManager( // TODO: Remove requirement for the P-chain's context lock to be held when // calling exported functions. type manager struct { - log logging.Logger - cfg config.Config - state State - metrics metrics.Metrics - clk *mockable.Clock + log logging.Logger + cfg config.Config + validators validators.Manager + state State + metrics metrics.Metrics + clk *mockable.Clock // Maps caches for each subnet that is currently tracked. 
// Key: Subnet ID @@ -280,7 +283,7 @@ func (m *manager) makePrimaryNetworkValidatorSet( func (m *manager) getCurrentPrimaryValidatorSet( ctx context.Context, ) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) + primaryMap := m.validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) return primaryMap, currentHeight, err } @@ -342,8 +345,8 @@ func (m *manager) getCurrentValidatorSets( ctx context.Context, subnetID ids.ID, ) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - subnetMap := m.cfg.Validators.GetMap(subnetID) - primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) + subnetMap := m.validators.GetMap(subnetID) + primaryMap := m.validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) return subnetMap, primaryMap, currentHeight, err } diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 8c4b527e453..e16590ca732 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -60,14 +60,26 @@ var ( _ secp256k1fx.VM = (*VM)(nil) _ validators.State = (*VM)(nil) _ validators.SubnetConnector = (*VM)(nil) + _ ChainCreator = (*VM)(nil) + _ txexecutor.ChainCreator = (*VM)(nil) ) +type ChainCreator interface { + QueueChainCreation(chainID ids.ID, subnetID ids.ID, genesis []byte, vmID ids.ID) +} + type VM struct { config.Config blockbuilder.Builder network.Network validators.State + // Initialized from Factory + aliaser ids.Aliaser + Validators validators.Manager + UptimeLockedCalculator uptime.LockedCalculator + chainManager *chainManager + metrics metrics.Metrics atomicUtxosManager avax.AtomicUTXOManager @@ -149,6 +161,7 @@ func (vm *VM) Initialize( vm.db, genesisBytes, registerer, + vm.Validators, &vm.Config, execConfig, vm.ctx, @@ -159,7 +172,14 @@ func (vm *VM) Initialize( return err } - validatorManager := 
pvalidators.NewManager(chainCtx.Log, vm.Config, vm.state, vm.metrics, &vm.clock) + validatorManager := pvalidators.NewManager( + chainCtx.Log, + vm.Config, + vm.Validators, + vm.state, + vm.metrics, + &vm.clock, + ) vm.State = validatorManager vm.atomicUtxosManager = avax.NewAtomicUTXOManager(chainCtx.SharedMemory, txs.Codec) utxoHandler := utxo.NewHandler(vm.ctx, &vm.clock, vm.fx) @@ -178,6 +198,7 @@ func (vm *VM) Initialize( txExecutorBackend := &txexecutor.Backend{ Config: &vm.Config, + ChainCreator: vm.chainManager, Ctx: vm.ctx, Clk: &vm.clock, Fx: vm.fx, @@ -208,7 +229,7 @@ func (vm *VM) Initialize( chainCtx.ValidatorState, txVerifier, mempool, - txExecutorBackend.Config.PartialSyncPrimaryNetwork, + vm.Config.PartialSyncPrimaryNetwork, appSender, registerer, execConfig.Network, @@ -362,7 +383,7 @@ func (vm *VM) createSubnet(subnetID ids.ID) error { if !ok { return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) } - vm.Config.CreateChain(chain.ID(), tx) + vm.QueueChainCreation(chain.ID(), tx.SubnetID, tx.GenesisData, tx.VMID) } return nil } @@ -424,6 +445,8 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { // Shutdown this blockchain func (vm *VM) Shutdown(context.Context) error { + vm.chainManager.Shutdown() + if vm.db == nil { return nil } @@ -499,6 +522,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { server.RegisterAfterFunc(vm.metrics.AfterRequest) service := &Service{ vm: vm, + aliaser: vm.aliaser, addrManager: avax.NewAddressManager(vm.ctx), stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ Size: stakerAttributesCacheSize, @@ -561,3 +585,16 @@ func (vm *VM) issueTx(ctx context.Context, tx *txs.Tx) error { return nil } + +func (vm *VM) QueueChainCreation( + chainID ids.ID, + subnetID ids.ID, + genesis []byte, + vmID ids.ID, +) { + vm.chainManager.QueueChainCreation(chainID, subnetID, genesis, vmID) +} + +func (vm *VM) IsBootstrapped(id ids.ID) bool { + 
return vm.chainManager.IsBootstrapped(id) +} diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 36186ec32ae..8d93a17a4ea 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" @@ -24,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/snowtest" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -364,15 +362,12 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { atomicDB := prefixdb.New([]byte{1}, baseDB) vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: mockable.MaxTime, - DurangoTime: mockable.MaxTime, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, }} ctx := snowtest.Context(t, snowtest.PChainID) diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 23e88a64636..66ed31307b7 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" 
"github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" @@ -35,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/snowtest" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" @@ -233,10 +231,7 @@ func defaultVM(t *testing.T, fork activeFork) (*VM, database.Database, *mutableS } vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, - Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, TransformSubnetTxFee: 100 * defaultTxFee, @@ -1126,15 +1121,12 @@ func TestRestartFullyAccepted(t *testing.T) { firstDB := prefixdb.New([]byte{}, db) firstVM := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} firstCtx := snowtest.Context(t, snowtest.PChainID) @@ -1213,15 +1205,12 @@ func TestRestartFullyAccepted(t *testing.T) { firstCtx.Lock.Unlock() secondVM := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - 
RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} secondCtx := snowtest.Context(t, snowtest.PChainID) @@ -1263,15 +1252,12 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(err) vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} initialClkTime := latestForkTime.Add(time.Second) @@ -1604,15 +1590,12 @@ func TestUnverifiedParent(t *testing.T) { require := require.New(t) vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} initialClkTime := latestForkTime.Add(time.Second) @@ -1768,14 +1751,11 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstDB := 
prefixdb.New([]byte{}, db) const firstUptimePercentage = 20 // 20% firstVM := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: firstUptimePercentage / 100., - RewardConfig: defaultRewardConfig, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UptimePercentage: firstUptimePercentage / 100., + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} firstCtx := snowtest.Context(t, snowtest.PChainID) @@ -1817,13 +1797,10 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondDB := prefixdb.New([]byte{}, db) const secondUptimePercentage = 21 // 21% > firstUptimePercentage, so uptime for reward is not met now secondVM := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: secondUptimePercentage / 100., - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UptimePercentage: secondUptimePercentage / 100., + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} secondCtx := snowtest.Context(t, snowtest.PChainID) @@ -1915,14 +1892,11 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { db := memdb.New() vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} ctx := snowtest.Context(t, snowtest.PChainID) diff --git 
a/vms/registry/mock_vm_getter.go b/vms/registry/mock_vm_getter.go index 30c38f1b6a7..620386666f4 100644 --- a/vms/registry/mock_vm_getter.go +++ b/vms/registry/mock_vm_getter.go @@ -10,11 +10,13 @@ package registry import ( - reflect "reflect" + "reflect" - ids "github.com/ava-labs/avalanchego/ids" - vms "github.com/ava-labs/avalanchego/vms" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/rpcchainvm" ) // MockVMGetter is a mock of VMGetter interface. @@ -41,11 +43,11 @@ func (m *MockVMGetter) EXPECT() *MockVMGetterMockRecorder { } // Get mocks base method. -func (m *MockVMGetter) Get() (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, error) { +func (m *MockVMGetter) Get() (map[ids.ID]vms.Factory[*rpcchainvm.VMClient], map[ids.ID]vms.Factory[*rpcchainvm.VMClient], error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get") - ret0, _ := ret[0].(map[ids.ID]vms.Factory) - ret1, _ := ret[1].(map[ids.ID]vms.Factory) + ret0, _ := ret[0].(map[ids.ID]vms.Factory[*rpcchainvm.VMClient]) + ret1, _ := ret[1].(map[ids.ID]vms.Factory[*rpcchainvm.VMClient]) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } diff --git a/vms/registry/vm_getter.go b/vms/registry/vm_getter.go index 826624744e3..817f915e72f 100644 --- a/vms/registry/vm_getter.go +++ b/vms/registry/vm_getter.go @@ -9,6 +9,7 @@ import ( "path/filepath" "github.com/ava-labs/avalanchego/ids" + rpcchainvm2 "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/vms" @@ -27,8 +28,8 @@ type VMGetter interface { // Get fetches the VMs that are registered and the VMs that are not // registered but available to be installed on the node. 
Get() ( - registeredVMs map[ids.ID]vms.Factory, - unregisteredVMs map[ids.ID]vms.Factory, + registeredVMs map[ids.ID]vms.Factory[*rpcchainvm.VMClient], + unregisteredVMs map[ids.ID]vms.Factory[*rpcchainvm.VMClient], err error, ) } @@ -36,7 +37,7 @@ type VMGetter interface { // VMGetterConfig defines settings for VMGetter type VMGetterConfig struct { FileReader filesystem.Reader - Manager vms.Manager + Manager rpcchainvm2.Manager PluginDirectory string CPUTracker resource.ProcessTracker RuntimeTracker runtime.Tracker @@ -53,14 +54,14 @@ func NewVMGetter(config VMGetterConfig) VMGetter { } } -func (getter *vmGetter) Get() (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, error) { +func (getter *vmGetter) Get() (map[ids.ID]vms.Factory[*rpcchainvm.VMClient], map[ids.ID]vms.Factory[*rpcchainvm.VMClient], error) { files, err := getter.config.FileReader.ReadDir(getter.config.PluginDirectory) if err != nil { return nil, nil, err } - registeredVMs := make(map[ids.ID]vms.Factory) - unregisteredVMs := make(map[ids.ID]vms.Factory) + registeredVMs := make(map[ids.ID]vms.Factory[*rpcchainvm.VMClient]) + unregisteredVMs := make(map[ids.ID]vms.Factory[*rpcchainvm.VMClient]) for _, file := range files { if file.IsDir() { continue @@ -95,7 +96,7 @@ func (getter *vmGetter) Get() (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, e } // If the error isn't "not found", then we should report the error. 
- if !errors.Is(err, vms.ErrNotFound) { + if !errors.Is(err, rpcchainvm2.ErrNotFound) { return nil, nil, err } diff --git a/vms/registry/vm_getter_test.go b/vms/registry/vm_getter_test.go index 30bab4232be..6ec9dd20daa 100644 --- a/vms/registry/vm_getter_test.go +++ b/vms/registry/vm_getter_test.go @@ -16,10 +16,10 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/node/rpcchainvm" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/resource" - "github.com/ava-labs/avalanchego/vms" ) var ( @@ -110,13 +110,13 @@ func TestGet_Success(t *testing.T) { registeredVMId := ids.GenerateTestID() unregisteredVMId := ids.GenerateTestID() - registeredVMFactory := vms.NewMockFactory(resources.ctrl) + registeredVMFactory := rpcchainvm.NewMockFactory(resources.ctrl) resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(twoValidVMs, nil) resources.mockManager.EXPECT().Lookup(registeredVMName).Times(1).Return(registeredVMId, nil) resources.mockManager.EXPECT().GetFactory(registeredVMId).Times(1).Return(registeredVMFactory, nil) resources.mockManager.EXPECT().Lookup(unregisteredVMName).Times(1).Return(unregisteredVMId, nil) - resources.mockManager.EXPECT().GetFactory(unregisteredVMId).Times(1).Return(nil, vms.ErrNotFound) + resources.mockManager.EXPECT().GetFactory(unregisteredVMId).Times(1).Return(nil, rpcchainvm.ErrNotFound) registeredVMs, unregisteredVMs, err := resources.getter.Get() @@ -133,7 +133,7 @@ func TestGet_Success(t *testing.T) { type vmGetterTestResources struct { ctrl *gomock.Controller mockReader *filesystem.MockReader - mockManager *vms.MockManager + mockManager *rpcchainvm.MockManager getter VMGetter } @@ -141,7 +141,7 @@ func initVMGetterTest(t *testing.T) *vmGetterTestResources { ctrl := gomock.NewController(t) mockReader := filesystem.NewMockReader(ctrl) - mockManager := vms.NewMockManager(ctrl) + 
mockManager := rpcchainvm.NewMockManager(ctrl) mockRegistry := prometheus.NewRegistry() mockCPUTracker, err := resource.NewManager( logging.NoLog{}, diff --git a/vms/registry/vm_registry.go b/vms/registry/vm_registry.go index 1374c4d46b8..1334758471e 100644 --- a/vms/registry/vm_registry.go +++ b/vms/registry/vm_registry.go @@ -7,7 +7,7 @@ import ( "context" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/node/rpcchainvm" ) var _ VMRegistry = (*vmRegistry)(nil) @@ -22,7 +22,7 @@ type VMRegistry interface { // VMRegistryConfig defines configurations for VMRegistry type VMRegistryConfig struct { VMGetter VMGetter - VMManager vms.Manager + VMManager rpcchainvm.Manager } type vmRegistry struct { diff --git a/vms/registry/vm_registry_test.go b/vms/registry/vm_registry_test.go index 12e39a7c29c..739a2e199c4 100644 --- a/vms/registry/vm_registry_test.go +++ b/vms/registry/vm_registry_test.go @@ -12,7 +12,7 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/node/rpcchainvm" ) var ( @@ -28,17 +28,17 @@ func TestReload_Success(t *testing.T) { resources := initVMRegistryTest(t) - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) + factory1 := rpcchainvm.NewMockFactory(resources.ctrl) + factory2 := rpcchainvm.NewMockFactory(resources.ctrl) + factory3 := rpcchainvm.NewMockFactory(resources.ctrl) + factory4 := rpcchainvm.NewMockFactory(resources.ctrl) - registeredVms := map[ids.ID]vms.Factory{ + registeredVms := map[ids.ID]rpcchainvm.Factory{ id1: factory1, id2: factory2, } - unregisteredVms := map[ids.ID]vms.Factory{ + unregisteredVms := map[ids.ID]rpcchainvm.Factory{ id3: factory3, id4: factory4, } @@ -82,17 +82,17 @@ func TestReload_PartialRegisterFailure(t *testing.T) { 
resources := initVMRegistryTest(t) - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) + factory1 := rpcchainvm.NewMockFactory(resources.ctrl) + factory2 := rpcchainvm.NewMockFactory(resources.ctrl) + factory3 := rpcchainvm.NewMockFactory(resources.ctrl) + factory4 := rpcchainvm.NewMockFactory(resources.ctrl) - registeredVms := map[ids.ID]vms.Factory{ + registeredVms := map[ids.ID]rpcchainvm.Factory{ id1: factory1, id2: factory2, } - unregisteredVms := map[ids.ID]vms.Factory{ + unregisteredVms := map[ids.ID]rpcchainvm.Factory{ id3: factory3, id4: factory4, } @@ -121,7 +121,7 @@ func TestReload_PartialRegisterFailure(t *testing.T) { type registryTestResources struct { ctrl *gomock.Controller mockVMGetter *MockVMGetter - mockVMManager *vms.MockManager + mockVMManager *rpcchainvm.MockManager vmRegistry VMRegistry } @@ -129,7 +129,7 @@ func initVMRegistryTest(t *testing.T) *registryTestResources { ctrl := gomock.NewController(t) mockVMGetter := NewMockVMGetter(ctrl) - mockVMManager := vms.NewMockManager(ctrl) + mockVMManager := rpcchainvm.NewMockManager(ctrl) vmRegistry := NewVMRegistry( VMRegistryConfig{ diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index d61c41d11af..2a41db6001b 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" ) -var _ vms.Factory = (*factory)(nil) +var _ vms.Factory[*VMClient] = (*factory)(nil) type factory struct { path string @@ -23,7 +23,7 @@ type factory struct { runtimeTracker runtime.Tracker } -func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTracker runtime.Tracker) vms.Factory { +func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTracker runtime.Tracker) vms.Factory[*VMClient] { return &factory{ path: path, 
processTracker: processTracker, @@ -31,7 +31,7 @@ func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTrac } } -func (f *factory) New(log logging.Logger) (interface{}, error) { +func (f *factory) New(log logging.Logger) (*VMClient, error) { config := &subprocess.Config{ Stderr: log, Stdout: log,