From 08931a6b673abd883d594afdb631986f7c76acc4 Mon Sep 17 00:00:00 2001 From: kangeunchan Date: Fri, 27 Feb 2026 13:45:52 +0900 Subject: [PATCH 1/2] test(upgrade): add state and resume unit coverage Signed-off-by: kangeunchan --- internal/application/upgrade/resume_test.go | 384 ++++++++++++++ .../upgrade/state_detector_test.go | 349 +++++++++++++ .../upgrade/state_transitioner_test.go | 183 +++++++ internal/application/upgrade/switch_test.go | 122 +++++ .../application/upgrade/test_helpers_test.go | 478 ++++++++++++++++++ 5 files changed, 1516 insertions(+) create mode 100644 internal/application/upgrade/resume_test.go create mode 100644 internal/application/upgrade/state_detector_test.go create mode 100644 internal/application/upgrade/state_transitioner_test.go create mode 100644 internal/application/upgrade/switch_test.go create mode 100644 internal/application/upgrade/test_helpers_test.go diff --git a/internal/application/upgrade/resume_test.go b/internal/application/upgrade/resume_test.go new file mode 100644 index 00000000..e33858c4 --- /dev/null +++ b/internal/application/upgrade/resume_test.go @@ -0,0 +1,384 @@ +package upgrade + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/altuslabsxyz/devnet-builder/internal/output" + "github.com/altuslabsxyz/devnet-builder/types" +) + +func newSilentOutputLogger() *output.Logger { + l := output.NewLogger() + l.SetJSONMode(true) + return l +} + +func TestResumeUseCase_BasicHelpers(t *testing.T) { + t.Parallel() + + state := ports.NewUpgradeState("v2", "local", false) + mgr := &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + } + uc := NewResumeUseCase(mgr, &mockStateDetector{}, NewStateTransitioner(), nil, newSilentOutputLogger()) + + got, err := uc.CheckState(context.Background()) + if err != 
nil { + t.Fatalf("CheckState error: %v", err) + } + if got != state { + t.Fatalf("CheckState state mismatch") + } + + got, err = uc.GetStatus(context.Background()) + if err != nil { + t.Fatalf("GetStatus error: %v", err) + } + if got != state { + t.Fatalf("GetStatus state mismatch") + } + + if err := uc.ClearState(context.Background()); err != nil { + t.Fatalf("ClearState error: %v", err) + } +} + +func TestResumeUseCase_GetStatusLoadError(t *testing.T) { + t.Parallel() + + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return nil, errors.New("load failed") }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + _, err := uc.GetStatus(context.Background()) + if err == nil || !strings.Contains(err.Error(), "failed to load state") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestResumeUseCase_ResumeOptions(t *testing.T) { + t.Parallel() + + t.Run("clear state", func(t *testing.T) { + t.Parallel() + deleted := false + uc := NewResumeUseCase( + &mockStateManager{deleteStateFunc: func(ctx context.Context) error { deleted = true; return nil }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{ClearState: true}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !deleted || res.Resumed { + t.Fatalf("clear-state branch not executed") + } + }) + + t.Run("show status", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{ShowStatus: true}) + if err != nil { 
+ t.Fatalf("unexpected error: %v", err) + } + if res.Resumed || res.State != state { + t.Fatalf("unexpected result: %+v", res) + } + }) + + t.Run("force restart", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + deleted := false + uc := NewResumeUseCase( + &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + deleteStateFunc: func(ctx context.Context) error { deleted = true; return nil }, + }, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{ForceRestart: true}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !deleted || res.Resumed { + t.Fatalf("force-restart branch failed") + } + }) +} + +func TestResumeUseCase_LoadAndValidateFailures(t *testing.T) { + t.Parallel() + + t.Run("corruption error", func(t *testing.T) { + t.Parallel() + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { + return nil, &ports.StateCorruptionError{Reason: "checksum mismatch"} + }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + _, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{}) + if err == nil || !strings.Contains(err.Error(), "state file is corrupted") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("load error", func(t *testing.T) { + t.Parallel() + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return nil, errors.New("boom") }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + _, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{}) + if err == nil || !strings.Contains(err.Error(), "failed to load state") { + 
t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("no state", func(t *testing.T) { + t.Parallel() + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return nil, nil }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if res.Resumed { + t.Fatalf("expected no resume") + } + }) + + t.Run("invalid state", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + uc := NewResumeUseCase( + &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + validateStateFunc: func(state *ports.UpgradeState) error { return errors.New("bad state") }, + }, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + _, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{}) + if err == nil || !strings.Contains(err.Error(), "invalid state") { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestResumeUseCase_TerminalStates(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + stage ports.ResumableStage + error string + message string + }{ + {name: "proposal rejected", stage: ports.ResumableStageProposalRejected, message: "terminal state: ProposalRejected"}, + {name: "failed", stage: ports.ResumableStageFailed, error: "vote failed", message: "terminal state: Failed"}, + {name: "completed", stage: ports.ResumableStageCompleted, message: "terminal state: Completed"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = tt.stage + state.Error = tt.error + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx 
context.Context) (*ports.UpgradeState, error) { return state, nil }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if res.Resumed { + t.Fatalf("expected non-resumed result") + } + if !strings.Contains(res.Message, tt.message) { + t.Fatalf("message = %q, expected to contain %q", res.Message, tt.message) + } + }) + } +} + +func TestResumeUseCase_ResumeFromOverride(t *testing.T) { + t.Parallel() + + t.Run("invalid transition", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + tr := &mockTransitioner{canTransitionFunc: func(from, to ports.ResumableStage) bool { return false }} + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }}, + &mockStateDetector{}, + tr, + nil, + newSilentOutputLogger(), + ) + + _, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{ResumeFrom: ports.ResumableStageVoting}) + if err == nil || !strings.Contains(err.Error(), "cannot resume from") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("override save failure", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + tr := &mockTransitioner{canTransitionFunc: func(from, to ports.ResumableStage) bool { return true }} + uc := NewResumeUseCase( + &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + saveStateFunc: func(ctx context.Context, state *ports.UpgradeState) error { return errors.New("save failed") }, + }, + &mockStateDetector{}, + tr, + nil, + newSilentOutputLogger(), + ) + + _, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{}, ports.ResumeOptions{ResumeFrom: ports.ResumableStageProposalSubmitted}) + 
if err == nil || !strings.Contains(err.Error(), "failed to save state after override") { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestResumeUseCase_Reconcile(t *testing.T) { + t.Parallel() + + t.Run("nil state", func(t *testing.T) { + t.Parallel() + uc := NewResumeUseCase( + &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return nil, nil }}, + &mockStateDetector{}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + state, err := uc.Reconcile(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if state != nil { + t.Fatalf("expected nil state") + } + }) + + t.Run("detect stage and save", func(t *testing.T) { + t.Parallel() + state := ports.NewUpgradeState("v2", "local", false) + saved := false + uc := NewResumeUseCase( + &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + saveStateFunc: func(ctx context.Context, s *ports.UpgradeState) error { saved = true; return nil }, + }, + &mockStateDetector{detectCurrentStageFunc: func(ctx context.Context, s *ports.UpgradeState) (ports.ResumableStage, error) { + return ports.ResumableStageProposalSubmitted, nil + }}, + NewStateTransitioner(), + nil, + newSilentOutputLogger(), + ) + + result, err := uc.Reconcile(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if result.Stage != ports.ResumableStageProposalSubmitted || !saved { + t.Fatalf("unexpected reconcile result: stage=%s saved=%v", result.Stage, saved) + } + }) +} + +func TestResumeUseCase_ResumeExecutesWorkflow(t *testing.T) { + state := ports.NewUpgradeState("v2", "local", true) + state.Stage = ports.ResumableStageVerifyingResume + + mgr := &mockStateManager{ + loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }, + } + + rpc := &mockRPCClient{} + heights := []int64{10, 11} + rpc.getBlockHeightFunc = func(ctx context.Context) 
(int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + } + + execUC := &ExecuteUpgradeUseCase{ + rpcClient: rpc, + devnetRepo: &mockDevnetRepo{}, + logger: &testLogger{}, + } + + resExec := &ResumableExecuteUpgradeUseCase{ + executeUC: execUC, + stateManager: mgr, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + uc := NewResumeUseCase(mgr, &mockStateDetector{}, NewStateTransitioner(), resExec, newSilentOutputLogger()) + + res, err := uc.Resume(context.Background(), dto.ExecuteUpgradeInput{SkipGovernance: true, Mode: types.ExecutionModeLocal}, ports.ResumeOptions{}) + if err != nil { + t.Fatalf("expected successful resume, got error: %v", err) + } + if !res.Resumed || res.UpgradeOutput == nil || !res.UpgradeOutput.Success { + t.Fatalf("unexpected resume result: %+v", res) + } +} diff --git a/internal/application/upgrade/state_detector_test.go b/internal/application/upgrade/state_detector_test.go new file mode 100644 index 00000000..69fee5ae --- /dev/null +++ b/internal/application/upgrade/state_detector_test.go @@ -0,0 +1,349 @@ +package upgrade + +import ( + "context" + "errors" + "testing" + + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" +) + +func TestStateDetector_DetectProposalStatus(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + proposalID uint64 + proposal *ports.Proposal + err error + want string + wantErr bool + }{ + {name: "invalid id", proposalID: 0, want: "unknown", wantErr: true}, + {name: "voting", proposalID: 1, proposal: &ports.Proposal{Status: ports.ProposalStatusVoting}, want: "voting"}, + {name: "passed", proposalID: 1, proposal: &ports.Proposal{Status: ports.ProposalStatusPassed}, want: "passed"}, + {name: "rejected", proposalID: 1, proposal: &ports.Proposal{Status: ports.ProposalStatusRejected}, want: "rejected"}, + {name: "failed", proposalID: 1, proposal: &ports.Proposal{Status: ports.ProposalStatusFailed}, want: "failed"}, + {name: "pending", 
proposalID: 1, proposal: &ports.Proposal{Status: ports.ProposalStatusPending}, want: "pending"}, + {name: "unknown status", proposalID: 1, proposal: &ports.Proposal{Status: "UNKNOWN"}, want: "unknown"}, + {name: "rpc error", proposalID: 1, err: errors.New("rpc"), want: "unknown", wantErr: true}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + if tt.err != nil { + return nil, tt.err + } + return tt.proposal, nil + }, + }) + + got, err := d.DetectProposalStatus(context.Background(), tt.proposalID) + if got != tt.want { + t.Fatalf("status = %q, want %q", got, tt.want) + } + if (err != nil) != tt.wantErr { + t.Fatalf("err = %v, wantErr=%v", err, tt.wantErr) + } + }) + } +} + +func TestStateDetector_DetectChainStatus(t *testing.T) { + t.Parallel() + + t.Run("unreachable when chain not running", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{isChainRunningFunc: func(ctx context.Context) bool { return false }}) + status, err := d.DetectChainStatus(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if status != "unreachable" { + t.Fatalf("status = %q, want unreachable", status) + } + }) + + t.Run("unreachable when first height query fails", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{ + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 0, errors.New("boom") }, + }) + status, err := d.DetectChainStatus(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if status != "unreachable" { + t.Fatalf("status = %q, want unreachable", status) + } + }) + + t.Run("halted on cancelled context", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + 
d := NewStateDetector(&mockRPCClient{ + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 10, nil }, + }) + status, err := d.DetectChainStatus(ctx) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if status != "halted" { + t.Fatalf("status = %q, want halted", status) + } + }) + + t.Run("running when height advances", func(t *testing.T) { + heights := []int64{10, 12} + d := NewStateDetector(&mockRPCClient{ + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }, + }) + status, err := d.DetectChainStatus(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if status != "running" { + t.Fatalf("status = %q, want running", status) + } + }) +} + +func TestStateDetector_DetectValidatorVotes(t *testing.T) { + t.Parallel() + + d := NewStateDetector(&mockRPCClient{}) + if _, err := d.DetectValidatorVotes(context.Background(), 0); err == nil { + t.Fatalf("expected error for invalid proposal ID") + } + votes, err := d.DetectValidatorVotes(context.Background(), 10) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(votes) != 0 { + t.Fatalf("expected empty votes, got %d", len(votes)) + } +} + +func TestStateDetector_DetectCurrentStage(t *testing.T) { + t.Parallel() + + d := NewStateDetector(&mockRPCClient{}) + if _, err := d.DetectCurrentStage(context.Background(), nil); err == nil { + t.Fatalf("expected nil state error") + } +} + +func TestStateDetector_DetectSkipGovStage(t *testing.T) { + t.Parallel() + + t.Run("initialized when no switches", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{}) + state := &ports.UpgradeState{SkipGovernance: true, Stage: ports.ResumableStageInitialized} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err 
!= nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageInitialized { + t.Fatalf("stage = %s", stage) + } + }) + + t.Run("switching when partially switched", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{}) + state := &ports.UpgradeState{ + SkipGovernance: true, + Stage: ports.ResumableStageSwitchingBinary, + NodeSwitches: []ports.NodeSwitchState{ + {NodeName: "node0", Switched: true}, + {NodeName: "node1", Switched: false}, + }, + } + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageSwitchingBinary { + t.Fatalf("stage = %s", stage) + } + }) + + t.Run("verifying when all switched and chain running", func(t *testing.T) { + heights := []int64{10, 11} + d := NewStateDetector(&mockRPCClient{ + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }, + }) + state := &ports.UpgradeState{ + SkipGovernance: true, + Stage: ports.ResumableStageSwitchingBinary, + NodeSwitches: []ports.NodeSwitchState{ + {NodeName: "node0", Switched: true}, + {NodeName: "node1", Switched: true}, + }, + } + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageVerifyingResume { + t.Fatalf("stage = %s", stage) + } + }) +} + +func TestStateDetector_DetectGovPathStages(t *testing.T) { + t.Parallel() + + t.Run("initialized when proposal id missing", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{}) + state := &ports.UpgradeState{Stage: ports.ResumableStageInitialized, ProposalID: 0} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageInitialized { + 
t.Fatalf("stage = %s", stage) + } + }) + + t.Run("status mapping", func(t *testing.T) { + tests := []struct { + name string + status ports.ProposalStatus + want ports.ResumableStage + }{ + {name: "pending", status: ports.ProposalStatusPending, want: ports.ResumableStageProposalSubmitted}, + {name: "voting", status: ports.ProposalStatusVoting, want: ports.ResumableStageVoting}, + {name: "rejected", status: ports.ProposalStatusRejected, want: ports.ResumableStageProposalRejected}, + {name: "failed", status: ports.ProposalStatusFailed, want: ports.ResumableStageFailed}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: tt.status}, nil + }, + }) + state := &ports.UpgradeState{Stage: ports.ResumableStageProposalSubmitted, ProposalID: 10} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != tt.want { + t.Fatalf("stage = %s, want %s", stage, tt.want) + } + }) + } + }) + + t.Run("passed stage with zero upgrade height", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusPassed}, nil + }, + }) + state := &ports.UpgradeState{Stage: ports.ResumableStageVoting, ProposalID: 1, UpgradeHeight: 0} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageWaitingForHeight { + t.Fatalf("stage = %s", stage) + } + }) + + t.Run("passed stage halted chain", func(t *testing.T) { + t.Parallel() + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return 
&ports.Proposal{Status: ports.ProposalStatusPassed}, nil + }, + isChainRunningFunc: func(ctx context.Context) bool { return false }, + }) + state := &ports.UpgradeState{Stage: ports.ResumableStageWaitingForHeight, ProposalID: 1, UpgradeHeight: 100} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageChainHalted { + t.Fatalf("stage = %s", stage) + } + }) + + t.Run("passed stage running before target height", func(t *testing.T) { + heights := []int64{90, 91, 91} + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusPassed}, nil + }, + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }, + }) + state := &ports.UpgradeState{Stage: ports.ResumableStageWaitingForHeight, ProposalID: 1, UpgradeHeight: 100} + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageWaitingForHeight { + t.Fatalf("stage = %s", stage) + } + }) + + t.Run("passed stage running with all switches", func(t *testing.T) { + heights := []int64{120, 121, 121} + d := NewStateDetector(&mockRPCClient{ + getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusPassed}, nil + }, + isChainRunningFunc: func(ctx context.Context) bool { return true }, + getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }, + }) + state := &ports.UpgradeState{ + Stage: ports.ResumableStageSwitchingBinary, + ProposalID: 1, + UpgradeHeight: 100, + NodeSwitches: []ports.NodeSwitchState{ + {NodeName: "node0", 
Switched: true}, + {NodeName: "node1", Switched: true}, + }, + } + stage, err := d.DetectCurrentStage(context.Background(), state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if stage != ports.ResumableStageVerifyingResume { + t.Fatalf("stage = %s", stage) + } + }) +} diff --git a/internal/application/upgrade/state_transitioner_test.go b/internal/application/upgrade/state_transitioner_test.go new file mode 100644 index 00000000..fb2f0d3f --- /dev/null +++ b/internal/application/upgrade/state_transitioner_test.go @@ -0,0 +1,183 @@ +package upgrade + +import ( + "testing" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" +) + +func TestStateTransitioner_CanTransition(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + + tests := []struct { + name string + from ports.ResumableStage + to ports.ResumableStage + want bool + }{ + {name: "initialized to proposal", from: ports.ResumableStageInitialized, to: ports.ResumableStageProposalSubmitted, want: true}, + {name: "initialized to failed", from: ports.ResumableStageInitialized, to: ports.ResumableStageFailed, want: true}, + {name: "completed to failed invalid", from: ports.ResumableStageCompleted, to: ports.ResumableStageFailed, want: false}, + {name: "unknown source invalid", from: "Unknown", to: ports.ResumableStageFailed, want: false}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := tr.CanTransition(tt.from, tt.to); got != tt.want { + t.Fatalf("CanTransition(%s, %s) = %v, want %v", tt.from, tt.to, got, tt.want) + } + }) + } +} + +func TestStateTransitioner_TransitionTo(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + state := ports.NewUpgradeState("v2", "local", false) + + if err := tr.TransitionTo(state, ports.ResumableStageProposalSubmitted, "proposal submitted"); err != nil { + t.Fatalf("TransitionTo returned error: %v", err) + } + + if state.Stage != 
ports.ResumableStageProposalSubmitted { + t.Fatalf("state.Stage = %s, want %s", state.Stage, ports.ResumableStageProposalSubmitted) + } + if len(state.StageHistory) != 2 { + t.Fatalf("history length = %d, want 2", len(state.StageHistory)) + } + last := state.StageHistory[len(state.StageHistory)-1] + if last.From != ports.ResumableStageInitialized || last.To != ports.ResumableStageProposalSubmitted { + t.Fatalf("unexpected transition history: %+v", last) + } + if last.Reason != "proposal submitted" { + t.Fatalf("reason = %q, want %q", last.Reason, "proposal submitted") + } + if state.UpdatedAt.Before(state.CreatedAt) { + t.Fatalf("updatedAt should be >= createdAt") + } +} + +func TestStateTransitioner_TransitionToFailedSetsError(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + state := ports.NewUpgradeState("v2", "local", false) + + if err := tr.TransitionTo(state, ports.ResumableStageFailed, "switch failed"); err != nil { + t.Fatalf("TransitionTo returned error: %v", err) + } + if state.Error != "switch failed" { + t.Fatalf("state.Error = %q, want %q", state.Error, "switch failed") + } +} + +func TestStateTransitioner_TransitionToInvalid(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + state := ports.NewUpgradeState("v2", "local", false) + + err := tr.TransitionTo(state, ports.ResumableStageCompleted, "skip") + if err == nil { + t.Fatalf("expected invalid transition error") + } +} + +func TestStateTransitioner_TransitionToNilState(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + err := tr.TransitionTo(nil, ports.ResumableStageCompleted, "nil") + if err == nil { + t.Fatalf("expected error for nil state") + } +} + +func TestStateTransitioner_GetValidTransitionsReturnsCopy(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + transitions := tr.GetValidTransitions(ports.ResumableStageInitialized) + if len(transitions) == 0 { + t.Fatalf("expected transitions") + } + transitions[0] = 
ports.ResumableStageCompleted + + transitions2 := tr.GetValidTransitions(ports.ResumableStageInitialized) + if transitions2[0] == ports.ResumableStageCompleted { + t.Fatalf("expected copy, got shared backing array") + } +} + +func TestStateTransitioner_IsGovernanceRequired(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + + if !tr.IsGovernanceRequired(&ports.UpgradeState{SkipGovernance: false}) { + t.Fatalf("expected governance required") + } + if tr.IsGovernanceRequired(&ports.UpgradeState{SkipGovernance: true}) { + t.Fatalf("expected governance not required") + } +} + +func TestStateTransitioner_GetNextStages(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + + govCases := []struct { + from ports.ResumableStage + to ports.ResumableStage + }{ + {from: ports.ResumableStageInitialized, to: ports.ResumableStageProposalSubmitted}, + {from: ports.ResumableStageProposalSubmitted, to: ports.ResumableStageVoting}, + {from: ports.ResumableStageVoting, to: ports.ResumableStageWaitingForHeight}, + {from: ports.ResumableStageWaitingForHeight, to: ports.ResumableStageChainHalted}, + {from: ports.ResumableStageChainHalted, to: ports.ResumableStageSwitchingBinary}, + {from: ports.ResumableStageSwitchingBinary, to: ports.ResumableStageVerifyingResume}, + {from: ports.ResumableStageVerifyingResume, to: ports.ResumableStageCompleted}, + } + for _, c := range govCases { + if got := tr.GetNextStageForGovPath(c.from); got != c.to { + t.Fatalf("gov next(%s)=%s, want %s", c.from, got, c.to) + } + } + if got := tr.GetNextStageForGovPath("unknown"); got != "" { + t.Fatalf("expected empty stage, got %s", got) + } + + if got := tr.GetNextStageForSkipGovPath(ports.ResumableStageInitialized); got != ports.ResumableStageSwitchingBinary { + t.Fatalf("skip-gov next = %s", got) + } + if got := tr.GetNextStageForSkipGovPath(ports.ResumableStageVerifyingResume); got != ports.ResumableStageCompleted { + t.Fatalf("skip-gov final next = %s", got) + } + if got := 
tr.GetNextStageForSkipGovPath(ports.ResumableStageVoting); got != "" { + t.Fatalf("expected empty stage, got %s", got) + } +} + +func TestStateTransitioner_TransitionHistoryTimestamp(t *testing.T) { + t.Parallel() + + tr := NewStateTransitioner() + state := ports.NewUpgradeState("v2", "local", false) + before := time.Now().Add(-time.Second) + + if err := tr.TransitionTo(state, ports.ResumableStageProposalSubmitted, "ok"); err != nil { + t.Fatalf("transition failed: %v", err) + } + + last := state.StageHistory[len(state.StageHistory)-1] + if last.Timestamp.Before(before) { + t.Fatalf("expected transition timestamp to be current") + } +} diff --git a/internal/application/upgrade/switch_test.go b/internal/application/upgrade/switch_test.go new file mode 100644 index 00000000..0e80559b --- /dev/null +++ b/internal/application/upgrade/switch_test.go @@ -0,0 +1,122 @@ +package upgrade + +import ( + "context" + "errors" + "testing" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/altuslabsxyz/devnet-builder/types" +) + +func TestSwitchBinaryUseCase_Execute_LocalMode(t *testing.T) { + t.Parallel() + + pid := 999 + nodes := []*ports.NodeMetadata{{Index: 0, Name: "node0", HomeDir: "/tmp/node0", PID: &pid}} + + setActiveRef := "" + uc := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{loadAllFunc: func(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) { return nodes, nil }}, + &mockProcessExecutor{}, + &mockBinaryCache{setActiveFunc: func(ref string) error { setActiveRef = ref; return nil }}, + &testLogger{}, + ) + + out, err := uc.Execute(context.Background(), dto.SwitchBinaryInput{ + HomeDir: "/tmp/devnet", + Mode: types.ExecutionModeLocal, + TargetBinary: "/tmp/new-stabled", + CacheRef: "abc123", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if out.NewBinary != "/tmp/new-stabled" { + t.Fatalf("unexpected new binary: 
%s", out.NewBinary) + } + if setActiveRef != "abc123" { + t.Fatalf("SetActive called with %q", setActiveRef) + } + if out.NodesRestarted != 1 { + t.Fatalf("expected 1 restarted node, got %d", out.NodesRestarted) + } +} + +func TestSwitchBinaryUseCase_Execute_Errors(t *testing.T) { + t.Parallel() + + t.Run("no target", func(t *testing.T) { + t.Parallel() + uc := NewSwitchBinaryUseCase(&mockDevnetRepo{}, &mockNodeRepo{}, &mockProcessExecutor{}, &mockBinaryCache{}, &testLogger{}) + _, err := uc.Execute(context.Background(), dto.SwitchBinaryInput{HomeDir: "/tmp/devnet", Mode: types.ExecutionModeLocal}) + if err == nil { + t.Fatalf("expected error") + } + }) + + t.Run("set active fails", func(t *testing.T) { + t.Parallel() + uc := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{}, + &mockProcessExecutor{}, + &mockBinaryCache{setActiveFunc: func(ref string) error { return errors.New("cache fail") }}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.SwitchBinaryInput{HomeDir: "/tmp/devnet", Mode: types.ExecutionModeLocal, TargetBinary: "/tmp/bin", CacheRef: "ref"}) + if err == nil || err.Error() != "failed to activate binary: cache fail" { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("node load fails", func(t *testing.T) { + t.Parallel() + uc := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{loadAllFunc: func(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) { + return nil, errors.New("nodes fail") + }}, + &mockProcessExecutor{}, + &mockBinaryCache{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.SwitchBinaryInput{HomeDir: "/tmp/devnet", Mode: types.ExecutionModeDocker, TargetImage: "img"}) + if err == nil || err.Error() != "failed to load nodes: nodes fail" { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestSwitchBinaryUseCase_BuildStartCommand(t *testing.T) { + t.Parallel() + + uc := &SwitchBinaryUseCase{} + cmd := 
uc.buildStartCommand(&ports.NodeMetadata{HomeDir: "/tmp/node0"}, &ports.DevnetMetadata{}, "", 100) + if cmd.Binary != "stabled" { + t.Fatalf("expected default binary, got %s", cmd.Binary) + } + if len(cmd.Args) < 3 || cmd.Args[0] != "start" { + t.Fatalf("unexpected args: %+v", cmd.Args) + } +} + +func TestPidHandle_Methods(t *testing.T) { + t.Parallel() + + h := &pidHandle{pid: 1234} + if got := h.PID(); got != 1234 { + t.Fatalf("PID = %d", got) + } + if h.IsRunning() { + t.Fatalf("expected IsRunning false") + } + if err := h.Wait(); err != nil { + t.Fatalf("Wait error: %v", err) + } + if err := h.Kill(); err != nil { + t.Fatalf("Kill error: %v", err) + } +} diff --git a/internal/application/upgrade/test_helpers_test.go b/internal/application/upgrade/test_helpers_test.go new file mode 100644 index 00000000..692e5db2 --- /dev/null +++ b/internal/application/upgrade/test_helpers_test.go @@ -0,0 +1,478 @@ +package upgrade + +import ( + "context" + "io" + "os" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/altuslabsxyz/devnet-builder/types" +) + +type testLogger struct{} + +func (l *testLogger) Info(format string, args ...interface{}) {} +func (l *testLogger) Warn(format string, args ...interface{}) {} +func (l *testLogger) Error(format string, args ...interface{}) {} +func (l *testLogger) Debug(format string, args ...interface{}) {} +func (l *testLogger) Success(format string, args ...interface{}) {} +func (l *testLogger) Print(format string, args ...interface{}) {} +func (l *testLogger) Println(format string, args ...interface{}) {} +func (l *testLogger) SetVerbose(verbose bool) {} +func (l *testLogger) IsVerbose() bool { return false } +func (l *testLogger) Writer() io.Writer { return os.Stdout } +func (l *testLogger) ErrWriter() io.Writer { return os.Stderr } + +type mockRPCClient struct { + getBlockHeightFunc func(ctx context.Context) (int64, error) + getBlockTimeFunc func(ctx context.Context, sampleSize int) 
(time.Duration, error) + isChainRunningFunc func(ctx context.Context) bool + waitForBlockFunc func(ctx context.Context, height int64) error + getProposalFunc func(ctx context.Context, id uint64) (*ports.Proposal, error) + getUpgradePlanFunc func(ctx context.Context) (*ports.UpgradePlan, error) + getAppVersionFunc func(ctx context.Context) (string, error) + getGovParamsFunc func(ctx context.Context) (*ports.GovParams, error) +} + +func (m *mockRPCClient) GetBlockHeight(ctx context.Context) (int64, error) { + if m.getBlockHeightFunc != nil { + return m.getBlockHeightFunc(ctx) + } + return 0, nil +} + +func (m *mockRPCClient) GetBlockTime(ctx context.Context, sampleSize int) (time.Duration, error) { + if m.getBlockTimeFunc != nil { + return m.getBlockTimeFunc(ctx, sampleSize) + } + return time.Second, nil +} + +func (m *mockRPCClient) IsChainRunning(ctx context.Context) bool { + if m.isChainRunningFunc != nil { + return m.isChainRunningFunc(ctx) + } + return true +} + +func (m *mockRPCClient) WaitForBlock(ctx context.Context, height int64) error { + if m.waitForBlockFunc != nil { + return m.waitForBlockFunc(ctx, height) + } + return nil +} + +func (m *mockRPCClient) GetProposal(ctx context.Context, id uint64) (*ports.Proposal, error) { + if m.getProposalFunc != nil { + return m.getProposalFunc(ctx, id) + } + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil +} + +func (m *mockRPCClient) GetUpgradePlan(ctx context.Context) (*ports.UpgradePlan, error) { + if m.getUpgradePlanFunc != nil { + return m.getUpgradePlanFunc(ctx) + } + return nil, nil +} + +func (m *mockRPCClient) GetAppVersion(ctx context.Context) (string, error) { + if m.getAppVersionFunc != nil { + return m.getAppVersionFunc(ctx) + } + return "", nil +} + +func (m *mockRPCClient) GetGovParams(ctx context.Context) (*ports.GovParams, error) { + if m.getGovParamsFunc != nil { + return m.getGovParamsFunc(ctx) + } + return &ports.GovParams{ExpeditedVotingPeriod: 60 * time.Second}, nil +} + +type 
mockDevnetRepo struct { + loadFunc func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) + saveFunc func(ctx context.Context, metadata *ports.DevnetMetadata) error + deleteFunc func(ctx context.Context, homeDir string) error + existsFunc func(homeDir string) bool +} + +func (m *mockDevnetRepo) Save(ctx context.Context, metadata *ports.DevnetMetadata) error { + if m.saveFunc != nil { + return m.saveFunc(ctx, metadata) + } + return nil +} + +func (m *mockDevnetRepo) Load(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + if m.loadFunc != nil { + return m.loadFunc(ctx, homeDir) + } + return &ports.DevnetMetadata{ + HomeDir: homeDir, + Status: ports.StateRunning, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + NumValidators: 1, + }, nil +} + +func (m *mockDevnetRepo) Delete(ctx context.Context, homeDir string) error { + if m.deleteFunc != nil { + return m.deleteFunc(ctx, homeDir) + } + return nil +} + +func (m *mockDevnetRepo) Exists(homeDir string) bool { + if m.existsFunc != nil { + return m.existsFunc(homeDir) + } + return true +} + +type mockNodeRepo struct { + loadAllFunc func(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) + loadFunc func(ctx context.Context, homeDir string, index int) (*ports.NodeMetadata, error) + saveFunc func(ctx context.Context, node *ports.NodeMetadata) error + deleteFunc func(ctx context.Context, homeDir string, index int) error +} + +func (m *mockNodeRepo) Save(ctx context.Context, node *ports.NodeMetadata) error { + if m.saveFunc != nil { + return m.saveFunc(ctx, node) + } + return nil +} + +func (m *mockNodeRepo) Load(ctx context.Context, homeDir string, index int) (*ports.NodeMetadata, error) { + if m.loadFunc != nil { + return m.loadFunc(ctx, homeDir, index) + } + return &ports.NodeMetadata{Index: index, Name: "node"}, nil +} + +func (m *mockNodeRepo) LoadAll(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) 
{ + if m.loadAllFunc != nil { + return m.loadAllFunc(ctx, homeDir) + } + return []*ports.NodeMetadata{}, nil +} + +func (m *mockNodeRepo) Delete(ctx context.Context, homeDir string, index int) error { + if m.deleteFunc != nil { + return m.deleteFunc(ctx, homeDir, index) + } + return nil +} + +type mockProcessHandle struct { + pid int +} + +func (h *mockProcessHandle) PID() int { return h.pid } +func (h *mockProcessHandle) IsRunning() bool { return true } +func (h *mockProcessHandle) Wait() error { return nil } +func (h *mockProcessHandle) Kill() error { return nil } + +type mockProcessExecutor struct { + startFunc func(ctx context.Context, cmd ports.Command) (ports.ProcessHandle, error) + stopFunc func(ctx context.Context, handle ports.ProcessHandle, timeout time.Duration) error + killFunc func(handle ports.ProcessHandle) error + isRunningFunc func(handle ports.ProcessHandle) bool + logsFunc func(handle ports.ProcessHandle, lines int) ([]string, error) +} + +func (m *mockProcessExecutor) Start(ctx context.Context, cmd ports.Command) (ports.ProcessHandle, error) { + if m.startFunc != nil { + return m.startFunc(ctx, cmd) + } + return &mockProcessHandle{pid: 1000}, nil +} + +func (m *mockProcessExecutor) Stop(ctx context.Context, handle ports.ProcessHandle, timeout time.Duration) error { + if m.stopFunc != nil { + return m.stopFunc(ctx, handle, timeout) + } + return nil +} + +func (m *mockProcessExecutor) Kill(handle ports.ProcessHandle) error { + if m.killFunc != nil { + return m.killFunc(handle) + } + return nil +} + +func (m *mockProcessExecutor) IsRunning(handle ports.ProcessHandle) bool { + if m.isRunningFunc != nil { + return m.isRunningFunc(handle) + } + return true +} + +func (m *mockProcessExecutor) Logs(handle ports.ProcessHandle, lines int) ([]string, error) { + if m.logsFunc != nil { + return m.logsFunc(handle, lines) + } + return []string{}, nil +} + +type mockBinaryCache struct { + storeFunc func(ctx context.Context, ref string, binaryPath string) 
(string, error) + getFunc func(ref string) (string, bool) + hasFunc func(ref string) bool + listFunc func() []string + listDetailedFunc func() []ports.CachedBinaryInfo + statsFunc func() ports.CacheStats + removeFunc func(ref string) error + cleanFunc func() error + setActiveFunc func(ref string) error + getActiveFunc func() (string, error) + cacheDirFunc func() string + symlinkPathFunc func() string + symlinkInfoFunc func() (*ports.SymlinkInfo, error) +} + +func (m *mockBinaryCache) Store(ctx context.Context, ref string, binaryPath string) (string, error) { + if m.storeFunc != nil { + return m.storeFunc(ctx, ref, binaryPath) + } + return binaryPath, nil +} + +func (m *mockBinaryCache) Get(ref string) (string, bool) { + if m.getFunc != nil { + return m.getFunc(ref) + } + return "", false +} + +func (m *mockBinaryCache) Has(ref string) bool { + if m.hasFunc != nil { + return m.hasFunc(ref) + } + return false +} + +func (m *mockBinaryCache) List() []string { + if m.listFunc != nil { + return m.listFunc() + } + return []string{} +} + +func (m *mockBinaryCache) ListDetailed() []ports.CachedBinaryInfo { + if m.listDetailedFunc != nil { + return m.listDetailedFunc() + } + return []ports.CachedBinaryInfo{} +} + +func (m *mockBinaryCache) Stats() ports.CacheStats { + if m.statsFunc != nil { + return m.statsFunc() + } + return ports.CacheStats{} +} + +func (m *mockBinaryCache) Remove(ref string) error { + if m.removeFunc != nil { + return m.removeFunc(ref) + } + return nil +} + +func (m *mockBinaryCache) Clean() error { + if m.cleanFunc != nil { + return m.cleanFunc() + } + return nil +} + +func (m *mockBinaryCache) SetActive(ref string) error { + if m.setActiveFunc != nil { + return m.setActiveFunc(ref) + } + return nil +} + +func (m *mockBinaryCache) GetActive() (string, error) { + if m.getActiveFunc != nil { + return m.getActiveFunc() + } + return "", nil +} + +func (m *mockBinaryCache) CacheDir() string { + if m.cacheDirFunc != nil { + return m.cacheDirFunc() + } + 
return "" +} + +func (m *mockBinaryCache) SymlinkPath() string { + if m.symlinkPathFunc != nil { + return m.symlinkPathFunc() + } + return "" +} + +func (m *mockBinaryCache) SymlinkInfo() (*ports.SymlinkInfo, error) { + if m.symlinkInfoFunc != nil { + return m.symlinkInfoFunc() + } + return &ports.SymlinkInfo{}, nil +} + +type mockValidatorKeyLoader struct { + loadValidatorKeysFunc func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) +} + +func (m *mockValidatorKeyLoader) LoadValidatorKeys(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + if m.loadValidatorKeysFunc != nil { + return m.loadValidatorKeysFunc(ctx, opts) + } + return []ports.ValidatorKey{}, nil +} + +type mockExportUseCase struct { + executeFunc func(ctx context.Context, input interface{}) (interface{}, error) +} + +func (m *mockExportUseCase) Execute(ctx context.Context, input interface{}) (interface{}, error) { + if m.executeFunc != nil { + return m.executeFunc(ctx, input) + } + return nil, nil +} + +type mockStateManager struct { + loadStateFunc func(ctx context.Context) (*ports.UpgradeState, error) + saveStateFunc func(ctx context.Context, state *ports.UpgradeState) error + deleteStateFunc func(ctx context.Context) error + stateExistsFunc func(ctx context.Context) (bool, error) + validateStateFunc func(state *ports.UpgradeState) error + acquireLockFunc func(ctx context.Context) error + releaseLockFunc func(ctx context.Context) error + + savedStates []*ports.UpgradeState +} + +func (m *mockStateManager) LoadState(ctx context.Context) (*ports.UpgradeState, error) { + if m.loadStateFunc != nil { + return m.loadStateFunc(ctx) + } + return nil, nil +} + +func (m *mockStateManager) SaveState(ctx context.Context, state *ports.UpgradeState) error { + if m.saveStateFunc != nil { + return m.saveStateFunc(ctx, state) + } + clone := *state + m.savedStates = append(m.savedStates, &clone) + return nil +} + +func (m *mockStateManager) 
DeleteState(ctx context.Context) error { + if m.deleteStateFunc != nil { + return m.deleteStateFunc(ctx) + } + return nil +} + +func (m *mockStateManager) StateExists(ctx context.Context) (bool, error) { + if m.stateExistsFunc != nil { + return m.stateExistsFunc(ctx) + } + return false, nil +} + +func (m *mockStateManager) ValidateState(state *ports.UpgradeState) error { + if m.validateStateFunc != nil { + return m.validateStateFunc(state) + } + return nil +} + +func (m *mockStateManager) AcquireLock(ctx context.Context) error { + if m.acquireLockFunc != nil { + return m.acquireLockFunc(ctx) + } + return nil +} + +func (m *mockStateManager) ReleaseLock(ctx context.Context) error { + if m.releaseLockFunc != nil { + return m.releaseLockFunc(ctx) + } + return nil +} + +type mockStateDetector struct { + detectCurrentStageFunc func(ctx context.Context, state *ports.UpgradeState) (ports.ResumableStage, error) + detectProposalStatusFunc func(ctx context.Context, proposalID uint64) (string, error) + detectChainStatusFunc func(ctx context.Context) (string, error) + detectValidatorVotesFunc func(ctx context.Context, proposalID uint64) ([]ports.ValidatorVoteState, error) +} + +func (m *mockStateDetector) DetectCurrentStage(ctx context.Context, state *ports.UpgradeState) (ports.ResumableStage, error) { + if m.detectCurrentStageFunc != nil { + return m.detectCurrentStageFunc(ctx, state) + } + return state.Stage, nil +} + +func (m *mockStateDetector) DetectProposalStatus(ctx context.Context, proposalID uint64) (string, error) { + if m.detectProposalStatusFunc != nil { + return m.detectProposalStatusFunc(ctx, proposalID) + } + return "unknown", nil +} + +func (m *mockStateDetector) DetectChainStatus(ctx context.Context) (string, error) { + if m.detectChainStatusFunc != nil { + return m.detectChainStatusFunc(ctx) + } + return "running", nil +} + +func (m *mockStateDetector) DetectValidatorVotes(ctx context.Context, proposalID uint64) ([]ports.ValidatorVoteState, error) { + if 
m.detectValidatorVotesFunc != nil { + return m.detectValidatorVotesFunc(ctx, proposalID) + } + return []ports.ValidatorVoteState{}, nil +} + +type mockTransitioner struct { + transitionToFunc func(state *ports.UpgradeState, target ports.ResumableStage, reason string) error + canTransitionFunc func(from, to ports.ResumableStage) bool + getValidTransitionsFunc func(from ports.ResumableStage) []ports.ResumableStage +} + +func (m *mockTransitioner) TransitionTo(state *ports.UpgradeState, target ports.ResumableStage, reason string) error { + if m.transitionToFunc != nil { + return m.transitionToFunc(state, target, reason) + } + prev := state.Stage + state.Stage = target + state.StageHistory = append(state.StageHistory, ports.StageTransition{From: prev, To: target, Reason: reason, Timestamp: time.Now()}) + return nil +} + +func (m *mockTransitioner) CanTransition(from, to ports.ResumableStage) bool { + if m.canTransitionFunc != nil { + return m.canTransitionFunc(from, to) + } + return true +} + +func (m *mockTransitioner) GetValidTransitions(from ports.ResumableStage) []ports.ResumableStage { + if m.getValidTransitionsFunc != nil { + return m.getValidTransitionsFunc(from) + } + return []ports.ResumableStage{} +} From 495c23e7520fc304e2b7ef8c031c05a3753a9a49 Mon Sep 17 00:00:00 2001 From: kangeunchan Date: Fri, 27 Feb 2026 13:45:57 +0900 Subject: [PATCH 2/2] test(upgrade): expand workflow coverage with fake rpc harness Signed-off-by: kangeunchan --- internal/application/upgrade/eth_rpc_test.go | 227 +++++ internal/application/upgrade/execute_test.go | 398 ++++++++ .../application/upgrade/propose_vote_test.go | 306 ++++++ .../upgrade/resumable_execute_test.go | 903 ++++++++++++++++++ 4 files changed, 1834 insertions(+) create mode 100644 internal/application/upgrade/eth_rpc_test.go create mode 100644 internal/application/upgrade/execute_test.go create mode 100644 internal/application/upgrade/propose_vote_test.go create mode 100644 
internal/application/upgrade/resumable_execute_test.go diff --git a/internal/application/upgrade/eth_rpc_test.go b/internal/application/upgrade/eth_rpc_test.go new file mode 100644 index 00000000..5372651a --- /dev/null +++ b/internal/application/upgrade/eth_rpc_test.go @@ -0,0 +1,227 @@ +package upgrade + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +type rpcRequest struct { + JSONRPC string `json:"jsonrpc"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + ID interface{} `json:"id"` +} + +type rpcResponse struct { + JSONRPC string `json:"jsonrpc"` + ID interface{} `json:"id"` + Result interface{} `json:"result,omitempty"` + Error interface{} `json:"error,omitempty"` +} + +func startFakeEthRPCServer(t *testing.T, proposalID uint64) func() { + t.Helper() + + listener, err := net.Listen("tcp", "127.0.0.1:8545") + if err != nil { + t.Skipf("cannot bind fake eth rpc on :8545: %v", err) + } + + txHash := "0x" + strings.Repeat("1", 64) + blockHash := "0x" + strings.Repeat("2", 64) + logsBloom := "0x" + strings.Repeat("0", 512) + proposalIDTopic := fmt.Sprintf("0x%064x", proposalID) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + var req rpcRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + w.WriteHeader(http.StatusBadRequest) + _ = json.NewEncoder(w).Encode(rpcResponse{JSONRPC: "2.0", ID: nil, Error: map[string]interface{}{"code": -32700, "message": "parse error"}}) + return + } + + res := rpcResponse{JSONRPC: "2.0", ID: req.ID} + switch req.Method { + case "eth_chainId": + res.Result = "0x1" + case "eth_getBalance": + res.Result = "0x56bc75e2d63100000" + case 
"eth_getTransactionCount": + res.Result = "0x0" + case "eth_gasPrice": + res.Result = "0x3b9aca00" + case "eth_call": + res.Result = "0x" + case "eth_estimateGas": + res.Result = "0x7a120" + case "eth_sendRawTransaction": + res.Result = txHash + case "eth_getTransactionReceipt": + res.Result = map[string]interface{}{ + "transactionHash": txHash, + "transactionIndex": "0x0", + "blockHash": blockHash, + "blockNumber": "0x1", + "from": "0x0000000000000000000000000000000000000001", + "to": GovPrecompileAddress, + "cumulativeGasUsed": "0x5208", + "gasUsed": "0x5208", + "contractAddress": nil, + "logsBloom": logsBloom, + "status": "0x1", + "type": "0x0", + "effectiveGasPrice": "0x3b9aca00", + "logs": []map[string]interface{}{ + { + "address": GovPrecompileAddress, + "topics": []string{"0x" + strings.Repeat("3", 64), proposalIDTopic}, + "data": "0x", + "blockNumber": "0x1", + "transactionHash": txHash, + "transactionIndex": "0x0", + "blockHash": blockHash, + "logIndex": "0x0", + "removed": false, + }, + }, + } + default: + res.Error = map[string]interface{}{"code": -32601, "message": "method not found"} + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(res) + }) + + srv := &http.Server{Handler: handler} + go func() { + _ = srv.Serve(listener) + }() + + return func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _ = srv.Shutdown(ctx) + } +} + +func testValidatorKey(t *testing.T) ports.ValidatorKey { + t.Helper() + pkHex := "4f3edf983ac636a65a842ce7c78d9aa706d3b113bce036f0f6078ff8b8f4f0c7" + pk, err := crypto.HexToECDSA(pkHex) + if err != nil { + t.Fatalf("failed to parse key: %v", err) + } + addr := crypto.PubkeyToAddress(pk.PublicKey).Hex() + return ports.ValidatorKey{ + Name: "validator0", + Bech32Address: "stable1test", + HexAddress: addr, + PrivateKey: pkHex, + } +} + +func TestProposeUseCase_Execute_FakeEthRPC(t *testing.T) { + teardown := startFakeEthRPCServer(t, 7) + defer 
teardown() + + key := testValidatorKey(t) + uc := NewProposeUseCase( + &mockDevnetRepo{}, + &mockRPCClient{}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }}, + &testLogger{}, + ) + + out, err := uc.Execute(context.Background(), dto.ProposeInput{ + HomeDir: "/tmp/devnet", + UpgradeName: "v2.0.0", + UpgradeHeight: 12345, + VotingPeriod: 30 * time.Second, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if out.ProposalID != 7 { + t.Fatalf("proposal id = %d, want 7", out.ProposalID) + } + if out.TxHash == "" { + t.Fatalf("expected tx hash") + } +} + +func TestVoteUseCase_Execute_FakeEthRPC(t *testing.T) { + teardown := startFakeEthRPCServer(t, 9) + defer teardown() + + key := testValidatorKey(t) + uc := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{NumValidators: 1, ExecutionMode: "local", CurrentVersion: "v1", BinaryName: "stabled"}, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }}, + &testLogger{}, + ) + + out, err := uc.Execute(context.Background(), dto.VoteInput{ + HomeDir: "/tmp/devnet", + ProposalID: 9, + VoteOption: "yes", + FromAll: false, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if out.VotesCast != 1 || out.TotalVoters != 1 { + t.Fatalf("unexpected vote result: %+v", out) + } + if len(out.TxHashes) != 1 { + t.Fatalf("expected one tx hash") + } +} + +func TestWaitForReceiptCancelledContext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + 
cancel() + + _, err := waitForReceipt(ctx, nil, common.Hash{}) + if err == nil { + t.Fatalf("expected cancellation error") + } +} + +func TestGetLatestProposalID(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(`{"proposals":[{"id":"42"}]}`)) + })) + defer ts.Close() + + id, err := getLatestProposalID(ts.URL) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if id != 42 { + t.Fatalf("id = %d", id) + } +} diff --git a/internal/application/upgrade/execute_test.go b/internal/application/upgrade/execute_test.go new file mode 100644 index 00000000..06ba99cb --- /dev/null +++ b/internal/application/upgrade/execute_test.go @@ -0,0 +1,398 @@ +package upgrade + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/altuslabsxyz/devnet-builder/types" +) + +func TestExecuteUpgradeUseCase_ExecuteSkipGov(t *testing.T) { + t.Parallel() + + cacheSetActiveCalled := false + switchUC := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{}, + &mockProcessExecutor{}, + &mockBinaryCache{ + setActiveFunc: func(ref string) error { + cacheSetActiveCalled = true + if ref != "cache-ref" { + t.Fatalf("unexpected cache ref: %s", ref) + } + return nil + }, + }, + &testLogger{}, + ) + + rpc := &mockRPCClient{} + heights := []int64{100, 101} + rpc.getBlockHeightFunc = func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + } + + uc := NewExecuteUpgradeUseCase( + nil, + nil, + switchUC, + &mockExportUseCase{}, + rpc, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + out, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + SkipGovernance: true, + TargetBinary: "/tmp/stabled", + CacheRef: "cache-ref", + Mode: types.ExecutionModeLocal, + }) + 
if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success { + t.Fatalf("expected success output") + } + if !cacheSetActiveCalled { + t.Fatalf("expected SetActive to be called") + } +} + +func TestExecuteUpgradeUseCase_ExecuteSkipGovSwitchError(t *testing.T) { + t.Parallel() + + switchUC := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{}, + &mockProcessExecutor{}, + &mockBinaryCache{}, + &testLogger{}, + ) + + uc := NewExecuteUpgradeUseCase( + nil, + nil, + switchUC, + &mockExportUseCase{}, + &mockRPCClient{}, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + _, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{SkipGovernance: true, HomeDir: "/tmp/devnet", Mode: types.ExecutionModeLocal}) + if err == nil || !strings.Contains(err.Error(), "no target binary specified") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExecuteUpgradeUseCase_ExecuteWithGovPreExportErrors(t *testing.T) { + t.Parallel() + + t.Run("pre-export failure", func(t *testing.T) { + t.Parallel() + uc := NewExecuteUpgradeUseCase( + nil, + nil, + nil, + &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) { + return nil, errors.New("export failed") + }}, + &mockRPCClient{}, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + _, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{SkipGovernance: false, WithExport: true}) + if err == nil || !strings.Contains(err.Error(), "pre-upgrade export failed") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("invalid pre-export result type", func(t *testing.T) { + t.Parallel() + uc := NewExecuteUpgradeUseCase( + nil, + nil, + nil, + &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) { + return "invalid", nil + }}, + &mockRPCClient{}, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + _, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{SkipGovernance: false, WithExport: 
true}) + if err == nil || !strings.Contains(err.Error(), "invalid export result type") { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestExecuteUpgradeUseCase_UpdateCurrentVersion(t *testing.T) { + t.Parallel() + + metadata := &ports.DevnetMetadata{CurrentVersion: "v1"} + repo := &mockDevnetRepo{ + loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return metadata, nil + }, + } + + uc := &ExecuteUpgradeUseCase{devnetRepo: repo, logger: &testLogger{}} + if err := uc.updateCurrentVersion(context.Background(), "/tmp/devnet", "v2"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if metadata.CurrentVersion != "v2" { + t.Fatalf("version not updated: %s", metadata.CurrentVersion) + } +} + +func TestExecuteUpgradeUseCase_WaitForUpgradeHeight(t *testing.T) { + t.Parallel() + + t.Run("success when already at target", func(t *testing.T) { + t.Parallel() + uc := &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 100, nil }}, + logger: &testLogger{}, + } + if err := uc.waitForUpgradeHeight(context.Background(), 100); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("rpc error", func(t *testing.T) { + t.Parallel() + uc := &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 0, errors.New("rpc down") }}, + logger: &testLogger{}, + } + err := uc.waitForUpgradeHeight(context.Background(), 100) + if err == nil || !strings.Contains(err.Error(), "failed to get block height") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("canceled while waiting", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + uc := &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { + cancel() + return 1, nil + }}, + logger: &testLogger{}, + } + err := 
uc.waitForUpgradeHeight(ctx, 10) + if err == nil { + t.Fatalf("expected cancellation error") + } + }) +} + +func TestExecuteUpgradeUseCase_WaitForChainHalt(t *testing.T) { + t.Parallel() + + uc := &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 200, nil }}, + logger: &testLogger{}, + } + err := uc.waitForChainHalt(context.Background(), 100) + if err == nil || !strings.Contains(err.Error(), "upgrade proposal may have failed") { + t.Fatalf("unexpected error: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err = uc.waitForChainHalt(ctx, 100) + if err == nil { + t.Fatalf("expected context cancellation error") + } +} + +func TestExecuteUpgradeUseCase_VerifyChainResumedCancelled(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + uc := &ExecuteUpgradeUseCase{rpcClient: &mockRPCClient{}, logger: &testLogger{}} + _, err := uc.verifyChainResumed(ctx, "/tmp/devnet") + if err == nil { + t.Fatalf("expected cancellation error") + } +} + +func TestMonitorUseCase_Execute(t *testing.T) { + t.Parallel() + + monitor := NewMonitorUseCase(&mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 55, nil }}, &testLogger{}) + ch, err := monitor.Execute(context.Background(), dto.MonitorInput{TargetHeight: 50}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + progress := <-ch + if !progress.IsComplete || progress.Stage != ports.StageCompleted { + t.Fatalf("unexpected progress: %+v", progress) + } +} + +func TestProgressHelpers(t *testing.T) { + t.Parallel() + + if bar := makeProgressBar(10, 50); bar != "=====> " { + t.Fatalf("unexpected bar: %q", bar) + } + if bar := makeProgressBar(5, -1); bar != "> " { + t.Fatalf("unexpected negative bar: %q", bar) + } + if bar := makeProgressBar(5, 200); bar != "=====" { + t.Fatalf("unexpected overflow bar: %q", bar) + } + + if got := 
formatDuration(65 * time.Second); got != "1m5s" { + t.Fatalf("formatDuration = %q", got) + } + if got := formatDuration((2 * time.Hour) + (3 * time.Minute) + (4 * time.Second)); got != "2h3m4s" { + t.Fatalf("formatDuration = %q", got) + } +} + +func TestExecuteUpgradeUseCase_ExecuteWithGov_ChainHaltErrorAfterProposalAndVote(t *testing.T) { + teardown := startFakeEthRPCServer(t, 11) + defer teardown() + + key := testValidatorKey(t) + keyLoader := &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }} + + proposeUC := NewProposeUseCase(&mockDevnetRepo{}, &mockRPCClient{}, keyLoader, &testLogger{}) + voteUC := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 1, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + keyLoader, + &testLogger{}, + ) + switchUC := NewSwitchBinaryUseCase(&mockDevnetRepo{}, &mockNodeRepo{}, &mockProcessExecutor{}, &mockBinaryCache{}, &testLogger{}) + + rpcHeights := []int64{100, 120} + execRPC := &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := rpcHeights[0] + rpcHeights = rpcHeights[1:] + return h, nil + }} + + uc := NewExecuteUpgradeUseCase( + proposeUC, + voteUC, + switchUC, + &mockExportUseCase{}, + execRPC, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + _, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + UpgradeName: "v2.0.0", + UpgradeHeight: 100, + VotingPeriod: 30 * time.Second, + Mode: types.ExecutionModeLocal, + TargetBinary: "/tmp/new-stabled", + 
CacheRef: "cache-ref", + SkipGovernance: false, + }) + if err == nil || !strings.Contains(err.Error(), "upgrade proposal may have failed") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExecuteUpgradeUseCase_ExecuteWithGov_Success(t *testing.T) { + teardown := startFakeEthRPCServer(t, 15) + defer teardown() + + key := testValidatorKey(t) + keyLoader := &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }} + + proposeUC := NewProposeUseCase(&mockDevnetRepo{}, &mockRPCClient{}, keyLoader, &testLogger{}) + voteUC := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 1, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + keyLoader, + &testLogger{}, + ) + switchUC := NewSwitchBinaryUseCase(&mockDevnetRepo{}, &mockNodeRepo{}, &mockProcessExecutor{}, &mockBinaryCache{}, &testLogger{}) + + rpcHeights := []int64{100, 100, 100, 100, 100, 101, 102} + execRPC := &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := rpcHeights[0] + rpcHeights = rpcHeights[1:] + return h, nil + }} + + uc := NewExecuteUpgradeUseCase( + proposeUC, + voteUC, + switchUC, + &mockExportUseCase{}, + execRPC, + &mockDevnetRepo{}, + nil, + &testLogger{}, + ) + + out, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + UpgradeName: "v2.0.0", + UpgradeHeight: 100, + VotingPeriod: 30 * time.Second, + Mode: types.ExecutionModeLocal, + TargetBinary: "/tmp/new-stabled", + CacheRef: "cache-ref", + SkipGovernance: false, + }) + if 
err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success || out.ProposalID == 0 { + t.Fatalf("unexpected output: %+v", out) + } +} diff --git a/internal/application/upgrade/propose_vote_test.go b/internal/application/upgrade/propose_vote_test.go new file mode 100644 index 00000000..ad687685 --- /dev/null +++ b/internal/application/upgrade/propose_vote_test.go @@ -0,0 +1,306 @@ +package upgrade + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "math/big" + "strings" + "testing" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +) + +func TestProposeUseCase_Execute_Errors(t *testing.T) { + t.Parallel() + + t.Run("load devnet fails", func(t *testing.T) { + t.Parallel() + uc := NewProposeUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return nil, errors.New("load fail") + }}, + &mockRPCClient{}, + &mockValidatorKeyLoader{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.ProposeInput{HomeDir: "/tmp/devnet", UpgradeHeight: 10}) + if err == nil || !strings.Contains(err.Error(), "failed to load devnet") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("devnet not running", func(t *testing.T) { + t.Parallel() + uc := NewProposeUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{Status: ports.StateStopped}, nil + }}, + &mockRPCClient{}, + &mockValidatorKeyLoader{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.ProposeInput{HomeDir: "/tmp/devnet", UpgradeHeight: 10}) + if err == nil || err.Error() != "devnet is not running" { + t.Fatalf("unexpected error: %v", err) + } + 
}) + + t.Run("calculate height fails", func(t *testing.T) { + t.Parallel() + uc := NewProposeUseCase( + &mockDevnetRepo{}, + &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 0, errors.New("height fail") }}, + &mockValidatorKeyLoader{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.ProposeInput{HomeDir: "/tmp/devnet", UpgradeHeight: 0}) + if err == nil || !strings.Contains(err.Error(), "failed to calculate upgrade height") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("validator key load fails", func(t *testing.T) { + t.Parallel() + uc := NewProposeUseCase( + &mockDevnetRepo{}, + &mockRPCClient{}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return nil, errors.New("key load fail") + }}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.ProposeInput{HomeDir: "/tmp/devnet", UpgradeHeight: 10}) + if err == nil || !strings.Contains(err.Error(), "failed to load validator keys") { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestProposeUseCase_CalculateUpgradeHeight(t *testing.T) { + t.Parallel() + + rpc := &mockRPCClient{ + getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 100, nil }, + getGovParamsFunc: func(ctx context.Context) (*ports.GovParams, error) { return nil, errors.New("gov fail") }, + getBlockTimeFunc: func(ctx context.Context, sampleSize int) (time.Duration, error) { return 0, errors.New("bt fail") }, + } + uc := NewProposeUseCase(&mockDevnetRepo{}, rpc, &mockValidatorKeyLoader{}, &testLogger{}) + + height, err := uc.calculateUpgradeHeight(context.Background(), dto.ProposeInput{VotingPeriod: 30 * time.Second, HeightBuffer: 0}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if height <= 100 { + t.Fatalf("expected calculated height > 100, got %d", height) + } +} + +func TestProposeUseCase_CalculateHeightBuffer(t 
*testing.T) { + t.Parallel() + + uc := NewProposeUseCase(&mockDevnetRepo{}, &mockRPCClient{}, &mockValidatorKeyLoader{}, &testLogger{}) + + if got := uc.calculateHeightBuffer(context.Background(), 1, time.Second); got != 40 { + t.Fatalf("expected default buffer 40, got %d", got) + } + if got := uc.calculateHeightBuffer(context.Background(), 10, 20*time.Second); got != 10 { + t.Fatalf("expected minimum buffer 10, got %d", got) + } + if got := uc.calculateHeightBuffer(context.Background(), 10, 10*time.Millisecond); got != 200 { + t.Fatalf("expected max cap 200, got %d", got) + } +} + +func TestProposalHelpers(t *testing.T) { + t.Parallel() + + proposalJSON := buildProposalJSON("v2.0.0", 1234, "info") + if !strings.Contains(proposalJSON, "v2.0.0") || !strings.Contains(proposalJSON, "1234") { + t.Fatalf("proposal JSON missing expected fields: %s", proposalJSON) + } + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(proposalJSON), &parsed); err != nil { + t.Fatalf("invalid proposal JSON: %v", err) + } + + callData, err := buildSubmitProposalCallData("0x0000000000000000000000000000000000000001", proposalJSON, "astable", "100") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(callData) < 4 { + t.Fatalf("callData too short") + } + wantMethodID := crypto.Keccak256([]byte("submitProposal(address,bytes,(string,uint256)[])"))[:4] + if got := hex.EncodeToString(callData[:4]); got != hex.EncodeToString(wantMethodID) { + t.Fatalf("methodID mismatch: got %s", got) + } +} + +func TestParseProposalIDFromLogs(t *testing.T) { + t.Parallel() + + eventSig := crypto.Keccak256Hash([]byte("SubmitProposal(address,uint64)")) + + id := uint64(42) + idBig := new(big.Int).SetUint64(id) + data := common.LeftPadBytes(idBig.Bytes(), 32) + logs := []*ethtypes.Log{{Topics: []common.Hash{eventSig}, Data: data}} + + parsedID, err := parseProposalIDFromLogs(logs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if parsedID != id { + t.Fatalf("id 
= %d, want %d", parsedID, id) + } + + fallbackLogs := []*ethtypes.Log{{Topics: []common.Hash{common.HexToHash("0x01"), common.BigToHash(big.NewInt(77))}}} + parsedID, err = parseProposalIDFromLogs(fallbackLogs) + if err != nil { + t.Fatalf("unexpected fallback error: %v", err) + } + if parsedID != 77 { + t.Fatalf("fallback id = %d", parsedID) + } + + _, err = parseProposalIDFromLogs([]*ethtypes.Log{{Topics: []common.Hash{common.HexToHash("0x01")}, Data: []byte("short")}}) + if err == nil { + t.Fatalf("expected parse error") + } +} + +func TestVoteUseCase_Execute_Errors(t *testing.T) { + t.Parallel() + + t.Run("devnet load fails", func(t *testing.T) { + t.Parallel() + uc := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return nil, errors.New("load fail") + }}, + &mockRPCClient{}, + &mockValidatorKeyLoader{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.VoteInput{HomeDir: "/tmp/devnet", ProposalID: 1, VoteOption: "yes"}) + if err == nil || !strings.Contains(err.Error(), "failed to load devnet") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("proposal not in voting", func(t *testing.T) { + t.Parallel() + uc := NewVoteUseCase( + &mockDevnetRepo{}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusPassed}, nil + }}, + &mockValidatorKeyLoader{}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.VoteInput{HomeDir: "/tmp/devnet", ProposalID: 1, VoteOption: "yes"}) + if err == nil || !strings.Contains(err.Error(), "proposal is not in voting period") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("validator key load fails", func(t *testing.T) { + t.Parallel() + uc := NewVoteUseCase( + &mockDevnetRepo{}, + &mockRPCClient{}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) 
([]ports.ValidatorKey, error) { + return nil, errors.New("key fail") + }}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.VoteInput{HomeDir: "/tmp/devnet", ProposalID: 1, VoteOption: "yes"}) + if err == nil || !strings.Contains(err.Error(), "failed to load validator keys") { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("invalid vote option", func(t *testing.T) { + t.Parallel() + uc := NewVoteUseCase( + &mockDevnetRepo{}, + &mockRPCClient{}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{}, nil + }}, + &testLogger{}, + ) + _, err := uc.Execute(context.Background(), dto.VoteInput{HomeDir: "/tmp/devnet", ProposalID: 1, VoteOption: "invalid"}) + if err == nil || !strings.Contains(err.Error(), "invalid vote option") { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestVoteHelpers(t *testing.T) { + t.Parallel() + + tests := []struct { + input string + want int + }{ + {input: "yes", want: VoteOptionYes}, + {input: "no", want: VoteOptionNo}, + {input: "abstain", want: VoteOptionAbstain}, + {input: "no_with_veto", want: VoteOptionNoWithVeto}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.input, func(t *testing.T) { + t.Parallel() + got, err := ParseVoteOption(tt.input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Fatalf("option = %d, want %d", got, tt.want) + } + }) + } + + if _, err := ParseVoteOption("bad"); err == nil { + t.Fatalf("expected invalid option error") + } + + data := buildVoteCallData("0x0000000000000000000000000000000000000001", 7, VoteOptionYes, "") + if len(data) < 4 { + t.Fatalf("call data too short") + } + method := crypto.Keccak256([]byte("vote(address,uint64,uint8,string)"))[:4] + if hex.EncodeToString(data[:4]) != hex.EncodeToString(method) { + t.Fatalf("method id mismatch") + } +} + +func 
TestBuildSubmitProposalCallData_IsDeterministic(t *testing.T) { + t.Parallel() + + proposal := buildProposalJSON("v3", 999, "deterministic") + a, err := buildSubmitProposalCallData("0x0000000000000000000000000000000000000001", proposal, "astable", "100") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + b, err := buildSubmitProposalCallData("0x0000000000000000000000000000000000000001", proposal, "astable", "100") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + shaA := sha256.Sum256(a) + shaB := sha256.Sum256(b) + if shaA != shaB { + t.Fatalf("expected deterministic call data") + } +} diff --git a/internal/application/upgrade/resumable_execute_test.go b/internal/application/upgrade/resumable_execute_test.go new file mode 100644 index 00000000..e0450159 --- /dev/null +++ b/internal/application/upgrade/resumable_execute_test.go @@ -0,0 +1,903 @@ +package upgrade + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/altuslabsxyz/devnet-builder/internal/application/dto" + "github.com/altuslabsxyz/devnet-builder/internal/application/ports" + "github.com/altuslabsxyz/devnet-builder/types" +) + +func TestResumableExecuteUpgradeUseCase_Execute_InitialStateSaveFailure(t *testing.T) { + t.Parallel() + + uc := &ResumableExecuteUpgradeUseCase{ + stateManager: &mockStateManager{saveStateFunc: func(ctx context.Context, state *ports.UpgradeState) error { + return errors.New("save failed") + }}, + transitioner: &mockTransitioner{}, + logger: &testLogger{}, + } + + _, err := uc.Execute(context.Background(), dto.ExecuteUpgradeInput{UpgradeName: "v2", Mode: types.ExecutionModeLocal, SkipGovernance: true}, nil) + if err == nil || !strings.Contains(err.Error(), "failed to save initial state") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestNewResumableExecuteUpgradeUseCase(t *testing.T) { + t.Parallel() + + uc := NewResumableExecuteUpgradeUseCase( + nil, + nil, + nil, + nil, + &mockStateManager{}, + 
NewStateTransitioner(), + &mockStateDetector{}, + &mockRPCClient{}, + &mockExportUseCase{}, + &mockDevnetRepo{}, + &testLogger{}, + ) + if uc == nil { + t.Fatalf("expected non-nil use case") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovCompleted(t *testing.T) { + t.Parallel() + + deleted := false + uc := &ResumableExecuteUpgradeUseCase{ + stateManager: &mockStateManager{deleteStateFunc: func(ctx context.Context) error { deleted = true; return nil }}, + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", true) + state.Stage = ports.ResumableStageCompleted + out, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, state, time.Now().Add(-time.Second)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success || !deleted { + t.Fatalf("expected completed success and state deletion") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovUnknownStage(t *testing.T) { + t.Parallel() + + uc := &ResumableExecuteUpgradeUseCase{logger: &testLogger{}} + state := ports.NewUpgradeState("v2", "local", true) + state.Stage = "Unknown" + _, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, state, time.Now()) + if err == nil || !strings.Contains(err.Error(), "cannot resume from stage") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovTerminalStages(t *testing.T) { + t.Parallel() + + uc := &ResumableExecuteUpgradeUseCase{logger: &testLogger{}} + + failed := ports.NewUpgradeState("v2", "local", false) + failed.Stage = ports.ResumableStageFailed + failed.Error = "vote failed" + _, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, failed, time.Now()) + if err == nil || !strings.Contains(err.Error(), "upgrade previously failed") { + t.Fatalf("unexpected failed-stage error: %v", err) + } + + rejected := ports.NewUpgradeState("v2", "local", false) + rejected.Stage = 
ports.ResumableStageProposalRejected + _, err = uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, rejected, time.Now()) + if err == nil || !strings.Contains(err.Error(), "proposal was rejected") { + t.Fatalf("unexpected rejected-stage error: %v", err) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovPreExportFailure(t *testing.T) { + t.Parallel() + + uc := &ResumableExecuteUpgradeUseCase{ + exportUC: &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) { + return nil, errors.New("export fail") + }}, + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = ports.ResumableStageInitialized + + out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{WithExport: true, HomeDir: "/tmp/devnet"}, state, time.Now()) + if err == nil || !strings.Contains(err.Error(), "pre-upgrade export failed") { + t.Fatalf("unexpected error: %v", err) + } + if out == nil || out.Error == nil { + t.Fatalf("expected output error to be populated") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovCompleted(t *testing.T) { + t.Parallel() + + deleted := false + uc := &ResumableExecuteUpgradeUseCase{ + stateManager: &mockStateManager{deleteStateFunc: func(ctx context.Context) error { deleted = true; return nil }}, + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = ports.ResumableStageCompleted + + out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, state, time.Now().Add(-2*time.Second)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success || !deleted { + t.Fatalf("expected successful completion path") + } +} + +func TestResumableExecuteUpgradeUseCase_TransitionAndSave(t *testing.T) { + t.Parallel() + + state := ports.NewUpgradeState("v2", "local", false) + + uc := &ResumableExecuteUpgradeUseCase{ + transitioner: 
NewStateTransitioner(), + stateManager: &mockStateManager{}, + logger: &testLogger{}, + } + if err := uc.transitionAndSave(context.Background(), state, ports.ResumableStageProposalSubmitted, "ok"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + ucErr := &ResumableExecuteUpgradeUseCase{ + transitioner: &mockTransitioner{transitionToFunc: func(state *ports.UpgradeState, target ports.ResumableStage, reason string) error { + return errors.New("bad transition") + }}, + stateManager: &mockStateManager{}, + logger: &testLogger{}, + } + if err := ucErr.transitionAndSave(context.Background(), state, ports.ResumableStageVoting, "x"); err == nil { + t.Fatalf("expected transition error") + } + + ucSaveErr := &ResumableExecuteUpgradeUseCase{ + transitioner: NewStateTransitioner(), + stateManager: &mockStateManager{saveStateFunc: func(ctx context.Context, state *ports.UpgradeState) error { return errors.New("disk full") }}, + logger: &testLogger{}, + } + state2 := ports.NewUpgradeState("v2", "local", false) + if err := ucSaveErr.transitionAndSave(context.Background(), state2, ports.ResumableStageProposalSubmitted, "x"); err == nil || !strings.Contains(err.Error(), "failed to save state") { + t.Fatalf("unexpected save error: %v", err) + } +} + +func TestResumableExecuteUpgradeUseCase_StateAccessors(t *testing.T) { + t.Parallel() + + state := ports.NewUpgradeState("v2", "local", false) + mgr := &mockStateManager{loadStateFunc: func(ctx context.Context) (*ports.UpgradeState, error) { return state, nil }} + uc := &ResumableExecuteUpgradeUseCase{stateManager: mgr} + + got, err := uc.GetCurrentState(context.Background()) + if err != nil { + t.Fatalf("GetCurrentState error: %v", err) + } + if got != state { + t.Fatalf("state mismatch") + } + + if err := uc.ClearState(context.Background()); err != nil { + t.Fatalf("ClearState error: %v", err) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSwitchBinaryAndVotingHelpers(t *testing.T) { + state := 
ports.NewUpgradeState("v2", "local", false) + state.UpgradeHeight = 100 + state.ProposalID = 10 + + pid := 3000 + switchUC := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{loadAllFunc: func(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) { + return []*ports.NodeMetadata{ + {Index: 0, Name: "node0", HomeDir: "/tmp/node0", PID: &pid}, + {Index: 1, Name: "node1", HomeDir: "/tmp/node1"}, + }, nil + }}, + &mockProcessExecutor{}, + &mockBinaryCache{}, + &testLogger{}, + ) + + voteUC := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 0, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{}, nil + }}, + &testLogger{}, + ) + + uc := &ResumableExecuteUpgradeUseCase{ + switchUC: switchUC, + voteUC: voteUC, + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + stateDetector: &mockStateDetector{}, + logger: &testLogger{}, + } + + switchOut, err := uc.executeSwitchBinary(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + TargetBinary: "/tmp/new-stabled", + Mode: types.ExecutionModeLocal, + CacheRef: "cache-ref", + }, state) + if err != nil { + t.Fatalf("executeSwitchBinary error: %v", err) + } + if switchOut.NewBinary == "" { + t.Fatalf("expected new binary path") + } + if len(state.NodeSwitches) != 2 { + t.Fatalf("expected node switch tracking, got %d", len(state.NodeSwitches)) + } + + voteOut, err := uc.executeVoting(context.Background(), 
dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state) + if err != nil { + t.Fatalf("executeVoting error: %v", err) + } + if voteOut.TotalVoters != 0 || voteOut.VotesCast != 0 { + t.Fatalf("unexpected vote output: %+v", voteOut) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovResumableFromSwitching(t *testing.T) { + heights := []int64{10, 11} + + uc := &ResumableExecuteUpgradeUseCase{ + executeUC: &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }}, + devnetRepo: &mockDevnetRepo{}, + logger: &testLogger{}, + }, + switchUC: NewSwitchBinaryUseCase(&mockDevnetRepo{}, &mockNodeRepo{}, &mockProcessExecutor{}, &mockBinaryCache{}, &testLogger{}), + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", true) + state.Stage = ports.ResumableStageSwitchingBinary + + out, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + Mode: types.ExecutionModeLocal, + TargetBinary: "/tmp/new-stabled", + CacheRef: "cache-ref", + }, state, time.Now()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success { + t.Fatalf("expected success output") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_VotingToWaitError(t *testing.T) { + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = ports.ResumableStageProposalSubmitted + state.ProposalID = 15 + state.UpgradeHeight = 100 + + uc := &ResumableExecuteUpgradeUseCase{ + executeUC: &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 0, errors.New("height down") }}, + devnetRepo: &mockDevnetRepo{}, + logger: &testLogger{}, + }, + voteUC: NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) 
(*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 0, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{}, nil + }}, + &testLogger{}, + ), + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now()) + if err == nil || !strings.Contains(err.Error(), "failed to get block height") { + t.Fatalf("unexpected error: %v", err) + } + if out == nil || out.Error == nil { + t.Fatalf("expected output with error") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_FromInitializedUntilWaitError(t *testing.T) { + teardown := startFakeEthRPCServer(t, 22) + defer teardown() + + key := testValidatorKey(t) + keyLoader := &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }} + + proposeUC := NewProposeUseCase( + &mockDevnetRepo{}, + &mockRPCClient{}, + keyLoader, + &testLogger{}, + ) + voteUC := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return &ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 1, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return 
&ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + keyLoader, + &testLogger{}, + ) + + uc := &ResumableExecuteUpgradeUseCase{ + executeUC: &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 0, errors.New("height down") }}, + devnetRepo: &mockDevnetRepo{}, + logger: &testLogger{}, + }, + proposeUC: proposeUC, + voteUC: voteUC, + exportUC: &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) { + return &dto.ExportOutput{ExportPath: "/tmp/pre-upgrade.json"}, nil + }}, + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = ports.ResumableStageInitialized + + out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + UpgradeName: "v2.0.0", + UpgradeHeight: 100, + VotingPeriod: 30 * time.Second, + WithExport: true, + }, state, time.Now()) + if err == nil || !strings.Contains(err.Error(), "failed to get block height") { + t.Fatalf("unexpected error: %v", err) + } + if out == nil || out.PreGenesisPath == "" || out.Error == nil { + t.Fatalf("expected output with pre-export path and error: %+v", out) + } + if state.Stage != ports.ResumableStageFailed { + t.Fatalf("expected failed stage, got %s", state.Stage) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_FromVerifyingResumeSuccess(t *testing.T) { + heights := []int64{200, 201} + uc := &ResumableExecuteUpgradeUseCase{ + executeUC: &ExecuteUpgradeUseCase{ + rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { + h := heights[0] + heights = heights[1:] + return h, nil + }}, + devnetRepo: &mockDevnetRepo{}, + logger: &testLogger{}, + }, + exportUC: &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) { + return 
&dto.ExportOutput{ExportPath: "/tmp/post-upgrade.json"}, nil + }}, + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", false) + state.Stage = ports.ResumableStageVerifyingResume + state.ProposalID = 10 + state.UpgradeHeight = 100 + + out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + WithExport: true, + }, state, time.Now()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !out.Success || out.PostGenesisPath == "" { + t.Fatalf("unexpected output: %+v", out) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovResumable_SwitchError(t *testing.T) { + uc := &ResumableExecuteUpgradeUseCase{ + switchUC: NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{}, + &mockProcessExecutor{}, + &mockBinaryCache{setActiveFunc: func(ref string) error { return errors.New("activate failed") }}, + &testLogger{}, + ), + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + logger: &testLogger{}, + } + + state := ports.NewUpgradeState("v2", "local", true) + state.Stage = ports.ResumableStageSwitchingBinary + + out, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{ + HomeDir: "/tmp/devnet", + Mode: types.ExecutionModeLocal, + TargetBinary: "/tmp/new-stabled", + CacheRef: "cache-ref", + }, state, time.Now()) + if err == nil || !strings.Contains(err.Error(), "failed to activate binary") { + t.Fatalf("unexpected error: %v", err) + } + if out == nil || out.Error == nil { + t.Fatalf("expected output error") + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteVoting_WithValidatorTx(t *testing.T) { + teardown := startFakeEthRPCServer(t, 33) + defer teardown() + + key := testValidatorKey(t) + voteUC := NewVoteUseCase( + &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) { + return 
&ports.DevnetMetadata{ + Status: ports.StateRunning, + NumValidators: 1, + ExecutionMode: types.ExecutionModeLocal, + CurrentVersion: "v1.0.0", + BinaryName: "stabled", + }, nil + }}, + &mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) { + return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil + }}, + &mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) { + return []ports.ValidatorKey{key}, nil + }}, + &testLogger{}, + ) + + state := ports.NewUpgradeState("v2", "local", false) + state.ProposalID = 33 + uc := &ResumableExecuteUpgradeUseCase{ + voteUC: voteUC, + stateManager: &mockStateManager{}, + logger: &testLogger{}, + } + + out, err := uc.executeVoting(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if out.VotesCast != 1 || len(state.ValidatorVotes) != 1 || !state.ValidatorVotes[0].Voted { + t.Fatalf("unexpected vote tracking: out=%+v state=%+v", out, state.ValidatorVotes) + } +} + +func TestResumableExecuteUpgradeUseCase_ExecuteSwitchBinary_UpdatesExistingNodeEntry(t *testing.T) { + pid := 4000 + switchUC := NewSwitchBinaryUseCase( + &mockDevnetRepo{}, + &mockNodeRepo{loadAllFunc: func(ctx context.Context, homeDir string) ([]*ports.NodeMetadata, error) { + return []*ports.NodeMetadata{{Index: 0, Name: "node0", HomeDir: "/tmp/node0", PID: &pid}}, nil + }}, + &mockProcessExecutor{}, + &mockBinaryCache{}, + &testLogger{}, + ) + state := ports.NewUpgradeState("v2", "local", false) + state.UpgradeHeight = 100 + state.NodeSwitches = []ports.NodeSwitchState{{NodeName: "node0", Switched: false}} + + uc := &ResumableExecuteUpgradeUseCase{ + switchUC: switchUC, + stateManager: &mockStateManager{}, + transitioner: NewStateTransitioner(), + stateDetector: &mockStateDetector{}, + logger: &testLogger{}, + } + + _, err := 
	// NOTE(review): the return value of executeSwitchBinary is not assigned
	// here, yet err is checked immediately below; err must come from an
	// earlier statement in this test (above this view) — confirm an
	// `err :=` / `err =` was not accidentally dropped from this call.
	uc.executeSwitchBinary(context.Background(), dto.ExecuteUpgradeInput{
		HomeDir:      "/tmp/devnet",
		TargetBinary: "/tmp/new-stabled",
		Mode:         types.ExecutionModeLocal,
		CacheRef:     "cache-ref",
	}, state)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// The pre-existing NodeSwitches entry must have been flipped to Switched
	// rather than a second entry being appended.
	if len(state.NodeSwitches) != 1 || !state.NodeSwitches[0].Switched {
		t.Fatalf("existing node switch entry was not updated: %+v", state.NodeSwitches)
	}
}

// Resuming from the Voting stage where one of the loaded validator keys
// carries an unusable private key must fail with "not all votes cast" and
// populate the output's Error field.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_VotingMismatch(t *testing.T) {
	teardown := startFakeEthRPCServer(t, 44)
	defer teardown()

	valid := testValidatorKey(t)
	// Second key reuses the valid hex address but has a garbage private key,
	// so its vote cannot be signed/cast.
	invalid := ports.ValidatorKey{
		Name:       "validator1",
		HexAddress: valid.HexAddress,
		PrivateKey: "not-a-private-key",
	}

	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageVoting
	state.ProposalID = 44
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		voteUC: NewVoteUseCase(
			&mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) {
				return &ports.DevnetMetadata{
					Status:         ports.StateRunning,
					NumValidators:  2,
					ExecutionMode:  types.ExecutionModeLocal,
					CurrentVersion: "v1.0.0",
					BinaryName:     "stabled",
				}, nil
			}},
			&mockRPCClient{getProposalFunc: func(ctx context.Context, id uint64) (*ports.Proposal, error) {
				return &ports.Proposal{Status: ports.ProposalStatusVoting}, nil
			}},
			&mockValidatorKeyLoader{loadValidatorKeysFunc: func(ctx context.Context, opts ports.ValidatorKeyOptions) ([]ports.ValidatorKey, error) {
				return []ports.ValidatorKey{valid, invalid}, nil
			}},
			&testLogger{},
		),
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "not all votes cast") {
		t.Fatalf("unexpected error: %v", err)
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// Resuming at ChainHalted while the mocked chain reports height 200 — past
// the recorded upgrade height of 100, i.e. the chain never halted — must
// report that the upgrade proposal may have failed.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_ChainHaltError(t *testing.T) {
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageChainHalted
	state.ProposalID = 50
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient:  &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) { return 200, nil }},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "upgrade proposal may have failed") {
		t.Fatalf("unexpected error: %v", err)
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// A context that is already cancelled when resuming at VerifyingResume must
// surface as a verify error with the output's Error field populated.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_VerifyError(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front so the verify step fails immediately

	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageVerifyingResume
	state.ProposalID = 60
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient:  &mockRPCClient{},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(ctx, dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil {
		t.Fatalf("expected verify error")
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// An unrecognized stage value must be rejected with a
// "cannot resume from stage" error.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_UnknownStage(t *testing.T) {
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = "Unknown"
	uc := &ResumableExecuteUpgradeUseCase{logger: &testLogger{}}

	_, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "cannot resume from stage") {
		t.Fatalf("unexpected error: %v", err)
	}
}

// Resuming from Initialized against a stopped devnet must fail the propose
// step ("devnet is not running"), transition the state to Failed, and
// populate the output error.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_InitializedProposeError(t *testing.T) {
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageInitialized

	uc := &ResumableExecuteUpgradeUseCase{
		proposeUC: NewProposeUseCase(
			&mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) {
				return &ports.DevnetMetadata{Status: ports.StateStopped}, nil
			}},
			&mockRPCClient{},
			&mockValidatorKeyLoader{},
			&testLogger{},
		),
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "devnet is not running") {
		t.Fatalf("unexpected error: %v", err)
	}
	if out == nil || out.Error == nil || state.Stage != ports.ResumableStageFailed {
		t.Fatalf("expected failed transition and output error")
	}
}

// Resuming at WaitingForHeight: the first mocked height (100) satisfies the
// wait, but the second (200) shows the chain still producing blocks past the
// upgrade height, so the subsequent halt check must fail.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_WaitSuccessThenChainHaltError(t *testing.T) {
	// Heights returned in sequence by successive GetBlockHeight calls.
	heights := []int64{100, 200}
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageWaitingForHeight
	state.ProposalID = 70
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) {
				h := heights[0]
				heights = heights[1:]
				return h, nil
			}},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "upgrade proposal may have failed") {
		t.Fatalf("unexpected error: %v", err)
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// Resuming at SwitchingBinary when binary-cache activation fails must
// surface "failed to activate binary" and populate the output error.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_SwitchError(t *testing.T) {
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageSwitchingBinary
	state.ProposalID = 80
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		switchUC: NewSwitchBinaryUseCase(
			&mockDevnetRepo{},
			&mockNodeRepo{},
			&mockProcessExecutor{},
			&mockBinaryCache{setActiveFunc: func(ref string) error { return errors.New("activate failed") }},
			&testLogger{},
		),
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{
		HomeDir:      "/tmp/devnet",
		Mode:         types.ExecutionModeLocal,
		TargetBinary: "/tmp/new-stabled",
		CacheRef:     "cache-ref",
	}, state, time.Now())
	if err == nil || !strings.Contains(err.Error(), "failed to activate binary") {
		t.Fatalf("unexpected error: %v", err)
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// With WithExport set, a failing post-upgrade export must NOT fail the
// resume: the run still returns Success with a nil error.
func TestResumableExecuteUpgradeUseCase_ExecuteWithGovResumable_PostExportFailureNonFatal(t *testing.T) {
	// Two increasing heights past the upgrade height let verification pass.
	heights := []int64{300, 301}
	state := ports.NewUpgradeState("v2", "local", false)
	state.Stage = ports.ResumableStageVerifyingResume
	state.ProposalID = 90
	state.UpgradeHeight = 100

	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) {
				h := heights[0]
				heights = heights[1:]
				return h, nil
			}},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		exportUC: &mockExportUseCase{executeFunc: func(ctx context.Context, input interface{}) (interface{}, error) {
			return nil, errors.New("post export failed")
		}},
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	out, err := uc.executeWithGovResumable(context.Background(), dto.ExecuteUpgradeInput{
		HomeDir:    "/tmp/devnet",
		WithExport: true,
	}, state, time.Now())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !out.Success {
		t.Fatalf("expected success despite post-export failure")
	}
}

// Happy path for the skip-governance resume flow starting from Initialized:
// binary switch succeeds and two increasing mocked heights verify the chain
// is producing blocks, yielding a successful output.
func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovResumable_FromInitializedSuccess(t *testing.T) {
	heights := []int64{500, 501}
	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient: &mockRPCClient{getBlockHeightFunc: func(ctx context.Context) (int64, error) {
				h := heights[0]
				heights = heights[1:]
				return h, nil
			}},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		switchUC:      NewSwitchBinaryUseCase(&mockDevnetRepo{}, &mockNodeRepo{}, &mockProcessExecutor{}, &mockBinaryCache{}, &testLogger{}),
		stateManager:  &mockStateManager{},
		transitioner:  NewStateTransitioner(),
		stateDetector: &mockStateDetector{},
		logger:        &testLogger{},
	}

	// skipGov = true (third NewUpgradeState argument).
	state := ports.NewUpgradeState("v2", "local", true)
	state.Stage = ports.ResumableStageInitialized

	out, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{
		HomeDir:      "/tmp/devnet",
		Mode:         types.ExecutionModeLocal,
		TargetBinary: "/tmp/new-stabled",
		CacheRef:     "cache-ref",
	}, state, time.Now())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !out.Success {
		t.Fatalf("expected successful skip-gov resumable output")
	}
}

// Skip-governance resume at VerifyingResume with an already-cancelled
// context must return a verify error with the output error populated.
func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovResumable_VerifyError(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front so verification fails immediately

	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			rpcClient:  &mockRPCClient{},
			devnetRepo: &mockDevnetRepo{},
			logger:     &testLogger{},
		},
		stateManager: &mockStateManager{},
		transitioner: NewStateTransitioner(),
		logger:       &testLogger{},
	}

	state := ports.NewUpgradeState("v2", "local", true)
	state.Stage = ports.ResumableStageVerifyingResume

	out, err := uc.executeSkipGovResumable(ctx, dto.ExecuteUpgradeInput{HomeDir: "/tmp/devnet"}, state, time.Now())
	if err == nil {
		t.Fatalf("expected verify error")
	}
	if out == nil || out.Error == nil {
		t.Fatalf("expected output error")
	}
}

// Resuming skip-governance at Completed must succeed even when loading
// devnet metadata fails, provided a TargetVersion is supplied in the input.
func TestResumableExecuteUpgradeUseCase_ExecuteSkipGovResumable_CompletedWithTargetVersion(t *testing.T) {
	uc := &ResumableExecuteUpgradeUseCase{
		executeUC: &ExecuteUpgradeUseCase{
			devnetRepo: &mockDevnetRepo{loadFunc: func(ctx context.Context, homeDir string) (*ports.DevnetMetadata, error) {
				return nil, errors.New("load fail")
			}},
			logger: &testLogger{},
		},
		stateManager: &mockStateManager{},
		logger:       &testLogger{},
	}

	state := ports.NewUpgradeState("v2", "local", true)
	state.Stage = ports.ResumableStageCompleted

	out, err := uc.executeSkipGovResumable(context.Background(), dto.ExecuteUpgradeInput{
		HomeDir:       "/tmp/devnet",
		TargetVersion: "v2.0.0",
	}, state, time.Now())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !out.Success {
		t.Fatalf("expected success output")
	}
}