From 0c438d1622257f22e613b30f7417f3f6c53e59ec Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Tue, 23 Mar 2021 15:11:12 +0100
Subject: [PATCH 1/9] make: add Makefile

In preparation for getting rid of the clunky-to-handle gotest.sh
script, we add a Makefile that behaves mostly the same way as lnd's
Makefile.
---
 .gitignore |   2 +-
 Makefile   | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 116 insertions(+), 1 deletion(-)
 create mode 100644 Makefile

diff --git a/.gitignore b/.gitignore
index bf5349c45..52c3bddd1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,4 +27,4 @@ vendor/
 breakpoints.txt
 
 # coverage output
-profile.cov
+coverage.txt

diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..7f8bbc97b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,115 @@
+PKG := github.com/lightninglabs/neutrino
+
+LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
+GOACC_PKG := github.com/ory/go-acc
+GOIMPORTS_PKG := golang.org/x/tools/cmd/goimports
+
+GO_BIN := ${GOPATH}/bin
+LINT_BIN := $(GO_BIN)/golangci-lint
+GOACC_BIN := $(GO_BIN)/go-acc
+
+LINT_COMMIT := v1.18.0
+GOACC_COMMIT := ddc355013f90fea78d83d3a6c71f1d37ac07ecd5
+
+DEPGET := cd /tmp && GO111MODULE=on go get -v
+GOBUILD := GO111MODULE=on go build -v
+GOINSTALL := GO111MODULE=on go install -v
+GOTEST := GO111MODULE=on go test
+
+GOLIST := go list -deps $(PKG)/... | grep '$(PKG)'
+GOLIST_COVER := $$(go list -deps $(PKG)/... | grep '$(PKG)')
+GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
+
+RM := rm -f
+CP := cp
+MAKE := make
+XARGS := xargs -L 1
+
+# Linting uses a lot of memory, so keep it under control by limiting the number
+# of workers if requested.
+ifneq ($(workers),)
+LINT_WORKERS = --concurrency=$(workers)
+endif
+
+LINT = $(LINT_BIN) run -v $(LINT_WORKERS)
+
+GREEN := "\\033[0;32m"
+NC := "\\033[0m"
+define print
+	echo $(GREEN)$1$(NC)
+endef
+
+default: build
+
+all: build check
+
+# ============
+# DEPENDENCIES
+# ============
+
+$(LINT_BIN):
+	@$(call print, "Fetching linter")
+	$(DEPGET) $(LINT_PKG)@$(LINT_COMMIT)
+
+$(GOACC_BIN):
+	@$(call print, "Fetching go-acc")
+	$(DEPGET) $(GOACC_PKG)@$(GOACC_COMMIT)
+
+goimports:
+	@$(call print, "Installing goimports.")
+	$(DEPGET) $(GOIMPORTS_PKG)
+
+# ============
+# INSTALLATION
+# ============
+
+build:
+	@$(call print, "Compiling neutrino.")
+	$(GOBUILD) $(PKG)/...
+
+# =======
+# TESTING
+# =======
+
+check: unit
+
+unit:
+	@$(call print, "Running unit tests.")
+	$(GOLIST) | $(XARGS) env $(GOTEST)
+
+unit-cover: $(GOACC_BIN)
+	@$(call print, "Running unit coverage tests.")
+	$(GOACC_BIN) $(GOLIST_COVER)
+
+unit-race:
+	@$(call print, "Running unit race tests.")
+	env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOLIST) | $(XARGS) env $(GOTEST) -race
+
+# =========
+# UTILITIES
+# =========
+
+fmt: goimports
+	@$(call print, "Fixing imports.")
+	goimports -w $(GOFILES_NOVENDOR)
+	@$(call print, "Formatting source.")
+	gofmt -l -w -s $(GOFILES_NOVENDOR)
+
+lint: $(LINT_BIN)
+	@$(call print, "Linting source.")
+	$(LINT)
+
+clean:
+	@$(call print, "Cleaning source.$(NC)")
+	$(RM) coverage.txt
+
+.PHONY: all \
+	default \
+	build \
+	check \
+	unit \
+	unit-cover \
+	unit-race \
+	fmt \
+	lint \
+	clean

From e1caab74d4ae60b0ff379e94713b6bcbef84cd1e Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Tue, 23 Mar 2021 15:18:11 +0100
Subject: [PATCH 2/9] lint: add new linter configuration

We use golangci-lint in lnd and it has quite a few more detections
enabled than what's in the current gotest.sh script. We don't start
with a given base commit on purpose but instead fix everything the
linter finds in the following commits.
---
 .golangci.yml | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 .golangci.yml

diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 000000000..5f055f8be
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,51 @@
+run:
+  # timeout for analysis
+  deadline: 10m
+
+linters-settings:
+  govet:
+    # Don't report about shadowed variables
+    check-shadowing: false
+  gofmt:
+    # simplify code: gofmt with `-s` option, true by default
+    simplify: true
+
+linters:
+  enable-all: true
+  disable:
+    # Global variables are used in many places throughout the code base.
+    - gochecknoglobals
+
+    # Some lines are over 80 characters on purpose and we don't want to make them
+    # even longer by marking them as 'nolint'.
+    - lll
+
+    # We don't care (enough) about misaligned structs to lint that.
+    - maligned
+
+    # We have long functions, especially in tests. Moving or renaming those would
+    # trigger funlen problems that we may not want to solve at that time.
+    - funlen
+
+    # Disable for now as we haven't yet tuned the sensitivity to our codebase.
+    # Enabling it by default, for example, would force new contributors to
+    # potentially refactor code extensively when they only want a smaller
+    # change to land.
+    - gocyclo
+
+    # Instances of table driven tests that don't pre-allocate shouldn't trigger
+    # the linter.
+    - prealloc
+
+    # Init functions are used by loggers throughout the codebase.
+    - gochecknoinits
+
+issues:
+  exclude-rules:
+    # Exclude gosec from running for tests so that tests with weak randomness
+    # (math/rand) will pass the linter.
+    - path: _test\.go
+      linters:
+        - gosec
+        - errcheck
+        - dupl

From 098f60cb86c517424ec878bbf631af94c35838fb Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Tue, 23 Mar 2021 16:32:25 +0100
Subject: [PATCH 3/9] neutrino+blockntfns: fix pointer to loop variable

This is a common mistake in Go programs: you should never take the
address of a loop variable to pass it into a function or method. The
address stays the same while its content changes with each iteration.
This might even explain some of the flakes we're seeing with Neutrino
in the lnd itests.
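
As a minimal illustrative sketch of the pattern (not code from this
change; values and process are placeholder names):

    // Buggy: &v always points at the same loop variable, so every
    // goroutine may observe the value of a later (or the last)
    // iteration by the time it dereferences the pointer.
    for _, v := range values {
        go func() {
            process(&v)
        }()
    }

    // Fixed: shadow the loop variable to take a per-iteration copy,
    // then hand out the address of that copy. Passing the variable as
    // a parameter to the closure works just as well, which is the
    // approach taken in blockntfns/manager.go below.
    for _, v := range values {
        v := v
        go func() {
            process(&v)
        }()
    }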
---
 batch_spend_reporter.go | 7 +++++--
 blockntfns/manager.go   | 6 +++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/batch_spend_reporter.go b/batch_spend_reporter.go
index a51f99c88..88b660bdd 100644
--- a/batch_spend_reporter.go
+++ b/batch_spend_reporter.go
@@ -61,6 +61,8 @@ func (b *batchSpendReporter) NotifyUnspentAndUnfound() {
 	log.Debugf("Finished batch, %d unspent outpoints", len(b.requests))
 
 	for outpoint, requests := range b.requests {
+		op := outpoint
+
 		// A nil SpendReport indicates the output was not found.
 		tx, ok := b.initialTxns[outpoint]
 		if !ok {
 				outpoint)
 		}
 
-		b.notifyRequests(&outpoint, requests, tx, nil)
+		b.notifyRequests(&op, requests, tx, nil)
 	}
 }
 
@@ -78,7 +80,8 @@ // return reporter.FailRemaining(err)
 func (b *batchSpendReporter) FailRemaining(err error) error {
 	for outpoint, requests := range b.requests {
-		b.notifyRequests(&outpoint, requests, nil, err)
+		op := outpoint
+		b.notifyRequests(&op, requests, nil, err)
 	}
 	return err
 }

diff --git a/blockntfns/manager.go b/blockntfns/manager.go
index 24e2675f4..17c823905 100644
--- a/blockntfns/manager.go
+++ b/blockntfns/manager.go
@@ -139,10 +139,10 @@ func (m *SubscriptionManager) Stop() {
 	var wg sync.WaitGroup
 	wg.Add(len(m.subscribers))
 	for _, subscriber := range m.subscribers {
-		go func() {
+		go func(s *newSubscription) {
 			defer wg.Done()
-			subscriber.cancel()
-		}()
+			s.cancel()
+		}(subscriber)
 	}
 
 	wg.Wait()

From b6f2200afa3077ba4729914b6d192fedc967c28e Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Tue, 23 Mar 2021 15:17:14 +0100
Subject: [PATCH 4/9] multi: remove unused code

The new linter configuration detected a lot of unused code that we're
removing with this commit.
--- bamboozle_unit_test.go | 51 ++++++++++----------------------------- blockmanager.go | 22 ----------------- blockntfns/log.go | 17 ------------- filterdb/db.go | 2 -- headerfs/index.go | 6 ----- log.go | 16 ------------ neutrino.go | 48 ------------------------------------ pushtx/log.go | 17 ------------- query.go | 22 ----------------- query/log.go | 17 ------------- query/workmanager_test.go | 7 ------ sync_test.go | 7 +----- 12 files changed, 14 insertions(+), 218 deletions(-) diff --git a/bamboozle_unit_test.go b/bamboozle_unit_test.go index e6903b348..d9d62ab8b 100644 --- a/bamboozle_unit_test.go +++ b/bamboozle_unit_test.go @@ -42,7 +42,6 @@ type checkCFHTestCase struct { type resolveFilterTestCase struct { name string - block *wire.MsgBlock banThreshold int peerFilters map[string]*gcs.Filter badPeers []string @@ -210,17 +209,6 @@ var ( decodeHashNoError("01234567890abcdeffedcba09f76543210"), }, } - headers4 = func() *wire.MsgCFHeaders { - cfh := &wire.MsgCFHeaders{ - FilterHashes: []*chainhash.Hash{ - decodeHashNoError("fedcba09f7654321001234567890abcdef"), - }, - } - filter, _ := builder.BuildBasicFilter(block, nil) - filterHash, _ := builder.GetFilterHash(filter) - cfh.FilterHashes = append(cfh.FilterHashes, &filterHash) - return cfh - }() cfCheckptTestCases = []*cfCheckptTestCase{ { @@ -383,8 +371,7 @@ var ( resolveFilterTestCases = []*resolveFilterTestCase{ { - name: "all bad 1", - block: block, + name: "all bad 1", peerFilters: map[string]*gcs.Filter{ "a": fakeFilter1, "b": fakeFilter1, @@ -393,8 +380,7 @@ var ( badPeers: []string{"a", "b"}, }, { - name: "all bad 2", - block: block, + name: "all bad 2", peerFilters: map[string]*gcs.Filter{ "a": fakeFilter2, "b": fakeFilter2, @@ -403,8 +389,7 @@ var ( badPeers: []string{"a", "b"}, }, { - name: "all bad 3", - block: block, + name: "all bad 3", peerFilters: map[string]*gcs.Filter{ "a": fakeFilter2, "b": fakeFilter2, @@ -413,8 +398,7 @@ var ( badPeers: []string{"a", "b"}, }, { - name: "all bad 4", - block: block, + name: "all bad 4", peerFilters: map[string]*gcs.Filter{ "a": fakeFilter1, "b": fakeFilter2, @@ -423,8 +407,7 @@ var ( badPeers: []string{"a", "b"}, }, { - name: "all bad 5", - block: block, + name: "all bad 5", peerFilters: map[string]*gcs.Filter{ "a": fakeFilter2, "b": fakeFilter1, @@ -433,8 +416,7 @@ var ( badPeers: []string{"a", "b"}, }, { - name: "one good", - block: block, + name: "one good", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": fakeFilter1, @@ -444,8 +426,7 @@ var ( badPeers: []string{"b", "c"}, }, { - name: "all good", - block: block, + name: "all good", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": correctFilter, @@ -456,8 +437,7 @@ var ( { // One peer is serving a filter tha lacks an element, // we should immediately notice this and ban it. - name: "filter missing element", - block: block, + name: "filter missing element", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": correctFilter, @@ -470,8 +450,7 @@ var ( // One peer is serving the "old-old" filter which // contains all OP_RETURN output, we expect this peer // to be banned first. - name: "old old peer", - block: block, + name: "old old peer", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": oldFilter, @@ -484,8 +463,7 @@ var ( // One peer is serving the "old" filter, which contains // non-push OP_RETURNS. We expect this peer to be // banned. 
- name: "old peer", - block: block, + name: "old peer", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": oldFilter, @@ -497,8 +475,7 @@ var ( { // We should go with the majority filter in case we // cannot determine who is serving an invalid one. - name: "majority filter", - block: block, + name: "majority filter", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": correctFilter, @@ -510,8 +487,7 @@ var ( { // We should go with the majority filter in case we // cannot determine who is serving an invalid one. - name: "majority filter 2", - block: block, + name: "majority filter 2", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": correctFilter, @@ -523,8 +499,7 @@ var ( { // If we need at least 3 peers to consider a filter // consistent, we shuold fail. - name: "majority filter 3", - block: block, + name: "majority filter 3", peerFilters: map[string]*gcs.Filter{ "a": correctFilter, "b": correctFilter, diff --git a/blockmanager.go b/blockmanager.go index 057f3a8b1..9ea30b491 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -49,21 +49,6 @@ const ( maxCFCheckptsPerQuery = wire.MaxCFHeadersPerMsg / wire.CFCheckptInterval ) -// filterStoreLookup -type filterStoreLookup func(*ChainService) *headerfs.FilterHeaderStore - -var ( - // filterTypes is a map of filter types to synchronize to a lookup - // function for the service's store for that filter type. - filterTypes = map[wire.FilterType]filterStoreLookup{ - wire.GCSFilterRegular: func( - s *ChainService) *headerfs.FilterHeaderStore { - - return s.RegFilterHeaders - }, - } -) - // zeroHash is the zero value hash (all zeros). It is defined as a convenience. var zeroHash chainhash.Hash @@ -91,13 +76,6 @@ type donePeerMsg struct { peer *ServerPeer } -// txMsg packages a bitcoin tx message and the peer it came from together -// so the block handler has access to that information. -type txMsg struct { - tx *btcutil.Tx - peer *ServerPeer -} - // blockManagerCfg holds options and dependencies needed by the blockManager // during operation. type blockManagerCfg struct { diff --git a/blockntfns/log.go b/blockntfns/log.go index 0e10441de..64c3ef205 100644 --- a/blockntfns/log.go +++ b/blockntfns/log.go @@ -24,20 +24,3 @@ func DisableLog() { func UseLogger(logger btclog.Logger) { log = logger } - -// LogClosure is a closure that can be printed with %v to be used to -// generate expensive-to-create data for a detailed log level and avoid doing -// the work if the data isn't printed. -type logClosure func() string - -// String invokes the log closure and returns the results string. -func (c logClosure) String() string { - return c() -} - -// newLogClosure returns a new closure over the passed function which allows -// it to be used as a parameter in a logging function that is only invoked when -// the logging level is such that the message will actually be logged. -func newLogClosure(c func() string) logClosure { - return logClosure(c) -} diff --git a/filterdb/db.go b/filterdb/db.go index 4f7d312cb..c0bcc126b 100644 --- a/filterdb/db.go +++ b/filterdb/db.go @@ -60,8 +60,6 @@ type FilterDatabase interface { // backed by boltdb. 
type FilterStore struct { db walletdb.DB - - chainParams chaincfg.Params } // A compile-time check to ensure the FilterStore adheres to the FilterDatabase diff --git a/headerfs/index.go b/headerfs/index.go index 3d71bb6bb..f6a5c0428 100644 --- a/headerfs/index.go +++ b/headerfs/index.go @@ -26,12 +26,6 @@ var ( // current block hash of the best known chain that the headers for // regular filter are synced to. regFilterTip = []byte("regular") - - // extFilterTip is the key which tracks the "tip" of the extended - // compact filter header chain. The value of this key will be the - // current block hash of the best known chain that the headers for - // extended filter are synced to. - extFilterTip = []byte("ext") ) var ( diff --git a/log.go b/log.go index 1fc784570..748a50e46 100644 --- a/log.go +++ b/log.go @@ -42,19 +42,3 @@ func UseLogger(logger btclog.Logger) { connmgr.UseLogger(logger) query.UseLogger(logger) } - -// logClosure is used to provide a closure over expensive logging operations so -// don't have to be performed when the logging level doesn't warrant it. -type logClosure func() string - -// String invokes the underlying function and returns the result. -func (c logClosure) String() string { - return c() -} - -// newLogClosure returns a new closure over a function that returns a string -// which itself provides a Stringer interface so that it can be used with the -// logging system. -func newLogClosure(c func() string) logClosure { - return logClosure(c) -} diff --git a/neutrino.go b/neutrino.go index d217e91eb..fb26133ef 100644 --- a/neutrino.go +++ b/neutrino.go @@ -157,10 +157,7 @@ type ServerPeer struct { connReq *connmgr.ConnReq server *ChainService persistent bool - continueHash *chainhash.Hash - requestQueue []*wire.InvVect knownAddresses map[string]struct{} - banScore connmgr.DynamicBanScore quit chan struct{} // The following map of subcribers is used to subscribe to messages @@ -208,51 +205,6 @@ func (sp *ServerPeer) addKnownAddresses(addresses []*wire.NetAddress) { } } -// addressKnown true if the given address is already known to the peer. -func (sp *ServerPeer) addressKnown(na *wire.NetAddress) bool { - _, exists := sp.knownAddresses[addrmgr.NetAddressKey(na)] - return exists -} - -// addBanScore increases the persistent and decaying ban score fields by the -// values passed as parameters. If the resulting score exceeds half of the ban -// threshold, a warning is logged including the reason provided. Further, if -// the score is above the ban threshold, the peer will be banned and -// disconnected. -func (sp *ServerPeer) addBanScore(persistent, transient uint32, reason string) { - // No warning is logged and no score is calculated if banning is - // disabled. - warnThreshold := BanThreshold >> 1 - if transient == 0 && persistent == 0 { - // The score is not being increased, but a warning message is - // still logged if the score is above the warn threshold. 
- score := sp.banScore.Int() - if score > warnThreshold { - log.Warnf("Misbehaving peer %s: %s -- ban score is "+ - "%d, it was not increased this time", sp, - reason, score) - } - return - } - - score := sp.banScore.Increase(persistent, transient) - if score > warnThreshold { - log.Warnf("Misbehaving peer %s: %s -- ban score increased to %d", - sp, reason, score) - - if score > BanThreshold { - peerAddr := sp.Addr() - err := sp.server.BanPeer( - peerAddr, banman.ExceededBanThreshold, - ) - if err != nil { - log.Errorf("Unable to ban peer %v: %v", - peerAddr, err) - } - } - } -} - // OnVerAck is invoked when a peer receives a verack bitcoin message and is used // to kick start communication with them. func (sp *ServerPeer) OnVerAck(_ *peer.Peer, msg *wire.MsgVerAck) { diff --git a/pushtx/log.go b/pushtx/log.go index 030b5e374..c3a378622 100644 --- a/pushtx/log.go +++ b/pushtx/log.go @@ -24,20 +24,3 @@ func DisableLog() { func UseLogger(logger btclog.Logger) { log = logger } - -// LogClosure is a closure that can be printed with %v to be used to -// generate expensive-to-create data for a detailed log level and avoid doing -// the work if the data isn't printed. -type logClosure func() string - -// String invokes the log closure and returns the results string. -func (c logClosure) String() string { - return c() -} - -// newLogClosure returns a new closure over the passed function which allows -// it to be used as a parameter in a logging function that is only invoked when -// the logging level is such that the message will actually be logged. -func newLogClosure(c func() string) logClosure { - return logClosure(c) -} diff --git a/query.go b/query.go index 08907b5f8..6722eafd0 100644 --- a/query.go +++ b/query.go @@ -197,28 +197,6 @@ func MaxBatchSize(maxSize int64) QueryOption { } } -// queryState is an atomically updated per-query state for each query in a -// batch. -// -// State transitions are: -// -// * queryWaitSubmit->queryWaitResponse - send query to peer -// * queryWaitResponse->queryWaitSubmit - query timeout with no acceptable -// response -// * queryWaitResponse->queryAnswered - acceptable response to query received -type queryState uint32 - -const ( - // Waiting to be submitted to a peer. - queryWaitSubmit queryState = iota - - // Submitted to a peer, waiting for reply. - queryWaitResponse - - // Valid reply received. - queryAnswered -) - // We provide 3 kinds of queries: // // * queryAllPeers allows a single query to be broadcast to all peers, and diff --git a/query/log.go b/query/log.go index a907d6e8e..9b99795f7 100644 --- a/query/log.go +++ b/query/log.go @@ -24,20 +24,3 @@ func DisableLog() { func UseLogger(logger btclog.Logger) { log = logger } - -// LogClosure is a closure that can be printed with %v to be used to -// generate expensive-to-create data for a detailed log level and avoid doing -// the work if the data isn't printed. -type logClosure func() string - -// String invokes the log closure and returns the results string. -func (c logClosure) String() string { - return c() -} - -// newLogClosure returns a new closure over the passed function which allows -// it to be used as a parameter in a logging function that is only invoked when -// the logging level is such that the message will actually be logged. 
-func newLogClosure(c func() string) logClosure {
-	return logClosure(c)
-}
diff --git a/query/workmanager_test.go b/query/workmanager_test.go
index 035d3e515..670f9a67a 100644
--- a/query/workmanager_test.go
+++ b/query/workmanager_test.go
@@ -15,10 +15,6 @@ type mockWorker struct {
 
 var _ Worker = (*mockWorker)(nil)
 
-func (m *mockWorker) exited() <-chan struct{} {
-	return nil
-}
-
 func (m *mockWorker) NewJob() chan<- *queryJob {
 	return m.nextJob
 }
@@ -59,9 +55,6 @@ func (p *mockPeerRanking) Order(peers []string) {
 	})
 }
 
-func (p *mockPeerRanking) addPeer(peer string) {
-}
-
 func (p *mockPeerRanking) Punish(peer string) {
 }
 
diff --git a/sync_test.go b/sync_test.go
index a24ee2161..b7449feb0 100644
--- a/sync_test.go
+++ b/sync_test.go
@@ -251,10 +251,6 @@ func newSecSource(params *chaincfg.Params) *secSource {
 	}
 }
 
-type testLogger struct {
-	t *testing.T
-}
-
 type neutrinoHarness struct {
 	h1, h2, h3 *rpctest.Harness
 	svc        *neutrino.ChainService
@@ -305,7 +301,7 @@ var (
 	secSrc                    *secSource
 	addr1, addr2, addr3       btcutil.Address
 	script1, script2, script3 []byte
-	tx1, tx2, tx3             *wire.MsgTx
+	tx1, tx2                  *wire.MsgTx
 	ourOutPoint               wire.OutPoint
 )
@@ -995,7 +991,6 @@ func testRandomBlocks(harness *neutrinoHarness, t *testing.T) {
 	if lastErr != nil {
 		t.Fatal(lastErr)
 	}
-	return
 }
 
 func TestNeutrinoSync(t *testing.T) {

From 8e249845b49f5ec539d1bb22562d9e81995cafff Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Tue, 23 Mar 2021 16:04:52 +0100
Subject: [PATCH 5/9] multi: fix multiple linter issues

We want to start with a clean slate when it comes to the linter so we
fix all issues it brings up. Unfortunately there are some false
positives for struct members that are only used as part of an embedded
sub-struct.
---
 bamboozle_unit_test.go                 |  3 +++
 banman/codec.go                        |  4 +--
 banman/codec_test.go                   |  1 +
 banman/store_test.go                   |  4 +--
 banman/util_test.go                    |  1 +
 batch_spend_reporter.go                |  2 +-
 blockmanager.go                        | 34 +++++++++++++-----------
 blockmanager_test.go                   | 19 +++++++++-----
 blockntfns/manager_test.go             |  8 +++---
 cache/cache_test.go                    |  6 +++--
 cache/lru/lru.go                       |  2 +-
 cache/lru/lru_test.go                  | 14 +++++-----
 chainsync/filtercontrol_test.go        |  2 +-
 headerfs/file.go                       |  4 +--
 headerfs/index.go                      |  2 +-
 headerfs/index_test.go                 |  2 +-
 headerfs/store.go                      |  8 +++---
 headerfs/store_test.go                 |  1 +
 headerlist/bounded_header_list.go      |  2 +-
 headerlist/bounded_header_list_test.go |  3 ---
 neutrino.go                            |  6 ++---
 notifications.go                       |  2 +-
 pushtx/broadcaster.go                  | 10 +++----
 query.go                               | 24 ++++++++++-------
 query/interface.go                     |  4 +--
 query/worker.go                        |  6 ++---
 query/workmanager.go                   |  2 +-
 query/workmanager_test.go              | 16 +++++-------
 query_test.go                          |  8 +++---
 rescan.go                              | 36 +++++++++++++-------------
 rescan_test.go                         |  4 +--
 sync_test.go                           | 13 +++++-----
 utxoscanner_test.go                    |  4 +--
 33 files changed, 138 insertions(+), 119 deletions(-)

diff --git a/bamboozle_unit_test.go b/bamboozle_unit_test.go
index d9d62ab8b..c3373fbde 100644
--- a/bamboozle_unit_test.go
+++ b/bamboozle_unit_test.go
@@ -631,6 +631,7 @@ func TestCheckCFCheckptSanity(t *testing.T) {
 	t.Parallel()
 
 	for _, testCase := range cfCheckptTestCases {
+		testCase := testCase
 		t.Run(testCase.name, func(t *testing.T) {
 			runCheckCFCheckptSanityTestCase(t, testCase)
 		})
@@ -641,6 +642,7 @@ func TestCheckForCFHeadersMismatch(t *testing.T) {
 	t.Parallel()
 
 	for _, testCase := range checkCFHTestCases {
+		testCase := testCase
 		t.Run(testCase.name, func(t *testing.T) {
 			mismatch := checkForCFHeaderMismatch(
 				testCase.headers, testCase.idx,
@@ -677,6 +679,7 @@ func TestResolveFilterMismatchFromBlock(t *testing.T) {
 	}
 
 	for
_, testCase := range resolveFilterTestCases { + testCase := testCase t.Run(testCase.name, func(t *testing.T) { badPeers, err := resolveFilterMismatchFromBlock( block, wire.GCSFilterRegular, testCase.peerFilters, diff --git a/banman/codec.go b/banman/codec.go index e3aaa7bb9..f7113f3c8 100644 --- a/banman/codec.go +++ b/banman/codec.go @@ -72,11 +72,11 @@ func decodeIPNet(r io.Reader) (*net.IPNet, error) { // Once we have the type and its corresponding length, attempt to read // it and its mask. ip := make([]byte, ipLen) - if _, err := r.Read(ip[:]); err != nil { + if _, err := r.Read(ip); err != nil { return nil, err } mask := make([]byte, ipLen) - if _, err := r.Read(mask[:]); err != nil { + if _, err := r.Read(mask); err != nil { return nil, err } return &net.IPNet{IP: ip, Mask: mask}, nil diff --git a/banman/codec_test.go b/banman/codec_test.go index 5f306888a..eb2fd046a 100644 --- a/banman/codec_test.go +++ b/banman/codec_test.go @@ -66,6 +66,7 @@ func TestIPNetSerialization(t *testing.T) { } for _, testCase := range testCases { + testCase := testCase success := t.Run(testCase.name, func(t *testing.T) { // Serialize the IP network and deserialize it back. // We'll do this to ensure we are properly serializing diff --git a/banman/store_test.go b/banman/store_test.go index 81dfe6466..e4b5f8c40 100644 --- a/banman/store_test.go +++ b/banman/store_test.go @@ -80,8 +80,8 @@ func TestBanStore(t *testing.T) { t.Fatalf("expected ban reason \"%v\", got \"%v\"", reason, banStatus.Reason) } - banDuration := banStatus.Expiration.Sub(time.Now()) - if banStatus.Expiration.Sub(time.Now()) > duration { + banDuration := time.Until(banStatus.Expiration) + if banDuration > duration { t.Fatalf("expected ban duration to be within %v, got %v", duration, banDuration) } diff --git a/banman/util_test.go b/banman/util_test.go index a3f0c3631..ebcb880a1 100644 --- a/banman/util_test.go +++ b/banman/util_test.go @@ -56,6 +56,7 @@ func TestParseIPNet(t *testing.T) { } for _, testCase := range testCases { + testCase := testCase success := t.Run(testCase.name, func(t *testing.T) { // Parse the IP network from each test's address and // mask. diff --git a/batch_spend_reporter.go b/batch_spend_reporter.go index 88b660bdd..5cb955912 100644 --- a/batch_spend_reporter.go +++ b/batch_spend_reporter.go @@ -27,7 +27,7 @@ type batchSpendReporter struct { outpoints map[wire.OutPoint][]byte // filterEntries holds the current set of watched outpoint, and is - // applied to cfilters to guage whether we should download the block. + // applied to cfilters to gauge whether we should download the block. // // NOTE: This watchlist is updated during each call to ProcessBlock. filterEntries [][]byte diff --git a/blockmanager.go b/blockmanager.go index 9ea30b491..0713546fa 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -29,7 +29,7 @@ import ( const ( // maxTimeOffset is the maximum duration a block time is allowed to be - // ahead of the curent time. This is currently 2 hours. + // ahead of the current time. This is currently 2 hours. maxTimeOffset = 2 * time.Hour // numMaxMemHeaders is the max number of headers to store in memory for @@ -402,7 +402,7 @@ func (b *blockManager) handleNewPeerMsg(peers *list.List, sp *ServerPeer) { return } stopHash := &zeroHash - sp.PushGetHeadersMsg(locator, stopHash) + _ = sp.PushGetHeadersMsg(locator, stopHash) } // Start syncing by choosing the best candidate if needed. @@ -802,7 +802,7 @@ func (b *blockManager) getUncheckpointedCFHeaders( // set of peers. 
pristineHeaders, ok := headers[key] if !ok { - return fmt.Errorf("All peers served bogus headers! Retrying " + + return fmt.Errorf("all peers served bogus headers, retrying " + "with new set") } @@ -985,15 +985,14 @@ func (b *blockManager) getCheckpointedCFHeaders(checkpoints []*chainhash.Hash, // maxCFCheckptsPerQuery unless we don't have enough checkpoints // to do so. In that case, our query will consist of whatever is // left. - startHeightRange := uint32( - currentInterval*wire.CFCheckptInterval, - ) + 1 + startHeightRange := + (currentInterval * wire.CFCheckptInterval) + 1 nextInterval := currentInterval + maxCFCheckptsPerQuery if nextInterval > uint32(len(checkpoints)) { nextInterval = uint32(len(checkpoints)) } - endHeightRange := uint32(nextInterval * wire.CFCheckptInterval) + endHeightRange := nextInterval * wire.CFCheckptInterval log.Tracef("Checkpointed cfheaders request start_range=%v, "+ "end_range=%v", startHeightRange, endHeightRange) @@ -1012,7 +1011,7 @@ func (b *blockManager) getCheckpointedCFHeaders(checkpoints []*chainhash.Hash, // Once we have the stop hash, we can construct the query // message itself. queryMsg := wire.NewMsgGetCFHeaders( - fType, uint32(startHeightRange), &stopHash, + fType, startHeightRange, &stopHash, ) // We'll mark that the ith interval is queried by this message, @@ -1070,7 +1069,7 @@ func (b *blockManager) getCheckpointedCFHeaders(checkpoints []*chainhash.Hash, return } - // The query did finish succesfully, but continue to + // The query did finish successfully, but continue to // allow picking up the last header sent on the // headerChan. continue @@ -1191,7 +1190,7 @@ func (b *blockManager) writeCFHeadersMsg(msg *wire.MsgCFHeaders, } if *tip != msg.PrevFilterHeader { return nil, 0, fmt.Errorf("attempt to write cfheaders out of "+ - "order! Tip=%v (height=%v), prev_hash=%v.", *tip, + "order, tip=%v (height=%v), prev_hash=%v", *tip, tipHeight, msg.PrevFilterHeader) } @@ -1785,6 +1784,9 @@ func (b *blockManager) getCFHeadersForAllPeers(height uint32, // or at the end of the maximum-size response message, whichever is // larger. stopHeader, stopHeight, err := b.cfg.BlockHeaders.ChainTip() + if err != nil { + return nil, 0 + } if stopHeight-height >= wire.MaxCFHeadersPerMsg { stopHeader, err = b.cfg.BlockHeaders.FetchHeaderByHeight( height + wire.MaxCFHeadersPerMsg - 1, @@ -1808,8 +1810,9 @@ func (b *blockManager) getCFHeadersForAllPeers(height uint32, msg, func(sp *ServerPeer, resp wire.Message, quit chan<- struct{}, peerQuit chan<- struct{}) { - switch m := resp.(type) { - case *wire.MsgCFHeaders: + + m, isHeaders := resp.(*wire.MsgCFHeaders) + if isHeaders { if m.StopHash == stopHash && m.FilterType == fType && len(m.FilterHashes) == numHeaders { @@ -1894,8 +1897,9 @@ func (b *blockManager) getCheckpts(lastHash *chainhash.Hash, getCheckptMsg, func(sp *ServerPeer, resp wire.Message, quit chan<- struct{}, peerQuit chan<- struct{}) { - switch m := resp.(type) { - case *wire.MsgCFCheckpt: + + m, isCheckpoint := resp.(*wire.MsgCFCheckpt) + if isCheckpoint { if m.FilterType == fType && m.StopHash == *lastHash { checkpoints[sp.Addr()] = m.FilterHeaders @@ -2164,7 +2168,7 @@ func (b *blockManager) startSync(peers *list.List) { // With our stop hash selected, we'll kick off the sync from // this peer with an initial GetHeaders message. 
- b.SyncPeer().PushGetHeadersMsg(locator, stopHash) + _ = b.SyncPeer().PushGetHeadersMsg(locator, stopHash) } else { log.Warnf("No sync peer candidates available") } diff --git a/blockmanager_test.go b/blockmanager_test.go index aad1ce990..9833d4766 100644 --- a/blockmanager_test.go +++ b/blockmanager_test.go @@ -120,7 +120,7 @@ type headers struct { // generateHeaders generates block headers, filter header and hashes, and // checkpoints from the given genesis. The onCheckpoint method will be called // with the current cf header on each checkpoint to modify the derivation of -// the next interval +// the next interval. func generateHeaders(genesisBlockHeader *wire.BlockHeader, genesisFilterHeader *chainhash.Hash, onCheckpoint func(*chainhash.Hash)) (*headers, error) { @@ -444,8 +444,9 @@ func TestBlockManagerInitialInterval(t *testing.T) { for i := startHeight; i <= maxHeight; i++ { ntfn := <-bm.blockNtfnChan if _, ok := ntfn.(*blockntfns.Connected); !ok { - t.Fatal("expected block connected " + + t.Error("expected block connected " + "notification") + return } } }() @@ -545,6 +546,7 @@ func TestBlockManagerInvalidInterval(t *testing.T) { } for _, test := range testCases { + test := test bm, hdrStore, cfStore, cleanUp, err := setupBlockManager() if err != nil { t.Fatalf("unable to set up ChainService: %v", err) @@ -664,16 +666,18 @@ func TestBlockManagerInvalidInterval(t *testing.T) { ) if i == test.firstInvalid { if progress.Finished { - t.Fatalf("expected interval "+ + t.Errorf("expected interval "+ "%d to be invalid", i) + return } errChan <- fmt.Errorf("invalid interval") break } if !progress.Finished { - t.Fatalf("expected interval %d to be "+ + t.Errorf("expected interval %d to be "+ "valid", i) + return } } @@ -693,8 +697,9 @@ func TestBlockManagerInvalidInterval(t *testing.T) { for i := startHeight; i <= maxHeight; i++ { ntfn := <-bm.blockNtfnChan if _, ok := ntfn.(*blockntfns.Connected); !ok { - t.Fatal("expected block connected " + + t.Error("expected block connected " + "notification") + return } } }() @@ -789,7 +794,7 @@ func TestBlockManagerDetectBadPeers(t *testing.T) { peers = []string{"good1:1", "good2:1", "bad:1", "good3:1"} expBad = map[string]struct{}{ - "bad:1": struct{}{}, + "bad:1": {}, } ) @@ -875,7 +880,7 @@ func TestBlockManagerDetectBadPeers(t *testing.T) { } for i := uint32(0); i < 2*badIndex; i++ { - msg.AddCFHash(&filterHash) + _ = msg.AddCFHash(&filterHash) } headers := make(map[string]*wire.MsgCFHeaders) diff --git a/blockntfns/manager_test.go b/blockntfns/manager_test.go index 4b2882da8..7a3cd2855 100644 --- a/blockntfns/manager_test.go +++ b/blockntfns/manager_test.go @@ -217,7 +217,7 @@ func TestManagerHistoricalBacklog(t *testing.T) { return nil, 0, errors.New("") } - sub, err := subMgr.NewSubscription(0) + _, err := subMgr.NewSubscription(0) if err == nil { t.Fatal("expected registration to fail due to not delivering " + "backlog") @@ -228,13 +228,13 @@ func TestManagerHistoricalBacklog(t *testing.T) { // NotificationsSinceHeight should then return notifications for blocks // 11-20. 
const chainTip uint32 = 20 - subCurrentHeight := uint32(chainTip / 2) + subCurrentHeight := chainTip / 2 numBacklog := chainTip - subCurrentHeight blockSource.blocksSinceHeight = func(uint32) ([]blockntfns.BlockNtfn, uint32, error) { blocks := make([]blockntfns.BlockNtfn, 0, numBacklog) - for i := uint32(subCurrentHeight + 1); i <= chainTip; i++ { + for i := subCurrentHeight + 1; i <= chainTip; i++ { blocks = append(blocks, blockntfns.NewBlockConnected( emptyHeader, i, )) @@ -244,7 +244,7 @@ func TestManagerHistoricalBacklog(t *testing.T) { } // Register a new client with the expected current height. - sub, err = subMgr.NewSubscription(subCurrentHeight) + sub, err := subMgr.NewSubscription(subCurrentHeight) if err != nil { t.Fatalf("unable to register new subscription: %v", err) } diff --git a/cache/cache_test.go b/cache/cache_test.go index d2bbe6183..48dd5c175 100644 --- a/cache/cache_test.go +++ b/cache/cache_test.go @@ -57,7 +57,7 @@ func TestBlockFilterCaches(t *testing.T) { // Put the generated filter in the filter caches. cacheKey := cache.FilterCacheKey{blockHash, filterType} for _, c := range filterCaches { - c.Put(cacheKey, &cache.CacheableFilter{Filter: filter}) + _, _ = c.Put(cacheKey, &cache.CacheableFilter{Filter: filter}) } msgBlock := &wire.MsgBlock{} @@ -70,13 +70,15 @@ func TestBlockFilterCaches(t *testing.T) { wire.InvTypeWitnessBlock, &blockHash, ) for _, c := range blockCaches { - c.Put(*blockKey, &cache.CacheableBlock{block}) + _, _ = c.Put(*blockKey, &cache.CacheableBlock{block}) } } // Now go through the list of block hashes, and make sure we can // retrieve all elements from the caches. for i, blockHash := range blockHashes { + blockHash := blockHash + // Check filter caches. cacheKey := cache.FilterCacheKey{blockHash, filterType} for _, c := range filterCaches { diff --git a/cache/lru/lru.go b/cache/lru/lru.go index a111b09f4..38d7b982f 100644 --- a/cache/lru/lru.go +++ b/cache/lru/lru.go @@ -25,7 +25,7 @@ type Cache struct { // of elements or a number of bytes, decided by the cache.Value's Size. capacity uint64 - // size represents the size of all the elements currenty in the cache. + // size represents the size of all the elements currently in the cache. 
size uint64 // ll is a doubly linked list which keeps track of recency of used diff --git a/cache/lru/lru_test.go b/cache/lru/lru_test.go index 3b5a2b959..b9796eb90 100644 --- a/cache/lru/lru_test.go +++ b/cache/lru/lru_test.go @@ -8,7 +8,7 @@ import ( "github.com/lightninglabs/neutrino/cache" ) -func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { // nolint:unparam if a == b { return } @@ -220,7 +220,7 @@ func TestConcurrencySimple(t *testing.T) { defer wg.Done() _, err := c.Put(i, &sizeable{value: i, size: 1}) if err != nil { - t.Fatal(err) + t.Error(err) } }(i) } @@ -231,7 +231,7 @@ func TestConcurrencySimple(t *testing.T) { defer wg.Done() _, err := c.Get(i) if err != nil && err != cache.ErrElementNotFound { - t.Fatal(err) + t.Error(err) } }(i) } @@ -254,7 +254,7 @@ func TestConcurrencySmallCache(t *testing.T) { defer wg.Done() _, err := c.Put(i, &sizeable{value: i, size: 1}) if err != nil { - t.Fatal(err) + t.Error(err) } }(i) } @@ -265,7 +265,7 @@ func TestConcurrencySmallCache(t *testing.T) { defer wg.Done() _, err := c.Get(i) if err != nil && err != cache.ErrElementNotFound { - t.Fatal(err) + t.Error(err) } }(i) } @@ -288,7 +288,7 @@ func TestConcurrencyBigCache(t *testing.T) { defer wg.Done() _, err := c.Put(i, &sizeable{value: i, size: 1}) if err != nil { - t.Fatal(err) + t.Error(err) } }(i) } @@ -299,7 +299,7 @@ func TestConcurrencyBigCache(t *testing.T) { defer wg.Done() _, err := c.Get(i) if err != nil && err != cache.ErrElementNotFound { - t.Fatal(err) + t.Error(err) } }(i) } diff --git a/chainsync/filtercontrol_test.go b/chainsync/filtercontrol_test.go index 4a090087a..55f7efbaf 100644 --- a/chainsync/filtercontrol_test.go +++ b/chainsync/filtercontrol_test.go @@ -17,7 +17,7 @@ func TestControlCFHeader(t *testing.T) { "4a242283a406a7c089f671bb8df7671e5d5e9ba577cea1047d30a7f4919df193", ) filterHeaderCheckpoints = map[wire.BitcoinNet]map[uint32]*chainhash.Hash{ - chaincfg.MainNetParams.Net: map[uint32]*chainhash.Hash{ + chaincfg.MainNetParams.Net: { height: header, }, } diff --git a/headerfs/file.go b/headerfs/file.go index 1b979d9a2..0a075ce42 100644 --- a/headerfs/file.go +++ b/headerfs/file.go @@ -49,11 +49,11 @@ func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) { // for that number of bytes, and read directly from the file into the // buffer. rawHeader := make([]byte, headerSize) - if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil { + if _, err := h.file.ReadAt(rawHeader, int64(seekDist)); err != nil { return nil, &ErrHeaderNotFound{err} } - return rawHeader[:], nil + return rawHeader, nil } // readHeaderRange will attempt to fetch a series of block headers within the diff --git a/headerfs/index.go b/headerfs/index.go index f6a5c0428..b0b8f4b71 100644 --- a/headerfs/index.go +++ b/headerfs/index.go @@ -142,7 +142,7 @@ func (h headerBatch) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -// addHeaders writes a batch of header entries in a single atomic batch +// addHeaders writes a batch of header entries in a single atomic batch. func (h *headerIndex) addHeaders(batch headerBatch) error { // If we're writing a 0-length batch, make no changes and return. 
if len(batch) == 0 { diff --git a/headerfs/index_test.go b/headerfs/index_test.go index 9ab86d9e2..ad9165e58 100644 --- a/headerfs/index_test.go +++ b/headerfs/index_test.go @@ -79,7 +79,7 @@ func TestAddHeadersIndexRetrieve(t *testing.T) { for i, headerEntry := range headerEntries { height, err := hIndex.heightFromHash(&headerEntry.hash) if err != nil { - t.Fatalf("unable to retreive height(%v): %v", i, err) + t.Fatalf("unable to retrieve height(%v): %v", i, err) } if height != headerEntry.height { t.Fatalf("height doesn't match: expected %v, got %v", diff --git a/headerfs/store.go b/headerfs/store.go index 18c9f523d..ad85dfe46 100644 --- a/headerfs/store.go +++ b/headerfs/store.go @@ -85,14 +85,14 @@ var headerBufPool = sync.Pool{ } // headerStore combines a on-disk set of headers within a flat file in addition -// to a databse which indexes that flat file. Together, these two abstractions +// to a database which indexes that flat file. Together, these two abstractions // can be used in order to build an indexed header store for any type of // "header" as it deals only with raw bytes, and leaves it to a higher layer to // interpret those raw bytes accordingly. // -// TODO(roasbeef): quickcheck coverage +// TODO(roasbeef): quickcheck coverage. type headerStore struct { - mtx sync.RWMutex + mtx sync.RWMutex // nolint:structcheck // false positive because used as embedded struct only fileName string @@ -429,7 +429,7 @@ func (h *blockHeaderStore) blockLocatorFromHash(hash *chainhash.Hash) ( locator = append(locator, hash) // If hash isn't found in DB or this is the genesis block, return the - // locator as is + // locator as is. height, err := h.heightFromHash(hash) if height == 0 || err != nil { return locator, nil diff --git a/headerfs/store_test.go b/headerfs/store_test.go index 2cfd83f04..687a165ef 100644 --- a/headerfs/store_test.go +++ b/headerfs/store_test.go @@ -599,6 +599,7 @@ func TestFilterHeaderStateAssertion(t *testing.T) { } for _, testCase := range testCases { + testCase := testCase success := t.Run(testCase.name, func(t *testing.T) { // We'll start the test by setting up our required // dependencies. diff --git a/headerlist/bounded_header_list.go b/headerlist/bounded_header_list.go index 8a31f7647..5d2a59ae8 100644 --- a/headerlist/bounded_header_list.go +++ b/headerlist/bounded_header_list.go @@ -1,6 +1,6 @@ package headerlist -// BoundedMemoryChain is an implemetnation of the headerlist.Chain interface +// BoundedMemoryChain is an implementation of the headerlist.Chain interface // which has a bounded size. The chain will be stored purely in memory. This is // useful for enforcing that only the past N headers are stored in memory, or // even as the primary header store. If an element inserted to the end of the diff --git a/headerlist/bounded_header_list_test.go b/headerlist/bounded_header_list_test.go index 1d3229b8d..1c55864c1 100644 --- a/headerlist/bounded_header_list_test.go +++ b/headerlist/bounded_header_list_test.go @@ -172,7 +172,6 @@ func TestBoundedMemoryChainPrevIteration(t *testing.T) { // We'll now walk backwards with the iterNode until we run into the nil // pointer. 
- numIters := 0 for iterNode != nil { nextNode := iterNode iterNode = iterNode.Prev() @@ -182,7 +181,5 @@ func TestBoundedMemoryChainPrevIteration(t *testing.T) { spew.Sdump(nextNode.Prev()), spew.Sdump(iterNode)) } - - numIters++ } } diff --git a/neutrino.go b/neutrino.go index fb26133ef..fa56fd95d 100644 --- a/neutrino.go +++ b/neutrino.go @@ -485,7 +485,7 @@ type Config struct { DataDir string // Database is an *open* database instance that we'll use to storm - // indexes of teh chain. + // indexes of the chain. Database walletdb.DB // ChainParams is the chain that we're running on. @@ -558,7 +558,7 @@ type peerSubscription struct { cancel <-chan struct{} } -// ChainService is instantiated with functional options +// ChainService is instantiated with functional options. type ChainService struct { // The following variables must only be used atomically. // Putting the uint64s first makes them 64-bit aligned for 32-bit systems. @@ -1021,7 +1021,7 @@ func (s *ChainService) IsBanned(addr string) bool { // Log how much time left the peer will remain banned for, if any. if time.Now().Before(banStatus.Expiration) { log.Debugf("Peer %v is banned for another %v", addr, - banStatus.Expiration.Sub(time.Now())) + time.Until(banStatus.Expiration)) } return banStatus.Banned diff --git a/notifications.go b/notifications.go index 87882106e..0b59deba5 100644 --- a/notifications.go +++ b/notifications.go @@ -162,7 +162,7 @@ func (s *ChainService) handleQuery(state *peerState, querymsg interface{}) { // Request a list of the persistent (added) peers. case getAddedNodesMsg: - // Respond with a slice of the relavent peers. + // Respond with a slice of the relevant peers. peers := make([]*ServerPeer, 0, len(state.persistentPeers)) for _, sp := range state.persistentPeers { peers = append(peers, sp) diff --git a/pushtx/broadcaster.go b/pushtx/broadcaster.go index a04795c31..aa855e93b 100644 --- a/pushtx/broadcaster.go +++ b/pushtx/broadcaster.go @@ -13,8 +13,8 @@ import ( ) var ( - // ErrBroadcastStopped is an error returned when we attempt to process a - // request to broadcast a transaction but the Broadcaster has already + // ErrBroadcasterStopped is an error returned when we attempt to process + // a request to broadcast a transaction but the Broadcaster has already // been stopped. ErrBroadcasterStopped = errors.New("broadcaster has been stopped") ) @@ -85,11 +85,11 @@ func NewBroadcaster(cfg *Config) *Broadcaster { // Start starts all of the necessary steps for the Broadcaster to begin properly // carrying out its duties. func (b *Broadcaster) Start() error { - var err error + var returnErr error b.start.Do(func() { sub, err := b.cfg.SubscribeBlocks() if err != nil { - err = fmt.Errorf("unable to subscribe for block "+ + returnErr = fmt.Errorf("unable to subscribe for block "+ "notifications: %v", err) return } @@ -97,7 +97,7 @@ func (b *Broadcaster) Start() error { b.wg.Add(1) go b.broadcastHandler(sub) }) - return err + return returnErr } // Stop halts the Broadcaster from rebroadcasting pending transactions. diff --git a/query.go b/query.go index 6722eafd0..b95326c09 100644 --- a/query.go +++ b/query.go @@ -23,7 +23,7 @@ var ( // query. QueryTimeout = time.Second * 10 - // QueryBatchTimout is the total time we'll wait for a batch fetch + // QueryBatchTimeout is the total time we'll wait for a batch fetch // query to complete. // TODO(halseth): instead use timeout since last received response? 
QueryBatchTimeout = time.Second * 30 @@ -500,7 +500,10 @@ checkResponses: func (s *ChainService) getFilterFromCache(blockHash *chainhash.Hash, filterType filterdb.FilterType) (*gcs.Filter, error) { - cacheKey := cache.FilterCacheKey{*blockHash, filterType} + cacheKey := cache.FilterCacheKey{ + BlockHash: *blockHash, + FilterType: filterType, + } filterValue, err := s.FilterCache.Get(cacheKey) if err != nil { @@ -512,9 +515,12 @@ func (s *ChainService) getFilterFromCache(blockHash *chainhash.Hash, // putFilterToCache inserts a given filter in ChainService's FilterCache. func (s *ChainService) putFilterToCache(blockHash *chainhash.Hash, - filterType filterdb.FilterType, filter *gcs.Filter) (bool, error) { + filterType filterdb.FilterType, filter *gcs.Filter) (bool, error) { // nolint:unparam - cacheKey := cache.FilterCacheKey{*blockHash, filterType} + cacheKey := cache.FilterCacheKey{ + BlockHash: *blockHash, + FilterType: filterType, + } return s.FilterCache.Put(cacheKey, &cache.CacheableFilter{Filter: filter}) } @@ -911,7 +917,7 @@ func (s *ChainService) GetBlock(blockHash chainhash.Hash, // can't request it. blockHeader, height, err := s.BlockHeaders.FetchHeader(&blockHash) if err != nil || blockHeader.BlockHash() != blockHash { - return nil, fmt.Errorf("Couldn't get header for block %s "+ + return nil, fmt.Errorf("couldn't get header for block %s "+ "from database", blockHash) } @@ -939,7 +945,7 @@ func (s *ChainService) GetBlock(blockHash chainhash.Hash, // Construct the appropriate getdata message to fetch the target block. getData := wire.NewMsgGetData() - getData.AddInvVect(inv) + _ = getData.AddInvVect(inv) // The block is only updated from the checkResponse function argument, // which is always called single-threadedly. We don't check the block @@ -1011,12 +1017,12 @@ func (s *ChainService) GetBlock(blockHash chainhash.Hash, options..., ) if foundBlock == nil { - return nil, fmt.Errorf("Couldn't retrieve block %s from "+ + return nil, fmt.Errorf("couldn't retrieve block %s from "+ "network", blockHash) } // Add block to the cache before returning it. - _, err = s.BlockCache.Put(*inv, &cache.CacheableBlock{foundBlock}) + _, err = s.BlockCache.Put(*inv, &cache.CacheableBlock{Block: foundBlock}) if err != nil { log.Warnf("couldn't write block to cache: %v", err) } @@ -1047,7 +1053,7 @@ func (s *ChainService) sendTransaction(tx *wire.MsgTx, options ...QueryOption) e // Create an inv. txHash := tx.TxHash() inv := wire.NewMsgInv() - inv.AddInvVect(wire.NewInvVect(invType, &txHash)) + _ = inv.AddInvVect(wire.NewInvVect(invType, &txHash)) // We'll gather all of the peers who replied to our query, along with // the ones who rejected it and their reason for rejecting it. We'll use diff --git a/query/interface.go b/query/interface.go index 4063d4a93..a1c7fa6b6 100644 --- a/query/interface.go +++ b/query/interface.go @@ -36,7 +36,7 @@ type queryOptions struct { // methods, such as GetBlock and GetCFilter (when that resorts to a network // query). These are always processed in order, with later options overriding // earlier ones. -type QueryOption func(*queryOptions) +type QueryOption func(*queryOptions) // nolint:golint // defaultQueryOptions returns a queryOptions set to package-level defaults. func defaultQueryOptions() *queryOptions { @@ -86,7 +86,7 @@ type Progress struct { Finished bool // Progressed is true if the query made progress towards fully - // answering the request as a result of the recived response. This is + // answering the request as a result of the received response. 
This is
 	// used for the requests types where more than one response is
 	// expected.
 	Progressed bool
diff --git a/query/worker.go b/query/worker.go
index 2d4aa7003..1477c1f31 100644
--- a/query/worker.go
+++ b/query/worker.go
@@ -64,7 +64,7 @@ type worker struct {
 // A compile-time check to ensure worker satisfies the Worker interface.
 var _ Worker = (*worker)(nil)
 
-// NewWorker creates a new worker assosiated with the given peer.
+// NewWorker creates a new worker associated with the given peer.
 func NewWorker(peer Peer) Worker {
 	return &worker{
 		peer: peer,
@@ -167,7 +167,7 @@ func (w *worker) Run(results chan<- *jobResult, quit <-chan struct{}) {
 			// queries with multiple responses
 			// expected won't timeout before all
 			// responses have been handled.
-			// TODO(halseth): separete progress
+			// TODO(halseth): separate progress
 			// timeout value.
 			if progress.Progressed {
 				timeout.Stop()
@@ -194,7 +194,7 @@ func (w *worker) Run(results chan<- *jobResult, quit <-chan struct{}) {
 
 			break Loop
 
-		// If the peer disconnectes before giving us a valid
+		// If the peer disconnects before giving us a valid
 		// answer, we'll also exit with an error.
 		case <-peer.OnDisconnect():
 			log.Debugf("Peer %v for worker disconnected, "+
diff --git a/query/workmanager.go b/query/workmanager.go
index 8da023ad6..76e801a1d 100644
--- a/query/workmanager.go
+++ b/query/workmanager.go
@@ -146,7 +146,7 @@ func (w *WorkManager) Stop() error {
 // workDispatcher receives batches of queries to be performed from external
 // callers, and dispatches these to active workers. It makes sure to
 // prioritize the queries in the order they come in, such that early queries
-// will be attemped completed first.
+// will be attempted completed first.
 //
 // NOTE: MUST be run as a goroutine.
 func (w *WorkManager) workDispatcher() {
diff --git a/query/workmanager_test.go b/query/workmanager_test.go
index 670f9a67a..b9609121d 100644
--- a/query/workmanager_test.go
+++ b/query/workmanager_test.go
@@ -202,19 +202,17 @@ func TestWorkManagerWorkDispatcherFailures(t *testing.T) {
 		scheduledJobs[i] = make(chan sched)
 	}
 
-	// Fot each wotker, spin up a goroutine that will forward the job it
-	// got to our slice of sheduled jobs, such that we can handle them in
+	// For each worker, spin up a goroutine that will forward the job it
+	// got to our slice of scheduled jobs, such that we can handle them in
 	// order.
 	for i := 0; i < len(workers); i++ {
 		wk := workers[i]
 		go func() {
 			for {
-				select {
-				case job := <-wk.nextJob:
-					scheduledJobs[job.index] <- sched{
-						wk:  wk,
-						job: job,
-					}
+				job := <-wk.nextJob
+				scheduledJobs[job.index] <- sched{
+					wk:  wk,
+					job: job,
 			}
 		}()
 	}
@@ -463,7 +461,7 @@ func TestWorkManagerWorkRankingScheduling(t *testing.T) {
 		q := &Request{}
 		queries = append(queries, q)
 	}
-	errChan = wm.Query(queries)
+	_ = wm.Query(queries)
 
 	// The new jobs should be scheduled on the even numbered workers.
 	for i := 0; i < len(workers); i += 2 {
diff --git a/query_test.go b/query_test.go
index afc0e18ec..378c83b64 100644
--- a/query_test.go
+++ b/query_test.go
@@ -42,7 +42,7 @@ var (
 // loadBlocks loads the blocks contained in the testdata directory and returns
 // a slice of them.
 //
-// NOTE: copied from btcsuite/btcd/database/ffldb/interface_test.go
+// NOTE: copied from btcsuite/btcd/database/ffldb/interface_test.go.
 func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) (
 	[]*btcutil.Block, error) {
 	// Open the file that contains the blocks for reading.
@@ -123,7 +123,7 @@ func genRandomBlockHash() *chainhash.Hash { // will then convert that filter into CacheableFilter to compute it's size for // convenience. It will return the filter along with it's size and randomly // generated block hash. testing.T is passed in as a convenience to deal with -// errors in this method and making the test code more straigthforward. Method +// errors in this method and making the test code more straightforward. Method // originally taken from filterdb/db_test.go. func genRandFilter(numElements uint32, t *testing.T) ( *chainhash.Hash, *gcs.Filter, uint64) { @@ -172,7 +172,7 @@ func getFilter(cs *ChainService, b *chainhash.Hash, t *testing.T) *gcs.Filter { return val } -func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { // nolint:unparam if a == b { return } @@ -266,7 +266,7 @@ func TestBlockCache(t *testing.T) { } headers.WriteHeaders(header) - sz, _ := (&cache.CacheableBlock{b}).Size() + sz, _ := (&cache.CacheableBlock{Block: b}).Size() if i < len(blocks)/2 { size += sz } diff --git a/rescan.go b/rescan.go index 54c232e4b..d5d0488a3 100644 --- a/rescan.go +++ b/rescan.go @@ -354,7 +354,7 @@ func rescan(chain ChainSource, options ...RescanOption) error { // If we don't have a quit channel, and the end height is still // unspecified, then we'll exit out here. if ro.quit == nil && ro.endBlock.Height == 0 { - return fmt.Errorf("Rescan request must specify a quit channel" + + return fmt.Errorf("rescan request must specify a quit channel" + " or valid end block") } @@ -560,8 +560,8 @@ func rescan(chain ChainSource, options ...RescanOption) error { newStamp.Height, &header, nil, ) } - if ro.ntfn.OnBlockConnected != nil { - ro.ntfn.OnBlockConnected( + if ro.ntfn.OnBlockConnected != nil { // nolint:staticcheck + ro.ntfn.OnBlockConnected( // nolint:staticcheck &newStamp.Hash, newStamp.Height, header.Timestamp, ) @@ -606,7 +606,7 @@ func rescan(chain ChainSource, options ...RescanOption) error { // handleBlockDisconnected is a helper closure that handles a new block // disconnected notification. 
- handleBlockDisconnected := func(ntfn *blockntfns.Disconnected) error { + handleBlockDisconnected := func(ntfn *blockntfns.Disconnected) error { // nolint:unparam blockDisconnected := ntfn.Header() log.Debugf("Rescan got disconnected block %d (%s)", ntfn.Height(), blockDisconnected.BlockHash()) @@ -624,8 +624,8 @@ func rescan(chain ChainSource, options ...RescanOption) error { curStamp.Height, &curHeader, ) } - if ro.ntfn.OnBlockDisconnected != nil { - ro.ntfn.OnBlockDisconnected( + if ro.ntfn.OnBlockDisconnected != nil { // nolint:staticcheck + ro.ntfn.OnBlockDisconnected( // nolint:staticcheck &curStamp.Hash, curStamp.Height, curHeader.Timestamp, ) @@ -932,8 +932,8 @@ func notifyBlock(chain ChainSource, ro *rescanOptions, relevantTxs) } - if ro.ntfn.OnBlockConnected != nil { - ro.ntfn.OnBlockConnected(&curStamp.Hash, + if ro.ntfn.OnBlockConnected != nil { // nolint:staticcheck + ro.ntfn.OnBlockConnected(&curStamp.Hash, // nolint:staticcheck curStamp.Height, curHeader.Timestamp) } @@ -984,8 +984,8 @@ func extractBlockMatches(chain ChainSource, ro *rescanOptions, if ro.spendsWatchedInput(tx) { relevant = true - if ro.ntfn.OnRedeemingTx != nil { - ro.ntfn.OnRedeemingTx(tx, &txDetails) + if ro.ntfn.OnRedeemingTx != nil { // nolint:staticcheck + ro.ntfn.OnRedeemingTx(tx, &txDetails) // nolint:staticcheck } } @@ -1000,8 +1000,8 @@ func extractBlockMatches(chain ChainSource, ro *rescanOptions, if pays { relevant = true - if ro.ntfn.OnRecvTx != nil { - ro.ntfn.OnRecvTx(tx, &txDetails) + if ro.ntfn.OnRecvTx != nil { // nolint:staticcheck + ro.ntfn.OnRecvTx(tx, &txDetails) // nolint:staticcheck } } @@ -1049,8 +1049,8 @@ func notifyBlockWithFilter(chain ChainSource, ro *rescanOptions, relevantTxs) } - if ro.ntfn.OnBlockConnected != nil { - ro.ntfn.OnBlockConnected(&curStamp.Hash, + if ro.ntfn.OnBlockConnected != nil { // nolint:staticcheck + ro.ntfn.OnBlockConnected(&curStamp.Hash, // nolint:staticcheck curStamp.Height, curHeader.Timestamp) } @@ -1148,9 +1148,9 @@ func (ro *rescanOptions) updateFilter(chain ChainSource, update *updateOptions, // If we need to rewind, then we'll walk backwards in the chain until // we arrive at the block _just_ before the rewind. for curStamp.Height > int32(update.rewind) { - if ro.ntfn.OnBlockDisconnected != nil && + if ro.ntfn.OnBlockDisconnected != nil && // nolint:staticcheck !update.disableDisconnectedNtfns { - ro.ntfn.OnBlockDisconnected(&curStamp.Hash, + ro.ntfn.OnBlockDisconnected(&curStamp.Hash, // nolint:staticcheck curStamp.Height, curHeader.Timestamp) } if ro.ntfn.OnFilteredBlockDisconnected != nil && @@ -1301,7 +1301,7 @@ func (r *Rescan) Start() <-chan error { errChan := make(chan error, 1) if !atomic.CompareAndSwapUint32(&r.started, 0, 1) { - errChan <- fmt.Errorf("Rescan already started") + errChan <- fmt.Errorf("rescan already started") return errChan } @@ -1461,7 +1461,7 @@ type SpendReport struct { // used to give a hint of which transaction in the block matches it (coinbase // is 0, first normal transaction is 1, etc.). // -// TODO(roasbeef): WTB utxo-commitments +// TODO(roasbeef): WTB utxo-commitments. func (s *ChainService) GetUtxo(options ...RescanOption) (*SpendReport, error) { // Before we start we'll fetch the set of default options, and apply // any user specified options in a functional manner. 
diff --git a/rescan_test.go b/rescan_test.go index e128bf31e..5c5061ceb 100644 --- a/rescan_test.go +++ b/rescan_test.go @@ -348,7 +348,7 @@ type rescanTestContext struct { // newRescanTestContext creates a new test harness for the Rescan struct backed // by a chain of numBlocks. -func newRescanTestContext(t *testing.T, numBlocks int, +func newRescanTestContext(t *testing.T, numBlocks int, // nolint:unparam options ...RescanOption) *rescanTestContext { blocksConnected := make(chan headerfs.BlockStamp) @@ -393,7 +393,7 @@ func newRescanTestContext(t *testing.T, numBlocks int, } // start starts the backing rescan. -func (ctx *rescanTestContext) start(waitUntilSynced bool) { +func (ctx *rescanTestContext) start(waitUntilSynced bool) { // nolint:unparam if !waitUntilSynced { ctx.errChan = ctx.rescan.Start() return diff --git a/sync_test.go b/sync_test.go index b7449feb0..1f50f5c64 100644 --- a/sync_test.go +++ b/sync_test.go @@ -219,7 +219,7 @@ func (s *secSource) add(privKey *btcec.PrivateKey) (btcutil.Address, error) { return addr, nil } -// GetKey is required by the txscript.KeyDB interface +// GetKey is required by the txscript.KeyDB interface. func (s *secSource) GetKey(addr btcutil.Address) (*btcec.PrivateKey, bool, error) { privKey, ok := s.keys[addr.String()] @@ -229,7 +229,7 @@ func (s *secSource) GetKey(addr btcutil.Address) (*btcec.PrivateKey, bool, return privKey, true, nil } -// GetScript is required by the txscript.ScriptDB interface +// GetScript is required by the txscript.ScriptDB interface. func (s *secSource) GetScript(addr btcutil.Address) ([]byte, error) { script, ok := s.scripts[addr.String()] if !ok { @@ -1102,10 +1102,10 @@ func TestNeutrinoSync(t *testing.T) { db, err := walletdb.Create( "bdb", tempDir+"/weks.db", true, dbOpenTimeout, ) - defer db.Close() if err != nil { t.Fatalf("Error opening DB: %s\n", err) } + defer db.Close() config := neutrino.Config{ DataDir: tempDir, Database: db, @@ -1130,9 +1130,10 @@ func TestNeutrinoSync(t *testing.T) { // Create a test harness with the three nodes and the neutrino instance. testHarness := &neutrinoHarness{h1, h2, h3, svc} - for _, test := range testCases { - if ok := t.Run(test.name, func(t *testing.T) { - test.test(testHarness, t) + for _, testCase := range testCases { + testCase := testCase + if ok := t.Run(testCase.name, func(t *testing.T) { + testCase.test(testHarness, t) }); !ok { break } diff --git a/utxoscanner_test.go b/utxoscanner_test.go index 27ed2c442..b0fd801a1 100644 --- a/utxoscanner_test.go +++ b/utxoscanner_test.go @@ -229,7 +229,7 @@ func TestDequeueAtHeight(t *testing.T) { } // Now, add the requests in order of their block heights. 
- req100000, err = scanner.Enqueue(makeTestInputWithScript(), 100000, func(height uint32) {}) + _, err = scanner.Enqueue(makeTestInputWithScript(), 100000, func(height uint32) {}) if err != nil { t.Fatalf("unable to enqueue scan request: %v", err) } @@ -295,7 +295,7 @@ func TestDequeueAtHeight(t *testing.T) { if err != nil { t.Fatalf("unable to enqueue scan request: %v", err) } - req100000, err = scanner.Enqueue(makeTestInputWithScript(), 100000, func(height uint32) {}) + _, err = scanner.Enqueue(makeTestInputWithScript(), 100000, func(height uint32) {}) if err != nil { t.Fatalf("unable to enqueue scan request: %v", err) } From 32cef72ed9c08356d2014101c719f3656134bb4f Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Tue, 23 Mar 2021 16:19:23 +0100 Subject: [PATCH 6/9] neutrino: return start/stop errors We don't just want to swallow/ignore errors during startup or shutdown of the main chain service. We return early with an error on startup and return the last error on shutdown instead. --- neutrino.go | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/neutrino.go b/neutrino.go index fa56fd95d..9c427d9dc 100644 --- a/neutrino.go +++ b/neutrino.go @@ -1525,9 +1525,13 @@ func (s *ChainService) Start() error { s.addrManager.Start() s.blockManager.Start() s.blockSubscriptionMgr.Start() - s.workManager.Start() + if err := s.workManager.Start(); err != nil { + return fmt.Errorf("unable to start work manager: %v", err) + } - s.utxoScanner.Start() + if err := s.utxoScanner.Start(); err != nil { + return fmt.Errorf("unable to start utxo scanner: %v", err) + } if err := s.broadcaster.Start(); err != nil { return fmt.Errorf("unable to start transaction broadcaster: %v", @@ -1552,18 +1556,31 @@ func (s *ChainService) Stop() error { return nil } + var returnErr error s.connManager.Stop() s.broadcaster.Stop() - s.utxoScanner.Stop() - s.workManager.Stop() + if err := s.utxoScanner.Stop(); err != nil { + log.Errorf("error stopping utxo scanner: %v", err) + returnErr = err + } + if err := s.workManager.Stop(); err != nil { + log.Errorf("error stopping work manager: %v", err) + returnErr = err + } s.blockSubscriptionMgr.Stop() - s.blockManager.Stop() - s.addrManager.Stop() + if err := s.blockManager.Stop(); err != nil { + log.Errorf("error stopping block manager: %v", err) + returnErr = err + } + if err := s.addrManager.Stop(); err != nil { + log.Errorf("error stopping address manager: %v", err) + returnErr = err + } // Signal the remaining goroutines to quit. 
close(s.quit) s.wg.Wait() - return nil + return returnErr } // IsCurrent lets the caller know whether the chain service's block manager From 4ddceb57dae2bd06d97c79e628abdeb09c6236eb Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Tue, 23 Mar 2021 16:24:14 +0100 Subject: [PATCH 7/9] Travis+GitHub: move CI over to GitHub --- .github/workflows/main.yml | 88 +++++++++++++++++++++++ .travis.yml | 22 ------ btcd_checkout.sh | 12 ---- gotest.sh | 138 ------------------------------------- 4 files changed, 88 insertions(+), 172 deletions(-) create mode 100644 .github/workflows/main.yml delete mode 100644 .travis.yml delete mode 100755 btcd_checkout.sh delete mode 100755 gotest.sh diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..39ddd2532 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,88 @@ +name: CI + +on: + push: + branches: + - "master" + pull_request: + branches: + - "*" + +defaults: + run: + shell: bash + +env: + GO_VERSION: 1.16.x + +jobs: + ######################## + # compilation check + ######################## + rpc-check: + name: RPC and mobile compilation check + runs-on: ubuntu-latest + steps: + - name: git checkout + uses: actions/checkout@v2 + + - name: setup go ${{ env.GO_VERSION }} + uses: actions/setup-go@v2 + with: + go-version: '${{ env.GO_VERSION }}' + + - name: run check + run: make build + + ######################## + # lint code + ######################## + lint: + name: lint code + runs-on: ubuntu-latest + steps: + - name: git checkout + uses: actions/checkout@v2 + + - name: Fetch all history for linter + run: git fetch --prune --unshallow + + - name: setup go ${{ env.GO_VERSION }} + uses: actions/setup-go@v2 + with: + go-version: '${{ env.GO_VERSION }}' + + - name: lint + run: make lint + + ######################## + # run unit tests + ######################## + unit-test: + name: run unit tests + runs-on: ubuntu-latest + strategy: + # Allow other tests in the matrix to continue if one fails. 
+ fail-fast: false + matrix: + unit_type: + - unit-cover + - unit-race + steps: + - name: git checkout + uses: actions/checkout@v2 + + - name: setup go ${{ env.GO_VERSION }} + uses: actions/setup-go@v2 + with: + go-version: '${{ env.GO_VERSION }}' + + - name: run ${{ matrix.unit_type }} + run: make ${{ matrix.unit_type }} + + - name: Send coverage + uses: shogo82148/actions-goveralls@v1 + if: matrix.unit_type == 'unit-cover' + with: + path-to-profile: coverage.txt + parallel: true diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5715253b0..000000000 --- a/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go -cache: - directories: - - $GOCACHE - - $GOPATH/pkg/mod - - $GOPATH/src/github.com/btcsuite - - $GOPATH/src/github.com/golang - - $GOPATH/src/gopkg.in/alecthomas -go: - - "1.13.x" -sudo: false -install: - - export PATH=$PATH:$PWD/linux-amd64/ - - ./btcd_checkout.sh -env: - matrix: - - RACE=false - - RACE=true -script: - - export GO111MODULE=on - - export PATH=$PATH:$GOPATH/bin - - ./gotest.sh diff --git a/btcd_checkout.sh b/btcd_checkout.sh deleted file mode 100755 index de4784da1..000000000 --- a/btcd_checkout.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -BTCD_COMMIT=$(cat go.mod | \ - grep github.com/btcsuite/btcd | \ - tail -n1 | \ - awk -F " " '{ print $2 }' | \ - awk -F "/" '{ print $1 }') -echo "Fetching btcd at $BTCD_COMMIT" - -pushd /tmp -GO111MODULE=on go get -v github.com/btcsuite/btcd@$BTCD_COMMIT -popd diff --git a/gotest.sh b/gotest.sh deleted file mode 100755 index 080abf64e..000000000 --- a/gotest.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash -# The script does automatic checking on a Go package and its sub-packages, including: -# 1. gofmt (http://golang.org/cmd/gofmt/) -# 2. golint (https://github.com/golang/lint) -# 3. go vet (http://golang.org/cmd/vet) -# 4. gosimple (https://github.com/dominikh/go-simple) -# 5. unconvert (https://github.com/mdempsky/unconvert) -# 6. race detector (http://blog.golang.org/race-detector) -# 7. test coverage (http://blog.golang.org/cover) -# -# gometalinter (github.com/alecthomas/gometalinter) is used to run each static -# checker. - -declare GREEN='\033[0;32m' -declare NC='\033[0m' -print () { - echo -e "${GREEN}$1${NC}" -} - -# test_with_profile run test coverage on each subdirectories and merge the -# coverage profile. Be aware that we are skipping the integration tests, as the -# tool won't gather any useful coverage information from them. -test_with_coverage_profile() { - print "* Running tests with creating coverage profile" - - echo "mode: count" > profile.cov - - # Standard go tooling behavior is to ignore dirs with leading underscores. - for dir in $(find . -maxdepth 10 \ - -not -path './.git*' \ - -not -path '*/_*' \ - -not -path './cmd*' \ - -not -path './release*' \ - -not -path './vendor*' \ - -type d) - do - if ls $dir/*.go &> /dev/null; then - go test -v -covermode=count -coverprofile=$dir/profile.tmp $dir - - if [ -f $dir/profile.tmp ]; then - cat $dir/profile.tmp | tail -n +2 >> profile.cov - rm $dir/profile.tmp - fi - fi - done - - if [ "$TRAVIS" == "true" ] ; then - print "* Send test coverage to travis server" - # Make sure goveralls is installed and $GOPATH/bin is in your path. - if [ ! 
-x "$(type -p goveralls)" ]; then - print "** Install goveralls:" - go get github.com/mattn/goveralls - fi - - print "** Submit the test coverage result to coveralls.io" - goveralls -coverprofile=profile.cov -service=travis-ci - fi -} - -# test_race_conditions run standard go test without creating coverage -# profile but with race condition checks. -test_race_conditions() { - print "* Running tests with the race condition detector" - - test_targets=$(go list ./... | grep -v '/vendor/') - env GORACE="history_size=7 halt_on_error=1" go test -v -p 1 -race $test_targets -} - -# lint_check runs static checks. -lint_check() { - print "* Run static checks" - - # Make sure gometalinter is installed and $GOPATH/bin is in your path. - if [ ! -x "$(type -p gometalinter.v2)" ]; then - print "** Install gometalinter" - GO111MODULE=off go get -u gopkg.in/alecthomas/gometalinter.v2 - GO111MODULE=off gometalinter.v2 --install - fi - - # Update metalinter if needed. - GO111MODULE=off gometalinter.v2 --install 1>/dev/null - - # Automatic checks - linter_targets=$(go list ./... | grep -v 'lnrpc') - test -z "$(gometalinter.v2 --disable-all \ - --enable=gofmt \ - --enable=vet \ - --enable=golint \ - --line-length=72 \ - --deadline=4m $linter_targets 2>&1 | grep -v 'ALL_CAPS\|OP_' 2>&1 | tee /dev/stderr)" -} - -set -e - -print "====+ Start +====" - -# Read input flags and initialize variables -NEED_LINT="false" -NEED_COVERAGE="false" -NEED_RACE="false" -NEED_INSTALL="false" - -while getopts "lrcio" flag; do - case "${flag}" in - l) NEED_LINT="true" ;; - r) NEED_RACE="true" ;; - c) NEED_COVERAGE="true" ;; - i) NEED_INSTALL="true" ;; - *) - printf '\nUsage: %s [-l] [-r] [-c] [-i] [-o], where:\n' $0 - printf ' -l: include code lint check\n' - printf ' -r: run tests with race condition check\n' - printf ' -c: run tests with test coverage\n' - printf ' -i: reinstall project dependencies\n' - exit 1 ;; - esac - done - -# remove the options from the positional parameters -shift $(( OPTIND - 1 )) - -# Lint check is first because we shouldn't run tests on garbage code. -if [ "$NEED_LINT" == "true" ] || [ "$RACE" == "false" ]; then - lint_check -fi - -# Race condition second because we shouldn't check coverage on buggy code. -if [ "$NEED_RACE" == "true" ] || [ "$RACE" == "true" ]; then - test_race_conditions -fi - -# Test coverage is third because in this case code should work properly and -# we may calmly send coverage profile (if script is run on travis) -if [ "$NEED_COVERAGE" == "true" ] || [ "$RACE" == "false" ]; then - test_with_coverage_profile -fi - -print "=====+ End +=====" From 952bdebdb1f80d55f36245f7c94c423fe72045c9 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 6 May 2021 13:55:04 +0200 Subject: [PATCH 8/9] multi: enable struct alignment check, fix for often used For structs we use often, such as the query options, we want to have proper alignment to save some memory space. Other structs that we only instantiate once or a few times we don't align in favor of better readability of the code. 
--- .golangci.yml | 5 ++--- blockmanager.go | 6 +++--- blockntfns/manager.go | 9 +++++---- neutrino.go | 2 +- query.go | 26 +++++++++++++------------- rescan.go | 5 ++--- 6 files changed, 26 insertions(+), 27 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 5f055f8be..e5c2ea559 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,6 +9,8 @@ linters-settings: gofmt: # simplify code: gofmt with `-s` option, true by default simplify: true + maligned: + suggest-new: true linters: enable-all: true @@ -20,9 +22,6 @@ linters: # even longer by marking them as 'nolint'. - lll - # We don't care (enough) about misaligned structs to lint that. - - maligned - # We have long functions, especially in tests. Moving or renaming those would # trigger funlen problems that we may not want to solve at that time. - funlen diff --git a/blockmanager.go b/blockmanager.go index 0713546fa..57dadd14e 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -117,9 +117,9 @@ type blockManagerCfg struct { // blockManager provides a concurrency safe block manager for handling all // incoming blocks. -type blockManager struct { - started int32 - shutdown int32 +type blockManager struct { // nolint:maligned + started int32 // To be used atomically. + shutdown int32 // To be used atomically. cfg *blockManagerCfg diff --git a/blockntfns/manager.go b/blockntfns/manager.go index 17c823905..52a9a1de0 100644 --- a/blockntfns/manager.go +++ b/blockntfns/manager.go @@ -19,15 +19,16 @@ var ( // newSubscription is an internal message used within the SubscriptionManager to // denote a new client's intent to receive block notifications. type newSubscription struct { - canceled sync.Once + id uint64 // To be used atomically. - id uint64 + bestHeight uint32 + + canceled sync.Once ntfnChan chan BlockNtfn ntfnQueue *queue.ConcurrentQueue - bestHeight uint32 - errChan chan error + errChan chan error quit chan struct{} wg sync.WaitGroup diff --git a/neutrino.go b/neutrino.go index 9c427d9dc..8b7efb9a3 100644 --- a/neutrino.go +++ b/neutrino.go @@ -559,7 +559,7 @@ type peerSubscription struct { } // ChainService is instantiated with functional options. -type ChainService struct { +type ChainService struct { // nolint:maligned // The following variables must only be used atomically. // Putting the uint64s first makes them 64-bit aligned for 32-bit systems. bytesReceived uint64 // Total bytes received from all peers since start. diff --git a/query.go b/query.go index b95326c09..c4990d65e 100644 --- a/query.go +++ b/query.go @@ -57,37 +57,37 @@ var ( // // TODO: Make more query options that override global options. type queryOptions struct { + // maxBatchSize is the maximum items that the query should return in the + // case the optimisticBatch option is used. It saves bandwidth in the case + // the caller has a limited amount of items to fetch but still wants to use + // batching. + maxBatchSize int64 + // timeout lets the query know how long to wait for a peer to answer // the query before moving onto the next peer. timeout time.Duration - // numRetries tells the query how many times to retry asking each peer - // the query. - numRetries uint8 - // peerConnectTimeout lets the query know how long to wait for the // underlying chain service to connect to a peer before giving up // on a query in case we don't have any peers. peerConnectTimeout time.Duration + // doneChan lets the query signal the caller when it's done, in case + // it's run in a goroutine. 
+ doneChan chan<- struct{} + // encoding lets the query know which encoding to use when queueing // messages to a peer. encoding wire.MessageEncoding - // doneChan lets the query signal the caller when it's done, in case - // it's run in a goroutine. - doneChan chan<- struct{} + // numRetries tells the query how many times to retry asking each peer + // the query. + numRetries uint8 // optimisticBatch indicates whether we expect more calls to follow, // and that we should attempt to batch more items with the query such // that they can be cached, avoiding the extra round trip. optimisticBatch optimisticBatchType - - // maxBatchSize is the maximum items that the query should return in the - // case the optimisticBatch option is used. It saves bandwidth in the case - // the caller has a limited amount of items to fetch but still wants to use - // batching. - maxBatchSize int64 } // optimisticBatchType is a type indicating the kind of batching we want to diff --git a/rescan.go b/rescan.go index d5d0488a3..0636ddf9c 100644 --- a/rescan.go +++ b/rescan.go @@ -1260,8 +1260,8 @@ txOutLoop: // client with updateable filters. It's meant to be close to a drop-in // replacement for the btcd rescan and notification functionality used in // wallets. It only contains information about whether a goroutine is running. -type Rescan struct { - started uint32 +type Rescan struct { // nolint:maligned + started uint32 // To be used atomically. running chan struct{} updateChan chan *updateOptions @@ -1374,7 +1374,6 @@ func DisableDisconnectedNtfns(disabled bool) UpdateOption { // Update sends an update to a long-running rescan/notification goroutine. func (r *Rescan) Update(options ...UpdateOption) error { - ro := defaultRescanOptions() for _, option := range r.options { option(ro) From eb989f6942d407377e8ad53ef8a0ad3b8857b1e0 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 6 May 2021 14:01:17 +0200 Subject: [PATCH 9/9] multi: enable prealloc linter for non-test code --- .golangci.yml | 8 ++++---- blockmanager.go | 8 ++++---- neutrino.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e5c2ea559..330237f9f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -32,10 +32,6 @@ linters: # land. - gocyclo - # Instances of table driven tests that don't pre-allocate shouldn't trigger - # the linter. - - prealloc - # Init functions are used by loggers throughout the codebase. - gochecknoinits @@ -48,3 +44,7 @@ issues: - gosec - errcheck - dupl + + # Instances of table driven tests that don't pre-allocate shouldn't + # trigger the linter. + - prealloc diff --git a/blockmanager.go b/blockmanager.go index 57dadd14e..49fcae944 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -822,12 +822,12 @@ type checkpointedCFHeadersQuery struct { // requests creates the query.Requests for this CF headers query. 
func (c *checkpointedCFHeadersQuery) requests() []*query.Request { - var reqs []*query.Request - for _, m := range c.msgs { - reqs = append(reqs, &query.Request{ + reqs := make([]*query.Request, len(c.msgs)) + for idx, m := range c.msgs { + reqs[idx] = &query.Request{ Req: m, HandleResp: c.handleResponse, - }) + } } return reqs } diff --git a/neutrino.go b/neutrino.go index 8b7efb9a3..e0dd47c9d 100644 --- a/neutrino.go +++ b/neutrino.go @@ -337,7 +337,7 @@ func (sp *ServerPeer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) { return } - var addrsSupportingServices []*wire.NetAddress + addrsSupportingServices := make([]*wire.NetAddress, 0, len(msg.AddrList)) for _, na := range msg.AddrList { // Don't add more address if we're disconnecting. if !sp.Connected() {