From d50121048ffeb05dc7b5deb0ebe6d4b413964767 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Fri, 10 Feb 2023 19:13:50 -0800
Subject: [PATCH] build: fix linter issues, ignore less useful linters

---
 .golangci.yml          |  2 ++
 bamboozle_unit_test.go |  2 +-
 blockmanager.go        |  4 ++--
 headerfs/index_test.go |  4 ++--
 neutrino.go            |  8 ++++----
 query/workmanager.go   |  4 ++--
 rescan.go              |  2 +-
 sync_test.go           | 30 ++++++++++++++++--------------
 8 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index b974c499..defd7b1e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -86,6 +86,8 @@ linters:
     - nilnil
     - stylecheck
     - thelper
+    - exhaustruct
+    - nosnakecase
 
 issues:
   exclude-rules:
diff --git a/bamboozle_unit_test.go b/bamboozle_unit_test.go
index 3ee6781e..f5f649e4 100644
--- a/bamboozle_unit_test.go
+++ b/bamboozle_unit_test.go
@@ -450,7 +450,7 @@ var (
             // One peer is serving the "old-old" filter which
             // contains all OP_RETURN output, we expect this peer
             // to be banned first.
-            name: "old old peer",
+            name: "very old peer",
             peerFilters: map[string]*gcs.Filter{
                 "a": correctFilter,
                 "b": oldFilter,
diff --git a/blockmanager.go b/blockmanager.go
index 8afb07e5..6a734b61 100644
--- a/blockmanager.go
+++ b/blockmanager.go
@@ -275,8 +275,8 @@ func newBlockManager(cfg *blockManagerCfg) (*blockManager, error) {
         return nil, err
     }
 
-    // We must also ensure the the filter header tip hash is set to the
-    // block hash at the filter tip height.
+    // We must also ensure the filter header tip hash is set to the block
+    // hash at the filter tip height.
     fh, err := cfg.BlockHeaders.FetchHeaderByHeight(bm.filterHeaderTip)
     if err != nil {
         return nil, err
diff --git a/headerfs/index_test.go b/headerfs/index_test.go
index 6c3a1b77..ebebfd4f 100644
--- a/headerfs/index_test.go
+++ b/headerfs/index_test.go
@@ -50,7 +50,7 @@ func TestAddHeadersIndexRetrieve(t *testing.T) {
         t.Fatalf("unable to create test db: %v", err)
     }
 
-    // First, we'll create a a series of random headers that we'll use to
+    // First, we'll create a series of random headers that we'll use to
     // write into the database.
     const numHeaders = 100
     headerEntries, headerIndex, err := writeRandomBatch(hIndex, numHeaders)
@@ -152,7 +152,7 @@ func TestHeaderStorageFallback(t *testing.T) {
         t.Fatalf("error writing random batch with old data: %v", err)
     }
 
-    // Next, we'll create a a series of random headers that we'll use to
+    // Next, we'll create a series of random headers that we'll use to
     // write into the database through the normal interface. This means they
     // will be written to the new sub buckets.
     newHeaderEntries, _, err := writeRandomBatch(hIndex, numHeaders)
diff --git a/neutrino.go b/neutrino.go
index 2ab0b15a..54e0385d 100644
--- a/neutrino.go
+++ b/neutrino.go
@@ -75,7 +75,7 @@ var (
     // DefaultFilterCacheSize is the size (in bytes) of filters neutrino
     // will keep in memory if no size is specified in the neutrino.Config.
     // Since we utilize the cache during batch filter fetching, it is
-    // beneficial if it is able to to keep a whole batch. The current batch
+    // beneficial if it is able to keep a whole batch. The current batch
     // size is 1000, so we default to 30 MB, which can fit about 1450 to
     // 2300 mainnet filters.
     DefaultFilterCacheSize uint64 = 3120 * 10 * 1000
@@ -1002,7 +1002,7 @@ func (s *ChainService) BestBlock() (*headerfs.BlockStamp, error) {
         return nil, err
     }
 
-    // Filter headers might lag behind block headers, so we can can fetch a
+    // Filter headers might lag behind block headers, so we can fetch a
     // previous block header if the filter headers are not caught up.
     if filterHeight < bestHeight {
         bestHeight = filterHeight
@@ -1572,8 +1572,8 @@ func (s *ChainService) peerDoneHandler(sp *ServerPeer) {
     close(sp.quit)
 }
 
-// UpdatePeerHeights updates the heights of all peers who have have announced
-// the latest connected main chain block, or a recognized orphan. These height
+// UpdatePeerHeights updates the heights of all peers who have announced the
+// latest connected main chain block, or a recognized orphan. These height
 // updates allow us to dynamically refresh peer heights, ensuring sync peer
 // selection has access to the latest block heights for each peer.
 func (s *ChainService) UpdatePeerHeights(latestBlkHash *chainhash.Hash,
diff --git a/query/workmanager.go b/query/workmanager.go
index dc2e9399..ba44a1fa 100644
--- a/query/workmanager.go
+++ b/query/workmanager.go
@@ -343,8 +343,8 @@ Loop:
                 heap.Push(work, result.job)
                 currentQueries[result.job.index] = batchNum
 
-            // Otherwise we we got a successful result and update
-            // the status of the batch this query is a part of.
+            // Otherwise we got a successful result and update the
+            // status of the batch this query is a part of.
             default:
                 // Reward the peer for the successful query.
                 w.cfg.Ranking.Reward(result.peer.Addr())
diff --git a/rescan.go b/rescan.go
index 8b68bfda..9d7d9831 100644
--- a/rescan.go
+++ b/rescan.go
@@ -685,7 +685,7 @@ rescanLoop:
 
             // If we have to rewind our state, then we'll
             // mark ourselves as not current so we can walk
-            // forward in the chain again until we we are
+            // forward in the chain again until we are
             // current. This is our way of doing a manual
             // rescan.
             if rewound {
diff --git a/sync_test.go b/sync_test.go
index a59ca950..20e77c34 100644
--- a/sync_test.go
+++ b/sync_test.go
@@ -62,7 +62,8 @@ var (
     // "rd": OnRedeemingTx
     // "bd": OnBlockDisconnected
     // "fd": OnFilteredBlockDisconnected.
-    wantLog = func() (log []byte) {
+    wantLog = func() []byte {
+        var log []byte
         for i := 1096; i <= 1100; i++ {
             // FilteredBlockConnected
             log = append(log, []byte("fc")...)
@@ -452,17 +453,17 @@ func testStartRescan(harness *neutrinoHarness, t *testing.T) {
             ourIndex = i
         }
     }
-    return func(target btcutil.Amount) (total btcutil.Amount,
-        inputs []*wire.TxIn, inputValues []btcutil.Amount,
-        scripts [][]byte, err error) {
+    return func(target btcutil.Amount) (btcutil.Amount,
+        []*wire.TxIn, []btcutil.Amount,
+        [][]byte, error) {
 
         if ourIndex == 1<<30 {
-            err = fmt.Errorf("Couldn't find our address " +
+            err := fmt.Errorf("Couldn't find our address " +
                 "in the passed transaction's outputs.")
-            return
+            return 0, nil, nil, nil, err
         }
-        total = target
-        inputs = []*wire.TxIn{
+        total := target
+        inputs := []*wire.TxIn{
             {
                 PreviousOutPoint: wire.OutPoint{
                     Hash: tx.TxHash(),
@@ -470,11 +471,12 @@ func testStartRescan(harness *neutrinoHarness, t *testing.T) {
                 },
             },
         }
-        inputValues = []btcutil.Amount{
-            btcutil.Amount(tx.TxOut[ourIndex].Value)}
-        scripts = [][]byte{tx.TxOut[ourIndex].PkScript}
-        err = nil
-        return
+        inputValues := []btcutil.Amount{
+            btcutil.Amount(tx.TxOut[ourIndex].Value),
+        }
+        scripts := [][]byte{tx.TxOut[ourIndex].PkScript}
+
+        return total, inputs, inputValues, scripts, nil
     }
 }
 
@@ -814,8 +816,8 @@ func testRescanResults(harness *neutrinoHarness, t *testing.T) {
         t.Fatalf("Rescan ended with error: %s", err)
     }
 
-    // Immediately try to add a new update to to the rescan that was just
-    // shut down. This should fail as it is no longer running.
+    // Immediately try to add a new update to the rescan that was just shut
+    // down. This should fail as it is no longer running.
     rescan.WaitForShutdown()
     err = rescan.Update(neutrino.AddAddrs(addr2), neutrino.Rewind(1095))
     if err == nil {
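
A note on the DefaultFilterCacheSize constant touched above: 3120 * 10 * 1000
reads as roughly 31,200 bytes per filter times the 1000-filter batch size, or
31,200,000 bytes in total. The sketch below only restates the arithmetic
implied by the code comment; the derived per-filter sizes are back-of-the-
envelope figures, not values taken from the codebase:

    package main

    import "fmt"

    func main() {
        // 3120 * 10 * 1000 = 31,200,000 bytes, which the code comment
        // rounds to 30 MB.
        const cacheSize uint64 = 3120 * 10 * 1000

        // "Can fit about 1450 to 2300 mainnet filters" implies typical
        // compact filters of roughly 13.5 to 21.5 kB each.
        fmt.Println(cacheSize / 2300) // ~13,565 bytes per filter if 2300 fit
        fmt.Println(cacheSize / 1450) // ~21,517 bytes per filter if 1450 fit
    }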
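
The sync_test.go hunks above also replace named return values and naked
returns with explicit ones (a common linter complaint, though the commit
message doesn't name the specific linter that flagged them). A minimal
before/after sketch of the pattern, using a hypothetical helper rather than
code from this repository:

    package main

    import "fmt"

    // splitBefore mirrors the old style: named results plus naked returns
    // force the reader to track which values are set at each exit point.
    func splitBefore(s string) (head, tail string, err error) {
        if s == "" {
            err = fmt.Errorf("empty input")
            return
        }
        head, tail = s[:1], s[1:]
        return
    }

    // splitAfter mirrors the new style: every exit states its values
    // explicitly, as the patch now does in testStartRescan's coin
    // selection closure.
    func splitAfter(s string) (string, string, error) {
        if s == "" {
            return "", "", fmt.Errorf("empty input")
        }

        return s[:1], s[1:], nil
    }

    func main() {
        head, tail, err := splitAfter("abc")
        fmt.Println(head, tail, err) // a bc <nil>
    }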