From f357d2ef23f47f00a1acfd5f0d68d422f0ac4873 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 3 Jul 2024 09:23:26 +0200 Subject: [PATCH 01/98] Use unit SC/TB/month for maxStoragePrice when pinning it (#1358) --- internal/bus/pinmanager.go | 44 +++++++++----------------------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 02e4df79b..21591b21c 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -265,15 +265,9 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin if err != nil { pm.logger.Warn("failed to convert max download price to currency") } else if !gs.MaxDownloadPrice.Equals(update) { - bkp := gs.MaxDownloadPrice gs.MaxDownloadPrice = update - if err := gs.Validate(); err != nil { - pm.logger.Warn("failed to update gouging setting, new download price makes the setting invalid", zap.Error(err)) - gs.MaxDownloadPrice = bkp - } else { - pm.logger.Infow("updating max download price", "old", bkp, "new", gs.MaxDownloadPrice, "rate", rate) - updated = true - } + pm.logger.Infow("updating max download price", "old", gs.MaxDownloadPrice, "new", update, "rate", rate) + updated = true } } @@ -283,33 +277,21 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin if err != nil { pm.logger.Warnw("failed to convert max RPC price to currency", zap.Error(err)) } else if !gs.MaxRPCPrice.Equals(update) { - bkp := gs.MaxRPCPrice + pm.logger.Infow("updating max RPC price", "old", gs.MaxRPCPrice, "new", update, "rate", rate) gs.MaxRPCPrice = update - if err := gs.Validate(); err != nil { - pm.logger.Warnw("failed to update gouging setting, new RPC price makes the setting invalid", zap.Error(err)) - gs.MaxRPCPrice = bkp - } else { - pm.logger.Infow("updating max RPC price", "old", bkp, "new", gs.MaxRPCPrice, "rate", rate) - updated = true - } + updated = true } } // update max storage price if 
pins.MaxStorage.IsPinned() { - update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxStorage.Value), rate) + maxStorageCurr, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxStorage.Value), rate) if err != nil { pm.logger.Warnw("failed to convert max storage price to currency", zap.Error(err)) - } else if !gs.MaxStoragePrice.Equals(update) { - bkp := gs.MaxStoragePrice + } else if update := maxStorageCurr.Div64(1e12).Div64(144 * 30); !gs.MaxStoragePrice.Equals(update) { // convert from SC/TB/month to SC/byte/block + pm.logger.Infow("updating max storage price", "old", gs.MaxStoragePrice, "new", update, "rate", rate) gs.MaxStoragePrice = update - if err := gs.Validate(); err != nil { - pm.logger.Warnw("failed to update gouging setting, new storage price makes the setting invalid", zap.Error(err)) - gs.MaxStoragePrice = bkp - } else { - pm.logger.Infow("updating max storage price", "old", bkp, "new", gs.MaxStoragePrice, "rate", rate) - updated = true - } + updated = true } } @@ -319,15 +301,9 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin if err != nil { pm.logger.Warnw("failed to convert max upload price to currency", zap.Error(err)) } else if !gs.MaxUploadPrice.Equals(update) { - bkp := gs.MaxUploadPrice + pm.logger.Infow("updating max upload price", "old", gs.MaxUploadPrice, "new", update, "rate", rate) gs.MaxUploadPrice = update - if err := gs.Validate(); err != nil { - pm.logger.Warnw("failed to update gouging setting, new upload price makes the setting invalid", zap.Error(err)) - gs.MaxUploadPrice = bkp - } else { - pm.logger.Infow("updating max upload price", "old", bkp, "new", gs.MaxUploadPrice, "rate", rate) - updated = true - } + updated = true } } From d615bdd950bbd331d9d81b06d0309b1354206b88 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 23 Aug 2024 15:21:05 +0200 Subject: [PATCH 02/98] e2e: cancel syncer context --- internal/test/e2e/host.go | 23 ++++++++++++++++------- 1 file changed, 16 
insertions(+), 7 deletions(-) diff --git a/internal/test/e2e/host.go b/internal/test/e2e/host.go index e6867fa2c..bd10b4af1 100644 --- a/internal/test/e2e/host.go +++ b/internal/test/e2e/host.go @@ -107,9 +107,10 @@ type Host struct { dir string privKey types.PrivateKey - s *syncer.Syncer - cm *chain.Manager - chainDB *coreutils.BoltChainDB + s *syncer.Syncer + syncerCancel context.CancelFunc + cm *chain.Manager + chainDB *coreutils.BoltChainDB store *sqlite.Store wallet *wallet.SingleAddressWallet @@ -157,6 +158,7 @@ func (h *Host) Close() error { h.contracts.Close() h.storage.Close() h.store.Close() + h.syncerCancel() h.s.Close() h.chainDB.Close() return nil @@ -242,7 +244,13 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, g NetAddress: l.Addr().String(), }, syncer.WithPeerDiscoveryInterval(100*time.Millisecond), syncer.WithSyncInterval(100*time.Millisecond)) syncErrChan := make(chan error, 1) - go func() { syncErrChan <- s.Run(context.Background()) }() + syncerCtx, syncerCancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + syncerCancel() + } + }() + go func() { syncErrChan <- s.Run(syncerCtx) }() log := zap.NewNop() db, err := sqlite.OpenDatabase(filepath.Join(dir, "hostd.db"), log.Named("sqlite")) @@ -304,9 +312,10 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, g dir: dir, privKey: privKey, - s: s, - cm: cm, - chainDB: chainDB, + s: s, + syncerCancel: syncerCancel, + cm: cm, + chainDB: chainDB, store: db, wallet: wallet, From 028a6fbdd69f20ceede7e51e1cebc70bfae1c875 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 26 Aug 2024 10:36:54 +0200 Subject: [PATCH 03/98] Fix node setup (#1477) Refactoring the node setup it looks like I made `autopilot.Run` blocking. This prevents S3 setup from successfully completing. 
Fixes #1473 --- cmd/renterd/node.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index b4c330fc4..89dd75ab0 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -232,7 +232,7 @@ func newNode(cfg config.Config, network *consensus.Network, genesis types.Block) } setupFns = append(setupFns, fn{ name: "Autopilot", - fn: func(_ context.Context) error { ap.Run(); return nil }, + fn: func(_ context.Context) error { go ap.Run(); return nil }, }) shutdownFns = append(shutdownFns, fn{ name: "Autopilot", @@ -252,6 +252,7 @@ func newNode(cfg config.Config, network *consensus.Network, genesis types.Block) setupFns: setupFns, shutdownFns: shutdownFns, + bus: bc, cfg: cfg, logger: logger.Sugar(), From faca1493fc6fc27c25e3cd0973f0feb4f6198ee1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 26 Aug 2024 13:41:26 +0200 Subject: [PATCH 04/98] bus/worker/main: update/remove deprecated code --- api/wallet.go | 22 +-- bus/bus.go | 15 +- bus/client/wallet.go | 65 +------- bus/routes.go | 236 ++---------------------------- cmd/renterd/config.go | 55 ------- cmd/renterd/logger.go | 4 - config/config.go | 14 +- internal/test/e2e/cluster_test.go | 3 - worker/worker.go | 8 - 9 files changed, 27 insertions(+), 395 deletions(-) diff --git a/api/wallet.go b/api/wallet.go index d2ddbc857..83ee5e0e1 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -7,6 +7,7 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" ) type ( @@ -77,12 +78,9 @@ type ( // WalletResponse is the response type for the /wallet endpoint. 
WalletResponse struct { - ScanHeight uint64 `json:"scanHeight"` - Address types.Address `json:"address"` - Spendable types.Currency `json:"spendable"` - Confirmed types.Currency `json:"confirmed"` - Unconfirmed types.Currency `json:"unconfirmed"` - Immature types.Currency `json:"immature"` + wallet.Balance + + Address types.Address `json:"address"` } WalletSendRequest struct { @@ -103,18 +101,6 @@ type ( // WalletTransactionsOption is an option for the WalletTransactions method. type WalletTransactionsOption func(url.Values) -func WalletTransactionsWithBefore(before time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("before", before.Format(time.RFC3339)) - } -} - -func WalletTransactionsWithSince(since time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("since", since.Format(time.RFC3339)) - } -} - func WalletTransactionsWithLimit(limit int) WalletTransactionsOption { return func(q url.Values) { q.Set("limit", fmt.Sprint(limit)) diff --git a/bus/bus.go b/bus/bus.go index c5ae1113e..1e6197fa0 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -513,16 +513,11 @@ func (b *Bus) Handler() http.Handler { "DELETE /upload/:id": b.uploadFinishedHandlerDELETE, "POST /upload/:id/sector": b.uploadAddSectorHandlerPOST, - "GET /wallet": b.walletHandler, - "POST /wallet/discard": b.walletDiscardHandler, - "POST /wallet/fund": b.walletFundHandler, - "GET /wallet/outputs": b.walletOutputsHandler, - "GET /wallet/pending": b.walletPendingHandler, - "POST /wallet/prepare/renew": b.walletPrepareRenewHandler, - "POST /wallet/redistribute": b.walletRedistributeHandler, - "POST /wallet/send": b.walletSendSiacoinsHandler, - "POST /wallet/sign": b.walletSignHandler, - "GET /wallet/transactions": b.walletTransactionsHandler, + "GET /wallet": b.walletHandler, + "GET /wallet/events": b.walletEventsHandler, + "GET /wallet/pending": b.walletPendingHandler, + "POST /wallet/redistribute": b.walletRedistributeHandler, + "POST /wallet/send": 
b.walletSendSiacoinsHandler, "GET /webhooks": b.webhookHandlerGet, "POST /webhooks": b.webhookHandlerPost, diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 0fcc8d0b5..f6d958dce 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -6,7 +6,6 @@ import ( "net/http" "net/url" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" ) @@ -28,34 +27,6 @@ func (c *Client) Wallet(ctx context.Context) (resp api.WalletResponse, err error return } -// WalletDiscard discards the provided txn, make its inputs usable again. This -// should only be called on transactions that will never be broadcast. -func (c *Client) WalletDiscard(ctx context.Context, txn types.Transaction) error { - return c.c.WithContext(ctx).POST("/wallet/discard", txn, nil) -} - -// WalletFund funds txn using inputs controlled by the wallet. -func (c *Client) WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTransactions bool) ([]types.Hash256, []types.Transaction, error) { - req := api.WalletFundRequest{ - Transaction: *txn, - Amount: amount, - UseUnconfirmedTxns: useUnconfirmedTransactions, - } - var resp api.WalletFundResponse - err := c.c.WithContext(ctx).POST("/wallet/fund", req, &resp) - if err != nil { - return nil, nil, err - } - *txn = resp.Transaction - return resp.ToSign, resp.DependsOn, nil -} - -// WalletOutputs returns the set of unspent outputs controlled by the wallet. -func (c *Client) WalletOutputs(ctx context.Context) (resp []api.SiacoinElement, err error) { - err = c.c.WithContext(ctx).GET("/wallet/outputs", &resp) - return -} - // WalletPending returns the txpool transactions that are relevant to the // wallet. func (c *Client) WalletPending(ctx context.Context) (resp []types.Transaction, err error) { @@ -63,26 +34,6 @@ func (c *Client) WalletPending(ctx context.Context) (resp []types.Transaction, e return } -// WalletPrepareRenew funds and signs a contract renewal transaction. 
-func (c *Client) WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) { - req := api.WalletPrepareRenewRequest{ - Revision: revision, - EndHeight: endHeight, - ExpectedNewStorage: expectedStorage, - HostAddress: hostAddress, - PriceTable: pt, - MaxFundAmount: maxFundAmount, - MinNewCollateral: minNewCollateral, - RenterAddress: renterAddress, - RenterFunds: renterFunds, - RenterKey: renterKey, - WindowSize: windowSize, - } - var resp api.WalletPrepareRenewResponse - err := c.c.WithContext(ctx).POST("/wallet/prepare/renew", req, &resp) - return resp, err -} - // WalletRedistribute broadcasts a transaction that redistributes the money in // the wallet in the desired number of outputs of given amount. If the // transaction was successfully broadcasted it will return the transaction ID. @@ -96,19 +47,9 @@ func (c *Client) WalletRedistribute(ctx context.Context, outputs int, amount typ return } -// WalletSign signs txn using the wallet's private key. -func (c *Client) WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error { - req := api.WalletSignRequest{ - Transaction: *txn, - ToSign: toSign, - CoveredFields: cf, - } - return c.c.WithContext(ctx).POST("/wallet/sign", req, txn) -} - -// WalletTransactions returns all transactions relevant to the wallet. -func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []api.Transaction, err error) { - c.c.Custom("GET", "/wallet/transactions", nil, &resp) +// WalletEvents returns all events relevant to the wallet. 
+func (c *Client) WalletEvents(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []api.Transaction, err error) { + c.c.Custom("GET", "/wallet/events", nil, &resp) values := url.Values{} for _, opt := range opts { diff --git a/bus/routes.go b/bus/routes.go index f020c5944..a0df4cfad 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -21,7 +21,6 @@ import ( "go.sia.tech/core/gateway" "go.sia.tech/core/types" - "go.sia.tech/coreutils/wallet" "go.sia.tech/gofakes3" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" @@ -189,22 +188,13 @@ func (b *Bus) walletHandler(jc jape.Context) { return } - tip, err := b.w.Tip() - if jc.Check("couldn't fetch wallet scan height", err) != nil { - return - } - jc.Encode(api.WalletResponse{ - ScanHeight: tip.Height, - Address: address, - Confirmed: balance.Confirmed, - Spendable: balance.Spendable, - Unconfirmed: balance.Unconfirmed, - Immature: balance.Immature, + Balance: balance, + Address: address, }) } -func (b *Bus) walletTransactionsHandler(jc jape.Context) { +func (b *Bus) walletEventsHandler(jc jape.Context) { offset := 0 limit := -1 if jc.DecodeForm("offset", &offset) != nil || @@ -212,126 +202,11 @@ func (b *Bus) walletTransactionsHandler(jc jape.Context) { return } - // TODO: deprecate these parameters when moving to v2.0.0 - var before, since time.Time - if jc.DecodeForm("before", (*api.TimeRFC3339)(&before)) != nil || - jc.DecodeForm("since", (*api.TimeRFC3339)(&since)) != nil { - return - } - - // convertToTransaction converts wallet event data to a Transaction. 
- convertToTransaction := func(kind string, data wallet.EventData) (txn types.Transaction, ok bool) { - ok = true - switch kind { - case wallet.EventTypeMinerPayout, - wallet.EventTypeFoundationSubsidy, - wallet.EventTypeSiafundClaim: - payout, _ := data.(wallet.EventPayout) - txn = types.Transaction{SiacoinOutputs: []types.SiacoinOutput{payout.SiacoinElement.SiacoinOutput}} - case wallet.EventTypeV1Transaction: - v1Txn, _ := data.(wallet.EventV1Transaction) - txn = types.Transaction(v1Txn.Transaction) - case wallet.EventTypeV1ContractResolution: - fce, _ := data.(wallet.EventV1ContractResolution) - txn = types.Transaction{ - FileContracts: []types.FileContract{fce.Parent.FileContract}, - SiacoinOutputs: []types.SiacoinOutput{fce.SiacoinElement.SiacoinOutput}, - } - default: - ok = false - } - return - } - - // convertToTransactions converts wallet events to API transactions. - convertToTransactions := func(events []wallet.Event) []api.Transaction { - var transactions []api.Transaction - for _, e := range events { - if txn, ok := convertToTransaction(e.Type, e.Data); ok { - transactions = append(transactions, api.Transaction{ - Raw: txn, - Index: e.Index, - ID: types.TransactionID(e.ID), - Inflow: e.SiacoinInflow(), - Outflow: e.SiacoinOutflow(), - Timestamp: e.Timestamp, - }) - } - } - return transactions - } - - if before.IsZero() && since.IsZero() { - events, err := b.w.Events(offset, limit) - if jc.Check("couldn't load transactions", err) == nil { - jc.Encode(convertToTransactions(events)) - } - return - } - - // TODO: remove this when 'before' and 'since' are deprecated, until then we - // fetch all transactions and paginate manually if either is specified - events, err := b.w.Events(0, -1) - if jc.Check("couldn't load transactions", err) != nil { - return - } - filtered := events[:0] - for _, txn := range events { - if (before.IsZero() || txn.Timestamp.Before(before)) && - (since.IsZero() || txn.Timestamp.After(since)) { - filtered = append(filtered, txn) - } 
- } - events = filtered - if limit == 0 || limit == -1 { - jc.Encode(convertToTransactions(events[offset:])) - } else { - jc.Encode(convertToTransactions(events[offset : offset+limit])) - } -} - -func (b *Bus) walletOutputsHandler(jc jape.Context) { - utxos, err := b.w.SpendableOutputs() - if jc.Check("couldn't load outputs", err) == nil { - // convert to siacoin elements - elements := make([]api.SiacoinElement, len(utxos)) - for i, sce := range utxos { - elements[i] = api.SiacoinElement{ - ID: sce.StateElement.ID, - SiacoinOutput: types.SiacoinOutput{ - Value: sce.SiacoinOutput.Value, - Address: sce.SiacoinOutput.Address, - }, - MaturityHeight: sce.MaturityHeight, - } - } - jc.Encode(elements) - } -} - -func (b *Bus) walletFundHandler(jc jape.Context) { - var wfr api.WalletFundRequest - if jc.Decode(&wfr) != nil { - return - } - txn := wfr.Transaction - - if len(txn.MinerFees) == 0 { - // if no fees are specified, we add some - fee := b.cm.RecommendedFee().Mul64(b.cm.TipState().TransactionWeight(txn)) - txn.MinerFees = []types.Currency{fee} - } - - toSign, err := b.w.FundTransaction(&txn, wfr.Amount.Add(txn.MinerFees[0]), wfr.UseUnconfirmedTxns) - if jc.Check("couldn't fund transaction", err) != nil { + events, err := b.w.Events(offset, limit) + if jc.Check("couldn't load events", err) != nil { return } - - jc.Encode(api.WalletFundResponse{ - Transaction: txn, - ToSign: toSign, - DependsOn: b.cm.UnconfirmedParents(txn), - }) + jc.Encode(events) } func (b *Bus) walletSendSiacoinsHandler(jc jape.Context) { @@ -405,15 +280,6 @@ func (b *Bus) walletSendSiacoinsHandler(jc jape.Context) { } } -func (b *Bus) walletSignHandler(jc jape.Context) { - var wsr api.WalletSignRequest - if jc.Decode(&wsr) != nil { - return - } - b.w.SignTransaction(&wsr.Transaction, wsr.ToSign, wsr.CoveredFields) - jc.Encode(wsr.Transaction) -} - func (b *Bus) walletRedistributeHandler(jc jape.Context) { var wfr api.WalletRedistributeRequest if jc.Decode(&wfr) != nil { @@ -474,94 +340,12 @@ func 
(b *Bus) walletRedistributeHandler(jc jape.Context) { jc.Encode(ids) } -func (b *Bus) walletDiscardHandler(jc jape.Context) { - var txn types.Transaction - if jc.Decode(&txn) == nil { - b.w.ReleaseInputs([]types.Transaction{txn}, nil) - } -} - -func (b *Bus) walletPrepareRenewHandler(jc jape.Context) { - var wprr api.WalletPrepareRenewRequest - if jc.Decode(&wprr) != nil { - return - } - if wprr.RenterKey == nil { - jc.Error(errors.New("no renter key provided"), http.StatusBadRequest) - return - } - cs := b.cm.TipState() - - // Create the final revision from the provided revision. - finalRevision := wprr.Revision - finalRevision.MissedProofOutputs = finalRevision.ValidProofOutputs - finalRevision.Filesize = 0 - finalRevision.FileMerkleRoot = types.Hash256{} - finalRevision.RevisionNumber = math.MaxUint64 - - // Prepare the new contract. - fc, basePrice, err := rhpv3.PrepareContractRenewal(wprr.Revision, wprr.HostAddress, wprr.RenterAddress, wprr.RenterFunds, wprr.MinNewCollateral, wprr.PriceTable, wprr.ExpectedNewStorage, wprr.EndHeight) - if jc.Check("couldn't prepare contract renewal", err) != nil { - return - } - - // Create the transaction containing both the final revision and new - // contract. - txn := types.Transaction{ - FileContracts: []types.FileContract{fc}, - FileContractRevisions: []types.FileContractRevision{finalRevision}, - MinerFees: []types.Currency{wprr.PriceTable.TxnFeeMaxRecommended.Mul64(4096)}, - } - - // Compute how much renter funds to put into the new contract. - cost := rhpv3.ContractRenewalCost(cs, wprr.PriceTable, fc, txn.MinerFees[0], basePrice) - - // Make sure we don't exceed the max fund amount. - // TODO: remove the IsZero check for the v2 change - if /*!wprr.MaxFundAmount.IsZero() &&*/ wprr.MaxFundAmount.Cmp(cost) < 0 { - jc.Error(fmt.Errorf("%w: %v > %v", api.ErrMaxFundAmountExceeded, cost, wprr.MaxFundAmount), http.StatusBadRequest) - return - } - - // Fund the txn. We are not signing it yet since it's not complete. 
The host - // still needs to complete it and the revision + contract are signed with - // the renter key by the worker. - toSign, err := b.w.FundTransaction(&txn, cost, true) - if jc.Check("couldn't fund transaction", err) != nil { - return - } - - jc.Encode(api.WalletPrepareRenewResponse{ - FundAmount: cost, - ToSign: toSign, - TransactionSet: append(b.cm.UnconfirmedParents(txn), txn), - }) -} - func (b *Bus) walletPendingHandler(jc jape.Context) { - isRelevant := func(txn types.Transaction) bool { - addr := b.w.Address() - for _, sci := range txn.SiacoinInputs { - if sci.UnlockConditions.UnlockHash() == addr { - return true - } - } - for _, sco := range txn.SiacoinOutputs { - if sco.Address == addr { - return true - } - } - return false - } - - txns := b.cm.PoolTransactions() - relevant := txns[:0] - for _, txn := range txns { - if isRelevant(txn) { - relevant = append(relevant, txn) - } + events, err := b.w.UnconfirmedEvents() + if jc.Check("couldn't fetch unconfirmed events", err) != nil { + return } - jc.Encode(relevant) + jc.Encode(events) } func (b *Bus) hostsHandlerGETDeprecated(jc jape.Context) { diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 38231458d..ca9b80ea2 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "encoding/hex" "errors" "flag" "fmt" @@ -20,10 +19,8 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/api" "go.sia.tech/renterd/config" - "go.sia.tech/renterd/worker/s3" "golang.org/x/term" "gopkg.in/yaml.v3" - "lukechampine.com/frand" ) // TODO: handle RENTERD_S3_HOST_BUCKET_BASES correctly @@ -69,7 +66,6 @@ func defaultConfig() config.Config { }, }, Log: config.Log{ - Path: "", // deprecated. included for compatibility. 
Level: "", File: config.LogFile{ Enabled: true, @@ -127,7 +123,6 @@ func defaultConfig() config.Config { Address: "localhost:8080", Enabled: true, DisableAuth: false, - KeypairsV4: nil, }, } } @@ -205,17 +200,6 @@ func sanitizeConfig(cfg *config.Config) error { } } - // parse S3 auth keys - if cfg.S3.Enabled { - if !cfg.S3.DisableAuth && keyPairsV4 != "" { - var err error - cfg.S3.KeypairsV4, err = s3.Parsev4AuthKeys(strings.Split(keyPairsV4, ";")) - if err != nil { - return fmt.Errorf("failed to parse keypairs: %v", err) - } - } - } - // default log levels if cfg.Log.Level == "" { cfg.Log.Level = "info" @@ -253,12 +237,6 @@ func parseYamlConfig(cfg *config.Config) { } func parseCLIFlags(cfg *config.Config) { - // deprecated - these go first so that they can be overwritten by the non-deprecated flags - flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Database.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") - flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "(deprecated) Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") - flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "(deprecated) Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") - flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "(deprecated) Path to directory for logs (overrides with RENTERD_LOG_PATH)") - // node flag.StringVar(&cfg.HTTP.Address, "http", cfg.HTTP.Address, "Address for serving the API") flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") @@ -289,7 +267,6 @@ func parseCLIFlags(cfg *config.Config) { flag.Uint64Var(&cfg.Bus.AnnouncementMaxAgeHours, "bus.announcementMaxAgeHours", cfg.Bus.AnnouncementMaxAgeHours, "Max age for announcements") flag.BoolVar(&cfg.Bus.Bootstrap, 
"bus.bootstrap", cfg.Bus.Bootstrap, "Bootstraps gateway and consensus modules") flag.StringVar(&cfg.Bus.GatewayAddr, "bus.gatewayAddr", cfg.Bus.GatewayAddr, "Address for Sia peer connections (overrides with RENTERD_BUS_GATEWAY_ADDR)") - flag.DurationVar(&cfg.Bus.PersistInterval, "bus.persistInterval", cfg.Bus.PersistInterval, "(deprecated) Interval for persisting consensus updates") flag.DurationVar(&cfg.Bus.UsedUTXOExpiry, "bus.usedUTXOExpiry", cfg.Bus.UsedUTXOExpiry, "Expiry for used UTXOs in transactions") flag.Int64Var(&cfg.Bus.SlabBufferCompletionThreshold, "bus.slabBufferCompletionThreshold", cfg.Bus.SlabBufferCompletionThreshold, "Threshold for slab buffer upload (overrides with RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD)") @@ -360,9 +337,7 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_DB_NAME", &cfg.Database.MySQL.Database) parseEnvVar("RENTERD_DB_METRICS_NAME", &cfg.Database.MySQL.MetricsDatabase) - parseEnvVar("RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR", &cfg.Database.Log.IgnoreRecordNotFoundError) parseEnvVar("RENTERD_DB_LOGGER_LOG_LEVEL", &cfg.Log.Level) - parseEnvVar("RENTERD_DB_LOGGER_SLOW_THRESHOLD", &cfg.Database.Log.SlowThreshold) parseEnvVar("RENTERD_WORKER_ENABLED", &cfg.Worker.Enabled) parseEnvVar("RENTERD_WORKER_ID", &cfg.Worker.ID) @@ -381,7 +356,6 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_S3_HOST_BUCKET_ENABLED", &cfg.S3.HostBucketEnabled) parseEnvVar("RENTERD_S3_HOST_BUCKET_BASES", &cfg.S3.HostBucketBases) - parseEnvVar("RENTERD_LOG_PATH", &cfg.Log.Path) parseEnvVar("RENTERD_LOG_LEVEL", &cfg.Log.Level) parseEnvVar("RENTERD_LOG_FILE_ENABLED", &cfg.Log.File.Enabled) parseEnvVar("RENTERD_LOG_FILE_FORMAT", &cfg.Log.File.Format) @@ -675,33 +649,6 @@ func setS3Config(cfg *config.Config) { fmt.Println("It should not be exposed to the public internet without setting up a reverse proxy.") setListenAddress("S3 Address", &cfg.S3.Address, true) - // s3 access key - if 
len(cfg.S3.KeypairsV4) != 0 { - fmt.Println("") - fmt.Println("A S3 keypair has already been created.") - fmt.Println("If you change your S3 key pair, you will need to update any scripts or applications that use the S3 API.") - if !promptYesNo("Would you like to change your S3 key pair?") { - return - } - } - - cfg.S3.KeypairsV4 = make(map[string]string) - - fmt.Println("") - answer := promptQuestion("Would you like to automatically generate a new S3 key pair or set your own?", []string{"auto", "manual"}) - if strings.EqualFold(answer, "auto") { - // generate a new key pair - accessKey := hex.EncodeToString(frand.Bytes(20)) - secretKey := hex.EncodeToString(frand.Bytes(20)) - cfg.S3.KeypairsV4[accessKey] = secretKey - fmt.Println("") - fmt.Println("A new S3 key pair has been generated below.") - fmt.Println(wrapANSI("\033[34;1m", "Access Key:", "\033[0m"), accessKey) - fmt.Println(wrapANSI("\033[34;1m", "Secret Key:", "\033[0m"), secretKey) - fmt.Println("") - return - } - var accessKey, secretKey string for { fmt.Println("") @@ -722,6 +669,4 @@ func setS3Config(cfg *config.Config) { } fmt.Println(wrapANSI("\033[31m", "Secret key must be be 40 characters!", "\033[0m")) } - - cfg.S3.KeypairsV4[accessKey] = secretKey } diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go index d107cc4a0..d53bdd709 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -14,10 +14,6 @@ import ( func NewLogger(dir, filename string, cfg config.Log) (*zap.Logger, func(context.Context) error, error) { // path path := filepath.Join(dir, filename) - if cfg.Path != "" { - path = filepath.Join(cfg.Path, filename) - } - if cfg.File.Path != "" { path = cfg.File.Path } diff --git a/config/config.go b/config/config.go index 99382240b..8aaf48e7e 100644 --- a/config/config.go +++ b/config/config.go @@ -41,7 +41,6 @@ type ( } Database struct { - Log DatabaseLog `yaml:"log,omitempty"` // deprecated. included for compatibility. 
// optional fields depending on backend MySQL MySQL `yaml:"mysql,omitempty"` } @@ -55,7 +54,6 @@ type ( RemotePassword string `yaml:"remotePassword,omitempty"` UsedUTXOExpiry time.Duration `yaml:"usedUtxoExpiry,omitempty"` SlabBufferCompletionThreshold int64 `yaml:"slabBufferCompleionThreshold,omitempty"` - PersistInterval time.Duration `yaml:"persistInterval,omitempty"` // deprecated } // LogFile configures the file output of the logger. @@ -76,7 +74,6 @@ type ( } Log struct { - Path string `yaml:"path,omitempty"` // deprecated. included for compatibility. Level string `yaml:"level,omitempty"` // global log level StdOut StdOut `yaml:"stdout,omitempty"` File LogFile `yaml:"file,omitempty"` @@ -104,12 +101,11 @@ type ( } S3 struct { - Address string `yaml:"address,omitempty"` - DisableAuth bool `yaml:"disableAuth,omitempty"` - Enabled bool `yaml:"enabled,omitempty"` - KeypairsV4 map[string]string `yaml:"keypairsV4,omitempty"` // deprecated. included for compatibility. - HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` - HostBucketBases []string `yaml:"hostBucketBases,omitempty"` + Address string `yaml:"address,omitempty"` + DisableAuth bool `yaml:"disableAuth,omitempty"` + Enabled bool `yaml:"enabled,omitempty"` + HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` + HostBucketBases []string `yaml:"hostBucketBases,omitempty"` } // Worker contains the configuration for a worker. diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 255999911..977a6f3e1 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1897,9 +1897,6 @@ func TestWallet(t *testing.T) { // Check wallet info is sane after startup. 
wallet, err := b.Wallet(context.Background()) tt.OK(err) - if wallet.ScanHeight == 0 { - t.Fatal("wallet scan height should not be 0") - } if wallet.Confirmed.IsZero() { t.Fatal("wallet confirmed balance should not be zero") } diff --git a/worker/worker.go b/worker/worker.go index 111300ff2..4ea269d94 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -86,7 +86,6 @@ type ( WebhookStore Syncer - Wallet } // An AccountStore manages ephemaral accounts state. @@ -151,13 +150,6 @@ type ( SyncerPeers(ctx context.Context) (resp []string, err error) } - Wallet interface { - WalletDiscard(ctx context.Context, txn types.Transaction) error - WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) - WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) - WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error - } - WebhookStore interface { RegisterWebhook(ctx context.Context, webhook webhooks.Webhook) error UnregisterWebhook(ctx context.Context, webhook webhooks.Webhook) error From f1542e1b560274543a29a94f1865a138c9cc2339 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 26 Aug 2024 13:54:10 +0200 Subject: [PATCH 05/98] bus: update CreateMultipartUploadRequest --- api/multipart.go | 21 ++++++++------------- bus/client/multipart-upload.go | 11 +++++------ bus/routes.go | 6 ++---- worker/s3/backend.go | 7 +++---- 4 files changed, 18 insertions(+), 27 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index ecd19789f..04417963f 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -51,10 +51,9 @@ type ( } CreateMultipartOptions struct { - 
GenerateKey bool - Key *object.EncryptionKey - MimeType string - Metadata ObjectUserMetadata + DisableClientSideEncryption bool + MimeType string + Metadata ObjectUserMetadata } CompleteMultipartOptions struct { @@ -92,15 +91,11 @@ type ( } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key *object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` - - // TODO: The next major version change should invert this to create a - // key by default - GenerateKey bool `json:"generateKey"` + Bucket string `json:"bucket"` + Path string `json:"path"` + MimeType string `json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` + DisableClientSideEncryption bool `json:"disableClientSideEncryption"` } MultipartCreateResponse struct { diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go index 6fd06204c..4358d4693 100644 --- a/bus/client/multipart-upload.go +++ b/bus/client/multipart-upload.go @@ -47,12 +47,11 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo // CreateMultipartUpload creates a new multipart upload. 
func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{ - Bucket: bucket, - GenerateKey: opts.GenerateKey, - Path: path, - Key: opts.Key, - MimeType: opts.MimeType, - Metadata: opts.Metadata, + Bucket: bucket, + DisableClientSideEncryption: opts.DisableClientSideEncryption, + Path: path, + MimeType: opts.MimeType, + Metadata: opts.Metadata, }, &resp) return } diff --git a/bus/routes.go b/bus/routes.go index a0df4cfad..08ee15494 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1960,12 +1960,10 @@ func (b *Bus) multipartHandlerCreatePOST(jc jape.Context) { } var key object.EncryptionKey - if req.GenerateKey { - key = object.GenerateEncryptionKey() - } else if req.Key == nil { + if req.DisableClientSideEncryption { key = object.NoOpKey } else { - key = *req.Key + key = object.GenerateEncryptionKey() } resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata) diff --git a/worker/s3/backend.go b/worker/s3/backend.go index a8dd1cb22..528f9e7ca 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -12,7 +12,6 @@ import ( "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" - "go.sia.tech/renterd/object" "go.uber.org/zap" ) @@ -433,9 +432,9 @@ func (s *s3) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKe func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta map[string]string) (gofakes3.UploadID, error) { convertToSiaMetadataHeaders(meta) resp, err := s.b.CreateMultipartUpload(ctx, bucket, "/"+key, api.CreateMultipartOptions{ - Key: &object.NoOpKey, - MimeType: meta["Content-Type"], - Metadata: api.ExtractObjectUserMetadataFrom(meta), + DisableClientSideEncryption: true, + MimeType: meta["Content-Type"], + Metadata: 
api.ExtractObjectUserMetadataFrom(meta), }) if err != nil { return "", gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) From 3b7130ac28b14f6b439b29ded01d6456c976a883 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 26 Aug 2024 15:07:02 +0200 Subject: [PATCH 06/98] sql: only raise transaction log level to warn after 1s --- internal/sql/sql.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/sql/sql.go b/internal/sql/sql.go index b677e97fd..2b5920f9d 100644 --- a/internal/sql/sql.go +++ b/internal/sql/sql.go @@ -12,6 +12,7 @@ import ( "time" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -172,7 +173,11 @@ LOOP: if sleep > maxBackoff { sleep = maxBackoff } - log.Warn("database locked", zap.Duration("elapsed", time.Since(attemptStart)), zap.Duration("totalElapsed", time.Since(start)), zap.Stack("stack"), zap.Duration("retry", sleep)) + lvl := zapcore.DebugLevel + if time.Since(start) > time.Second { + lvl = zapcore.WarnLevel + } + log.Log(lvl, "database locked", zap.Duration("elapsed", time.Since(attemptStart)), zap.Duration("totalElapsed", time.Since(start)), zap.Stack("stack"), zap.Duration("retry", sleep)) select { case <-ctx.Done(): From ae09b5e696df2f38f86432b823a047674f6331d3 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 11:27:34 +0200 Subject: [PATCH 07/98] autopilot: remove compatV105Host check --- autopilot/autopilot.go | 51 ------------------------------------------ 1 file changed, 51 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 9ea235a11..149ac710f 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -722,16 +722,6 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { return } - state, err := ap.buildState(jc.Request.Context()) - if jc.Check("failed to build state", err) != nil { - return - } - - // TODO: remove on next major release - if jc.Check("failed to get host", compatV105Host(jc.Request.Context(), 
state.ContractsConfig(), ap.bus, hk)) != nil { - return - } - hi, err := ap.bus.Host(jc.Request.Context(), hk) if jc.Check("failed to get host info", err) != nil { return @@ -910,47 +900,6 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta }, nil } -// compatV105Host performs some state checks and bus calls we no longer need but -// are necessary checks to make sure our API is consistent. This should be -// removed in the next major release. -func compatV105Host(ctx context.Context, cfg api.ContractsConfig, b Bus, hk types.PublicKey) error { - // state checks - if cfg.Allowance.IsZero() { - return fmt.Errorf("can not score hosts because contracts allowance is zero") - } - if cfg.Amount == 0 { - return fmt.Errorf("can not score hosts because contracts amount is zero") - } - if cfg.Period == 0 { - return fmt.Errorf("can not score hosts because contract period is zero") - } - - // fetch host - _, err := b.Host(ctx, hk) - if err != nil { - return fmt.Errorf("failed to fetch requested host from bus: %w", err) - } - - // other checks - _, err = b.GougingSettings(ctx) - if err != nil { - return fmt.Errorf("failed to fetch gouging settings from bus: %w", err) - } - _, err = b.RedundancySettings(ctx) - if err != nil { - return fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) - } - _, err = b.ConsensusState(ctx) - if err != nil { - return fmt.Errorf("failed to fetch consensus state from bus: %w", err) - } - _, err = b.RecommendedFee(ctx) - if err != nil { - return fmt.Errorf("failed to fetch recommended fee from bus: %w", err) - } - return nil -} - func compatV105UsabilityFilterModeCheck(usabilityMode string) error { switch usabilityMode { case api.UsabilityFilterModeUsable: From c9fccdf35948c68a2a3dce8d47b47dd9c5655b3f Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 11:30:16 +0200 Subject: [PATCH 08/98] autopilot: remove usability filter mode check --- autopilot/autopilot.go | 17 ----------------- 1 file changed, 
17 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 149ac710f..3661308f4 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -755,11 +755,6 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { return } - // TODO: remove on next major release - if jc.Check("failed to get host info", compatV105UsabilityFilterModeCheck(req.UsabilityMode)) != nil { - return - } - hosts, err := ap.bus.SearchHosts(jc.Request.Context(), api.SearchHostOptions{ AutopilotID: ap.id, Offset: req.Offset, @@ -900,18 +895,6 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta }, nil } -func compatV105UsabilityFilterModeCheck(usabilityMode string) error { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - case api.UsabilityFilterModeUnusable: - case api.UsabilityFilterModeAll: - case "": - default: - return fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) - } - return nil -} - func computeNextPeriod(bh, currentPeriod, period uint64) uint64 { prevPeriod := currentPeriod nextPeriod := prevPeriod From 5966dc3aecc9ab6f4d41694ae7508fc3abc030cb Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 11:36:06 +0200 Subject: [PATCH 09/98] bus: validate SearchHostsRequest --- bus/routes.go | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index f020c5944..14bae2cd6 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -585,9 +585,41 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { return } - // TODO: on the next major release: - // - properly default search params (currently no defaults are set) - // - properly validate and return 400 (currently validation is done in autopilot and the store) + // validate the usability mode + switch req.UsabilityMode { + case api.UsabilityFilterModeUsable: + case api.UsabilityFilterModeUnusable: + case 
api.UsabilityFilterModeAll: + case "": + req.UsabilityMode = api.UsabilityFilterModeAll + default: + jc.Error(fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", req.UsabilityMode), http.StatusBadRequest) + return + } + + // validate the filter mode + switch req.FilterMode { + case api.HostFilterModeAllowed: + case api.HostFilterModeBlocked: + case api.HostFilterModeAll: + req.FilterMode = api.HostFilterModeAll + case "": + default: + jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked' or an empty string for no filter", req.FilterMode), http.StatusBadRequest) + return + } + + // validate the offset and limit + if req.Offset < 0 { + jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) + return + } + if req.Limit < 0 { + jc.Error(errors.New("limit must be non-negative"), http.StatusBadRequest) + return + } else if req.Limit == 0 { + req.Limit = -1 + } hosts, err := b.hs.SearchHosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { From 75dad20e458ba02bc38613b5d1d088a8c3fadefc Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 11:39:22 +0200 Subject: [PATCH 10/98] all: use SearchHosts defaults --- autopilot/autopilot.go | 2 +- autopilot/contractor/contractor.go | 14 +++----------- bus/bus.go | 1 - bus/routes.go | 17 +---------------- 4 files changed, 5 insertions(+), 29 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 3661308f4..58bf76bb1 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -194,7 +194,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := ap.bus.SearchHosts(ctx, 
api.SearchHostOptions{}) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 4e1b87d3b..37223864b 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -1129,11 +1129,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, for _, c := range contracts { usedHosts[c.HostKey] = struct{}{} } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{ - Limit: -1, - FilterMode: api.HostFilterModeAllowed, - UsabilityMode: api.UsabilityFilterModeAll, - }) + allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) if err != nil { return nil, fmt.Errorf("failed to fetch usable hosts: %w", err) } @@ -1232,7 +1228,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error { var usabilityBreakdown unusableHostsBreakdown // fetch all hosts that are not blocked - hosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } @@ -1285,11 +1281,7 @@ func performPostMaintenanceTasks(ctx *mCtx, bus Bus, w Worker, alerter alerts.Al if err != nil { return fmt.Errorf("failed to fetch contracts: %w", err) } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{ - Limit: -1, - FilterMode: api.HostFilterModeAllowed, - UsabilityMode: api.UsabilityFilterModeAll, - }) + allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } diff --git a/bus/bus.go b/bus/bus.go index c5ae1113e..423329e94 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -442,7 +442,6 @@ func (b *Bus) Handler() http.Handler { "GET /contract/:id/roots": b.contractIDRootsHandlerGET, "GET 
/contract/:id/size": b.contractSizeHandlerGET, - "GET /hosts": b.hostsHandlerGETDeprecated, "GET /hosts/allowlist": b.hostsAllowlistHandlerGET, "PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT, "GET /hosts/blocklist": b.hostsBlocklistHandlerGET, diff --git a/bus/routes.go b/bus/routes.go index 14bae2cd6..5df645a44 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -564,21 +564,6 @@ func (b *Bus) walletPendingHandler(jc jape.Context) { jc.Encode(relevant) } -func (b *Bus) hostsHandlerGETDeprecated(jc jape.Context) { - offset := 0 - limit := -1 - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { - return - } - - // fetch hosts - hosts, err := b.hs.SearchHosts(jc.Request.Context(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, offset, limit) - if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { - return - } - jc.Encode(hosts) -} - func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { var req api.SearchHostsRequest if jc.Decode(&req) != nil { @@ -602,7 +587,7 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { case api.HostFilterModeAllowed: case api.HostFilterModeBlocked: case api.HostFilterModeAll: - req.FilterMode = api.HostFilterModeAll + req.FilterMode = api.HostFilterModeAllowed case "": default: jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked' or an empty string for no filter", req.FilterMode), http.StatusBadRequest) From 24470d5d32c1889b7ba7fccda24d574b31537a3c Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 11:41:28 +0200 Subject: [PATCH 11/98] bus: deprecate /hosts --- api/host.go | 13 ------------- bus/client/hosts.go | 8 -------- bus/routes.go | 6 +++--- internal/test/e2e/blocklist_test.go | 6 +++--- internal/test/e2e/cluster_test.go | 4 ++-- internal/test/e2e/pruning_test.go | 4 ++-- 6 files changed, 10 insertions(+), 31 deletions(-) diff --git a/api/host.go b/api/host.go index d932229d6..36dd0862a 100644 --- 
a/api/host.go +++ b/api/host.go @@ -104,10 +104,6 @@ type ( // Option types. type ( - GetHostsOptions struct { - Offset int - Limit int - } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 Limit int @@ -125,15 +121,6 @@ type ( } ) -func (opts GetHostsOptions) Apply(values url.Values) { - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) - } - if opts.Limit != 0 { - values.Set("limit", fmt.Sprint(opts.Limit)) - } -} - func (opts HostsForScanningOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 709cb899c..391a9977e 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -28,14 +28,6 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err return } -// Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []api.Host, err error) { - values := url.Values{} - opts.Apply(values) - err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) - return -} - // HostsForScanning returns 'limit' host addresses at given 'offset' which // haven't been scanned after lastScan. 
func (c *Client) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) (hosts []api.HostAddress, err error) { diff --git a/bus/routes.go b/bus/routes.go index 5df645a44..f1424d15f 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -587,8 +587,8 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { case api.HostFilterModeAllowed: case api.HostFilterModeBlocked: case api.HostFilterModeAll: - req.FilterMode = api.HostFilterModeAllowed case "": + req.FilterMode = api.HostFilterModeAllowed default: jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked' or an empty string for no filter", req.FilterMode), http.StatusBadRequest) return @@ -599,8 +599,8 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) return } - if req.Limit < 0 { - jc.Error(errors.New("limit must be non-negative"), http.StatusBadRequest) + if req.Limit < 0 && req.Limit != -1 { + jc.Error(errors.New("limit must be non-negative or equal to -1 to indicate no limit"), http.StatusBadRequest) return } else if req.Limit == 0 { req.Limit = -1 diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 94659b277..39d2540a3 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -121,7 +121,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := b.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -146,7 +146,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -156,7 +156,7 @@ func 
TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 255999911..f048666fb 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -290,7 +290,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) @@ -744,7 +744,7 @@ func TestUploadDownloadBasic(t *testing.T) { // check that stored data on hosts was updated tt.Retry(100, 100*time.Millisecond, func() error { - hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) for _, host := range hosts { if host.StoredData != rhpv2.SectorSize { diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 8492bf9f1..81db425a4 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -59,7 +59,7 @@ func TestHostPruning(t *testing.T) { tt.OKAll(a.Trigger(true)) // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err := b.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -71,7 +71,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = 
b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err = b.SearchHosts(context.Background(), api.SearchHostOptions{}) tt.OK(err) if len(hostss) != 0 { a.Trigger(true) // trigger autopilot From e2fec6b0f2a5ea372a1a7f8bf25dc292e15f3ca5 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Aug 2024 13:43:04 +0200 Subject: [PATCH 12/98] bus: update error msg --- bus/routes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bus/routes.go b/bus/routes.go index f1424d15f..9733a0eb5 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -590,7 +590,7 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { case "": req.FilterMode = api.HostFilterModeAllowed default: - jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked' or an empty string for no filter", req.FilterMode), http.StatusBadRequest) + jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked' or an empty string for 'allowed' filter", req.FilterMode), http.StatusBadRequest) return } From d20ea8cecc9932a9c1ea0772dfeb921b74002501 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 28 Aug 2024 11:51:36 +0200 Subject: [PATCH 13/98] bus: update settings --- api/events.go | 24 +-- api/setting.go | 9 + autopilot/autopilot.go | 1 - bus/bus.go | 72 ++++++- bus/client/settings.go | 59 +++--- bus/routes.go | 329 +++++++++++++++--------------- cmd/renterd/node.go | 2 +- internal/test/e2e/cluster.go | 12 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/events_test.go | 10 +- internal/test/e2e/gouging_test.go | 4 +- internal/test/e2e/s3_test.go | 2 +- internal/worker/cache.go | 9 - internal/worker/cache_test.go | 1 - stores/settingsdb.go | 26 --- stores/settingsdb_test.go | 39 +--- stores/sql/database.go | 6 - stores/sql/main.go | 24 --- stores/sql/mysql/main.go | 8 - stores/sql/sqlite/main.go | 8 - worker/s3/s3.go | 1 - 21 files changed, 295 insertions(+), 353 deletions(-) diff --git a/api/events.go b/api/events.go index e9600e53b..38d490506 100644 
--- a/api/events.go +++ b/api/events.go @@ -19,7 +19,6 @@ const ( EventAdd = "add" EventUpdate = "update" - EventDelete = "delete" EventArchive = "archive" EventRenew = "renew" ) @@ -68,11 +67,6 @@ type ( Update interface{} `json:"update"` Timestamp time.Time `json:"timestamp"` } - - EventSettingDelete struct { - Key string `json:"key"` - Timestamp time.Time `json:"timestamp"` - } ) var ( @@ -138,15 +132,6 @@ var ( URL: url, } } - - WebhookSettingDelete = func(url string, headers map[string]string) webhooks.Webhook { - return webhooks.Webhook{ - Event: EventDelete, - Headers: headers, - Module: ModuleSetting, - URL: url, - } - } ) func ParseEventWebhook(event webhooks.Event) (interface{}, error) { @@ -201,19 +186,12 @@ func ParseEventWebhook(event webhooks.Event) (interface{}, error) { return e, nil } case ModuleSetting: - switch event.Event { - case EventUpdate: + if event.Event == EventUpdate { var e EventSettingUpdate if err := json.Unmarshal(bytes, &e); err != nil { return nil, err } return e, nil - case EventDelete: - var e EventSettingDelete - if err := json.Unmarshal(bytes, &e); err != nil { - return nil, err - } - return e, nil } } return nil, fmt.Errorf("%w: module %s event %s", ErrUnknownEvent, event.Module, event.Event) diff --git a/api/setting.go b/api/setting.go index 5976b00b2..34f5dfd31 100644 --- a/api/setting.go +++ b/api/setting.go @@ -239,6 +239,15 @@ func (gs GougingSettings) Validate() error { return nil } +// Validate returns an error if the upload packing settings are not considered +// valid. +func (up UploadPackingSettings) Validate() error { + if up.Enabled && up.SlabBufferMaxSizeSoft <= 0 { + return errors.New("SlabBufferMaxSizeSoft must be greater than zero when upload packing is enabled") + } + return nil +} + // Redundancy returns the effective storage redundancy of the // RedundancySettings. 
func (rs RedundancySettings) Redundancy() float64 { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 9ea235a11..fb554fb2c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -73,7 +73,6 @@ type Bus interface { SlabsForMigration(ctx context.Context, healthCutoff float64, set string, limit int) ([]api.UnhealthySlab, error) // settings - UpdateSetting(ctx context.Context, key string, value interface{}) error GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) diff --git a/bus/bus.go b/bus/bus.go index c5ae1113e..f5117825d 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -294,9 +294,7 @@ type ( // A SettingStore stores settings. SettingStore interface { - DeleteSetting(ctx context.Context, key string) error Setting(ctx context.Context, key string) (string, error) - Settings(ctx context.Context) ([]string, error) UpdateSetting(ctx context.Context, key, value string) error } @@ -485,10 +483,16 @@ func (b *Bus) Handler() http.Handler { "DELETE /sectors/:hk/:root": b.sectorsHostRootHandlerDELETE, - "GET /settings": b.settingsHandlerGET, - "GET /setting/:key": b.settingKeyHandlerGET, - "PUT /setting/:key": b.settingKeyHandlerPUT, - "DELETE /setting/:key": b.settingKeyHandlerDELETE, + "GET /settings/gouging": b.settingsGougingHandlerGET, + "PUT /settings/gouging": b.settingsGougingHandlerPUT, + "GET /settings/pinned": b.settingsPinnedHandlerGET, + "PUT /settings/pinned": b.settingsPinnedHandlerPUT, + "GET /settings/redundancy": b.settingsRedundancyHandlerGET, + "PUT /settings/redundancy": b.settingsRedundancyHandlerPUT, + "GET /settings/s3authentication": b.settingsS3AuthenticationHandlerGET, + "PUT /settings/s3authentication": b.settingsS3AuthenticationHandlerPUT, + "GET /settings/uploadpacking": b.settingsUploadPackingHandlerGET, + "PUT /settings/uploadpacking": b.settingsUploadPackingHandlerPUT, "POST /slabs/migration": 
b.slabsMigrationHandlerPOST, "GET /slabs/partial/:key": b.slabsPartialHandlerGET, @@ -738,3 +742,59 @@ func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { } return pk } + +func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) error { + // testnets have different redundancy settings + defaultRedundancySettings := api.DefaultRedundancySettings + if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { + defaultRedundancySettings = api.DefaultRedundancySettingsTestnet + } + + defaults := map[string]interface{}{ + api.SettingGouging: api.DefaultGougingSettings, + api.SettingPricePinning: api.DefaultPricePinSettings, + api.SettingRedundancy: defaultRedundancySettings, + api.SettingUploadPacking: api.DefaultUploadPackingSettings, + } + + setting, err := b.ss.Setting(ctx, key) + if errors.Is(err, api.ErrSettingNotFound) { + val, ok := defaults[key] + if !ok { + return fmt.Errorf("%w: unknown setting '%s'", api.ErrSettingNotFound, key) + } + + bytes, _ := json.Marshal(val) + if err := b.ss.UpdateSetting(ctx, key, string(bytes)); err != nil { + b.logger.Warn(fmt.Sprintf("failed to update default setting '%s': %v", key, err)) + } + return json.Unmarshal(bytes, &val) + } else if err != nil { + return err + } + + return json.Unmarshal([]byte(setting), &value) +} + +func (b *Bus) updateSetting(ctx context.Context, key string, value string, updatePinMgr bool) error { + err := b.ss.UpdateSetting(ctx, key, value) + if err != nil { + return err + } + + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: key, + Update: value, + Timestamp: time.Now().UTC(), + }, + }) + + if updatePinMgr { + b.pinMgr.TriggerUpdate() + } + + return nil +} diff --git a/bus/client/settings.go b/bus/client/settings.go index 22714cf8b..e813a7417 100644 --- a/bus/client/settings.go +++ b/bus/client/settings.go @@ -2,65 +2,72 @@ package client import ( "context" - "fmt" 
"go.sia.tech/renterd/api" ) // ContractSetSettings returns the contract set settings. -func (c *Client) ContractSetSettings(ctx context.Context) (gs api.ContractSetSetting, err error) { - err = c.Setting(ctx, api.SettingContractSet, &gs) +func (c *Client) ContractSetSettings(ctx context.Context) (css api.ContractSetSetting, err error) { + err = c.c.WithContext(ctx).GET("/setting/contractset", &css) return } -// DeleteSetting will delete the setting with given key. -func (c *Client) DeleteSetting(ctx context.Context, key string) error { - return c.c.WithContext(ctx).DELETE(fmt.Sprintf("/setting/%s", key)) +// UpdateContractSetSetting updates the given setting. +func (c *Client) UpdateContractSetSetting(ctx context.Context, css api.ContractSetSetting) error { + return c.c.WithContext(ctx).PUT("/setting/contractset", css) } // GougingSettings returns the gouging settings. func (c *Client) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { - err = c.Setting(ctx, api.SettingGouging, &gs) + err = c.c.WithContext(ctx).GET("/setting/gouging", &gs) return } +// UpdateGougingSettings updates the given setting. +func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { + return c.c.WithContext(ctx).PUT("/setting/gouging", gs) +} + // PricePinningSettings returns the contract set settings. func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PricePinSettings, err error) { - err = c.Setting(ctx, api.SettingPricePinning, &pps) + err = c.c.WithContext(ctx).GET("/setting/pinned", &pps) return } -// RedundancySettings returns the redundancy settings. -func (c *Client) RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) { - err = c.Setting(ctx, api.SettingRedundancy, &rs) - return +// UpdatePinnedSettings updates the given setting. 
+func (c *Client) UpdatePinnedSettings(ctx context.Context, pps api.PricePinSettings) error { + return c.c.WithContext(ctx).PUT("/setting/pinned", pps) } -// S3AuthenticationSettings returns the S3 authentication settings. -func (c *Client) S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) { - err = c.Setting(ctx, api.SettingS3Authentication, &as) +// RedundancySettings returns the redundancy settings. +func (c *Client) RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) { + err = c.c.WithContext(ctx).GET("/setting/redundancy", &rs) return } -// Setting returns the value for the setting with given key. -func (c *Client) Setting(ctx context.Context, key string, value interface{}) (err error) { - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/setting/%s", key), &value) - return +// UpdateRedundancySettings updates the given setting. +func (c *Client) UpdateRedundancySettings(ctx context.Context, rs api.RedundancySettings) error { + return c.c.WithContext(ctx).PUT("/setting/redundancy", rs) } -// Settings returns the keys of all settings. -func (c *Client) Settings(ctx context.Context) (settings []string, err error) { - err = c.c.WithContext(ctx).GET("/settings", &settings) +// S3AuthenticationSettings returns the S3 authentication settings. +func (c *Client) S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) { + err = c.c.WithContext(ctx).GET("/setting/s3authentication", &as) return } -// UpdateSetting will update the given setting under the given key. -func (c *Client) UpdateSetting(ctx context.Context, key string, value interface{}) error { - return c.c.WithContext(ctx).PUT(fmt.Sprintf("/setting/%s", key), value) +// UpdateS3AuthenticationSettings updates the given setting. 
+func (c *Client) UpdateS3AuthenticationSettings(ctx context.Context, as api.S3AuthenticationSettings) error { + return c.c.WithContext(ctx).PUT("/setting/s3authentication", as) } // UploadPackingSettings returns the upload packing settings. func (c *Client) UploadPackingSettings(ctx context.Context) (ups api.UploadPackingSettings, err error) { - err = c.Setting(ctx, api.SettingUploadPacking, &ups) + err = c.c.WithContext(ctx).GET("/setting/uploadpacking", &ups) return } + +// UpdateUploadPackingSettings updates the given setting. +func (c *Client) UpdateUploadPackingSettings(ctx context.Context, ups api.UploadPackingSettings) error { + return c.c.WithContext(ctx).PUT("/setting/uploadpacking", ups) +} diff --git a/bus/routes.go b/bus/routes.go index f020c5944..76c7df4ec 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -33,15 +33,6 @@ import ( "go.uber.org/zap" ) -func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) error { - if val, err := b.ss.Setting(ctx, key); err != nil { - return fmt.Errorf("could not get contract set settings: %w", err) - } else if err := json.Unmarshal([]byte(val), &value); err != nil { - b.logger.Panicf("failed to unmarshal %v settings '%s': %v", key, val, err) - } - return nil -} - func (b *Bus) consensusAcceptBlock(jc jape.Context) { var block types.Block if jc.Decode(&block) != nil { @@ -1261,6 +1252,176 @@ func (b *Bus) packedSlabsHandlerDonePOST(jc jape.Context) { jc.Check("failed to mark packed slab(s) as uploaded", b.ms.MarkPackedSlabsUploaded(jc.Request.Context(), psrp.Slabs)) } +func (b *Bus) settingsGougingHandlerGET(jc jape.Context) { + var gs api.GougingSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingGouging, &gs); errors.Is(err, api.ErrSettingNotFound) { + jc.Error(err, http.StatusNotFound) + } else if jc.Check("failed to get gouging settings", err) == nil { + jc.Encode(gs) + } +} + +func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { + var gs api.GougingSettings + if 
jc.Decode(&gs) != nil { + return + } else if err := gs.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update gouging settings, error: %v", err), http.StatusBadRequest) + return + } + + // marshal the setting + data, err := json.Marshal(gs) + if err != nil { + jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + return + } + + // update the setting + if jc.Check("could not update gouging settings", b.updateSetting(jc.Request.Context(), api.SettingGouging, string(data), true)) != nil { + return + } +} + +func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { + var pps api.PricePinSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingPricePinning, &pps); errors.Is(err, api.ErrSettingNotFound) { + jc.Error(err, http.StatusNotFound) + } else if jc.Check("failed to get price pinning settings", err) == nil { + // populate the Autopilots map with the current autopilots + aps, err := b.as.Autopilots(jc.Request.Context()) + if jc.Check("failed to fetch autopilots", err) != nil { + return + } + for _, ap := range aps { + if _, exists := pps.Autopilots[ap.ID]; !exists { + pps.Autopilots[ap.ID] = api.AutopilotPins{} + } + } + jc.Encode(pps) + } +} + +func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { + var pps api.PricePinSettings + if jc.Decode(&pps) != nil { + return + } else if err := pps.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update price pinning settings, error: %v", err), http.StatusBadRequest) + return + } else if pps.Enabled { + if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { + jc.Error(fmt.Errorf("couldn't update price pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) + return + } + } + + // marshal the setting + data, err := json.Marshal(pps) + if err != nil { + jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + return 
+ } + + // update the setting + if jc.Check("could not update price pinning settings", b.updateSetting(jc.Request.Context(), api.SettingPricePinning, string(data), true)) != nil { + return + } +} + +func (b *Bus) settingsRedundancyHandlerGET(jc jape.Context) { + var rs api.RedundancySettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingRedundancy, &rs); errors.Is(err, api.ErrSettingNotFound) { + jc.Error(err, http.StatusNotFound) + } else if jc.Check("failed to get redundancy settings", err) == nil { + jc.Encode(rs) + } +} + +func (b *Bus) settingsRedundancyHandlerPUT(jc jape.Context) { + var rs api.RedundancySettings + if jc.Decode(&rs) != nil { + return + } else if err := rs.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update redundancy settings, error: %v", err), http.StatusBadRequest) + return + } + + // marshal the setting + data, err := json.Marshal(rs) + if err != nil { + jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + return + } + + // update the setting + if jc.Check("could not update redundancy settings", b.updateSetting(jc.Request.Context(), api.SettingRedundancy, string(data), false)) != nil { + return + } +} + +func (b *Bus) settingsS3AuthenticationHandlerGET(jc jape.Context) { + var s3as api.S3AuthenticationSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingS3Authentication, &s3as); errors.Is(err, api.ErrSettingNotFound) { + jc.Error(err, http.StatusNotFound) + } else if jc.Check("failed to get s3 authentication settings", err) == nil { + jc.Encode(s3as) + } +} + +func (b *Bus) settingsS3AuthenticationHandlerPUT(jc jape.Context) { + var s3as api.S3AuthenticationSettings + if jc.Decode(&s3as) != nil { + return + } else if err := s3as.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update s3 authentication settings, error: %v", err), http.StatusBadRequest) + return + } + + // marshal the setting + data, err := json.Marshal(s3as) + if err != nil { + 
jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + return + } + + // update the setting + if jc.Check("could not update s3 authentication settings", b.updateSetting(jc.Request.Context(), api.SettingS3Authentication, string(data), false)) != nil { + return + } +} + +func (b *Bus) settingsUploadPackingHandlerGET(jc jape.Context) { + var ups api.UploadPackingSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &ups); errors.Is(err, api.ErrSettingNotFound) { + jc.Error(err, http.StatusNotFound) + } else if jc.Check("failed to get upload packing settings", err) == nil { + jc.Encode(ups) + } +} + +func (b *Bus) settingsUploadPackingHandlerPUT(jc jape.Context) { + var ups api.UploadPackingSettings + if jc.Decode(&ups) != nil { + return + } else if err := ups.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update upload packing settings, error: %v", err), http.StatusBadRequest) + return + } + + // marshal the setting + data, err := json.Marshal(ups) + if err != nil { + jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + return + } + + // update the setting + if jc.Check("could not update upload packing settings", b.updateSetting(jc.Request.Context(), api.SettingUploadPacking, string(data), false)) != nil { + return + } +} + func (b *Bus) sectorsHostRootHandlerDELETE(jc jape.Context) { var hk types.PublicKey var root types.Hash256 @@ -1405,156 +1566,6 @@ func (b *Bus) slabsPartialHandlerPOST(jc jape.Context) { }) } -func (b *Bus) settingsHandlerGET(jc jape.Context) { - if settings, err := b.ss.Settings(jc.Request.Context()); jc.Check("couldn't load settings", err) == nil { - jc.Encode(settings) - } -} - -func (b *Bus) settingKeyHandlerGET(jc jape.Context) { - jc.Custom(nil, (any)(nil)) - - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - setting, 
err := b.ss.Setting(jc.Request.Context(), jc.PathParam("key")) - if errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if err != nil { - jc.Error(err, http.StatusInternalServerError) - return - } - resp := []byte(setting) - - // populate autopilots of price pinning settings with defaults for better DX - if key == api.SettingPricePinning { - var pps api.PricePinSettings - err = json.Unmarshal([]byte(setting), &pps) - if jc.Check("failed to unmarshal price pinning settings", err) != nil { - return - } else if pps.Autopilots == nil { - pps.Autopilots = make(map[string]api.AutopilotPins) - } - // populate the Autopilots map with the current autopilots - aps, err := b.as.Autopilots(jc.Request.Context()) - if jc.Check("failed to fetch autopilots", err) != nil { - return - } - for _, ap := range aps { - if _, exists := pps.Autopilots[ap.ID]; !exists { - pps.Autopilots[ap.ID] = api.AutopilotPins{} - } - } - // encode the settings back - resp, err = json.Marshal(pps) - if jc.Check("failed to marshal price pinning settings", err) != nil { - return - } - } - jc.ResponseWriter.Header().Set("Content-Type", "application/json") - jc.ResponseWriter.Write(resp) -} - -func (b *Bus) settingKeyHandlerPUT(jc jape.Context) { - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - var value interface{} - if jc.Decode(&value) != nil { - return - } - - data, err := json.Marshal(value) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) - return - } - - switch key { - case api.SettingGouging: - var gs api.GougingSettings - if err := json.Unmarshal(data, &gs); err != nil { - jc.Error(fmt.Errorf("couldn't update gouging settings, invalid request body, %t", value), http.StatusBadRequest) - return - } else if err := gs.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update gouging 
settings, error: %v", err), http.StatusBadRequest) - return - } - b.pinMgr.TriggerUpdate() - case api.SettingRedundancy: - var rs api.RedundancySettings - if err := json.Unmarshal(data, &rs); err != nil { - jc.Error(fmt.Errorf("couldn't update redundancy settings, invalid request body"), http.StatusBadRequest) - return - } else if err := rs.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update redundancy settings, error: %v", err), http.StatusBadRequest) - return - } - case api.SettingS3Authentication: - var s3as api.S3AuthenticationSettings - if err := json.Unmarshal(data, &s3as); err != nil { - jc.Error(fmt.Errorf("couldn't update s3 authentication settings, invalid request body"), http.StatusBadRequest) - return - } else if err := s3as.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update s3 authentication settings, error: %v", err), http.StatusBadRequest) - return - } - case api.SettingPricePinning: - var pps api.PricePinSettings - if err := json.Unmarshal(data, &pps); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid request body"), http.StatusBadRequest) - return - } else if err := pps.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) - return - } else if pps.Enabled { - if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) - return - } - } - b.pinMgr.TriggerUpdate() - } - - if jc.Check("could not update setting", b.ss.UpdateSetting(jc.Request.Context(), key, string(data))) == nil { - b.broadcastAction(webhooks.Event{ - Module: api.ModuleSetting, - Event: api.EventUpdate, - Payload: api.EventSettingUpdate{ - Key: key, - Update: value, - Timestamp: time.Now().UTC(), - }, - }) - } -} - -func (b *Bus) 
settingKeyHandlerDELETE(jc jape.Context) { - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - if jc.Check("could not delete setting", b.ss.DeleteSetting(jc.Request.Context(), key)) == nil { - b.broadcastAction(webhooks.Event{ - Module: api.ModuleSetting, - Event: api.EventDelete, - Payload: api.EventSettingDelete{ - Key: key, - Timestamp: time.Now().UTC(), - }, - }) - } -} - func (b *Bus) contractIDAncestorsHandler(jc jape.Context) { var fcid types.FileContractID if jc.DecodeParam("id", &fcid) != nil { diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 9defbc127..5ab3bf13d 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -435,7 +435,7 @@ func (n *node) Run() error { as.V4Keypairs[k] = v } // update settings - if err := n.bus.UpdateSetting(context.Background(), api.SettingS3Authentication, as); err != nil { + if err := n.bus.UpdateS3AuthenticationSettings(context.Background(), as); err != nil { return fmt.Errorf("failed to update S3 authentication settings: %w", err) } } diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index b500643d3..189a92e43 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -436,14 +436,14 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { } // Update the bus settings. 
- tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, test.GougingSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, test.ContractSetSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingPricePinning, test.PricePinSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{ + tt.OK(busClient.UpdateGougingSettings(ctx, test.GougingSettings)) + tt.OK(busClient.UpdateContractSetSetting(ctx, test.ContractSetSettings)) + tt.OK(busClient.UpdatePinnedSettings(ctx, test.PricePinSettings)) + tt.OK(busClient.UpdateRedundancySettings(ctx, test.RedundancySettings)) + tt.OK(busClient.UpdateS3AuthenticationSettings(ctx, api.S3AuthenticationSettings{ V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, })) - tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{ + tt.OK(busClient.UpdateUploadPackingSettings(ctx, api.UploadPackingSettings{ Enabled: enableUploadPacking, SlabBufferMaxSizeSoft: api.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft, })) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 255999911..2c1c1e16e 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1142,7 +1142,7 @@ func TestEphemeralAccounts(t *testing.T) { w := cluster.Worker tt := cluster.tt - tt.OK(b.UpdateSetting(context.Background(), api.SettingRedundancy, api.RedundancySettings{ + tt.OK(b.UpdateRedundancySettings(context.Background(), api.RedundancySettings{ MinShards: 1, TotalShards: 1, })) diff --git a/internal/test/e2e/events_test.go b/internal/test/e2e/events_test.go index 4972adf1b..e1bc29df7 100644 --- a/internal/test/e2e/events_test.go +++ b/internal/test/e2e/events_test.go @@ -26,7 +26,6 @@ func TestEvents(t *testing.T) { api.WebhookContractRenew, api.WebhookContractSetUpdate, api.WebhookHostUpdate, - 
api.WebhookSettingDelete, api.WebhookSettingUpdate, } @@ -125,10 +124,7 @@ func TestEvents(t *testing.T) { // update settings gs := gp.GougingSettings gs.HostBlockHeightLeeway = 100 - tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, gs)) - - // delete setting - tt.OK(b.DeleteSetting(context.Background(), api.SettingRedundancy)) + tt.OK(b.UpdateGougingSettings(context.Background(), gs)) // update host setting h := cluster.hosts[0] @@ -182,10 +178,6 @@ func TestEvents(t *testing.T) { if update.HostBlockHeightLeeway != 100 { t.Fatalf("unexpected update %+v", update) } - case api.EventSettingDelete: - if e.Key != api.SettingRedundancy || e.Timestamp.IsZero() { - t.Fatalf("unexpected event %+v", e) - } } } } diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 5be1784cb..6126cd36e 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -70,7 +70,7 @@ func TestGouging(t *testing.T) { // update the gouging settings to limit the max storage price to 100H gs := test.GougingSettings gs.MaxStoragePrice = types.NewCurrency64(100) - if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + if err := b.UpdateGougingSettings(context.Background(), gs); err != nil { t.Fatal(err) } @@ -118,7 +118,7 @@ func TestGouging(t *testing.T) { } // set optimised settings - tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + tt.OK(b.UpdateGougingSettings(context.Background(), resp.Recommendation.GougingSettings)) // evaluate optimised settings resp, err = cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, resp.Recommendation.GougingSettings, test.RedundancySettings) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 3f20e22ad..5775d2292 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -825,7 +825,7 @@ func TestS3SettingsValidate(t *testing.T) { 
}, } for i, test := range tests { - err := cluster.Bus.UpdateSetting(context.Background(), api.SettingS3Authentication, api.S3AuthenticationSettings{ + err := cluster.Bus.UpdateS3AuthenticationSettings(context.Background(), api.S3AuthenticationSettings{ V4Keypairs: map[string]string{ test.id: test.key, }, diff --git a/internal/worker/cache.go b/internal/worker/cache.go index dfc749d2a..2ec207dc9 100644 --- a/internal/worker/cache.go +++ b/internal/worker/cache.go @@ -183,9 +183,6 @@ func (c *cache) HandleEvent(event webhooks.Event) (err error) { case api.EventSettingUpdate: log = log.With("key", e.Key, "ts", e.Timestamp) err = c.handleSettingUpdate(e) - case api.EventSettingDelete: - log = log.With("key", e.Key, "ts", e.Timestamp) - c.handleSettingDelete(e) default: log.Info("unhandled event", e) return @@ -310,12 +307,6 @@ func (c *cache) handleHostUpdate(e api.EventHostUpdate) { c.cache.Set(cacheKeyDownloadContracts, contracts) } -func (c *cache) handleSettingDelete(e api.EventSettingDelete) { - if e.Key == api.SettingGouging || e.Key == api.SettingRedundancy { - c.cache.Invalidate(cacheKeyGougingParams) - } -} - func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { // return early if the cache doesn't have gouging params to update value, found, _ := c.cache.Get(cacheKeyGougingParams) diff --git a/internal/worker/cache_test.go b/internal/worker/cache_test.go index 9bc8d682d..0fa3c10d8 100644 --- a/internal/worker/cache_test.go +++ b/internal/worker/cache_test.go @@ -170,7 +170,6 @@ func TestWorkerCache(t *testing.T) { {Module: api.ModuleContract, Event: api.EventRenew, Payload: nil}, {Module: api.ModuleHost, Event: api.EventUpdate, Payload: nil}, {Module: api.ModuleSetting, Event: api.EventUpdate, Payload: nil}, - {Module: api.ModuleSetting, Event: api.EventDelete, Payload: nil}, } { if err := c.HandleEvent(event); err != nil { t.Fatal(err) diff --git a/stores/settingsdb.go b/stores/settingsdb.go index 7a895108c..ea31b25bd 100644 --- 
a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -7,23 +7,6 @@ import ( sql "go.sia.tech/renterd/stores/sql" ) -// DeleteSetting implements the bus.SettingStore interface. -func (s *SQLStore) DeleteSetting(ctx context.Context, key string) error { - s.settingsMu.Lock() - defer s.settingsMu.Unlock() - - // delete from database first - if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.DeleteSettings(ctx, key) - }); err != nil { - return err - } - - // delete from cache - delete(s.settings, key) - return nil -} - // Setting implements the bus.SettingStore interface. func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { // Check cache first. @@ -47,15 +30,6 @@ func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { return value, nil } -// Settings implements the bus.SettingStore interface. -func (s *SQLStore) Settings(ctx context.Context) (settings []string, err error) { - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - settings, err = tx.Settings(ctx) - return err - }) - return -} - // UpdateSetting implements the bus.SettingStore interface. func (s *SQLStore) UpdateSetting(ctx context.Context, key, value string) error { // update db first diff --git a/stores/settingsdb_test.go b/stores/settingsdb_test.go index cf2582579..9eda8f546 100644 --- a/stores/settingsdb_test.go +++ b/stores/settingsdb_test.go @@ -2,10 +2,7 @@ package stores import ( "context" - "errors" "testing" - - "go.sia.tech/renterd/api" ) // TestSQLSettingStore tests the bus.SettingStore methods on the SQLSettingStore. 
@@ -13,52 +10,24 @@ func TestSQLSettingStore(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() - // assert there are no settings - ctx := context.Background() - if keys, err := ss.Settings(ctx); err != nil { - t.Fatal(err) - } else if len(keys) != 0 { - t.Fatalf("unexpected number of settings, %v != 0", len(keys)) - } - // add a setting - if err := ss.UpdateSetting(ctx, "foo", "bar"); err != nil { + if err := ss.UpdateSetting(context.Background(), "foo", "bar"); err != nil { t.Fatal(err) } - // assert it's returned - if keys, err := ss.Settings(ctx); err != nil { - t.Fatal(err) - } else if len(keys) != 1 { - t.Fatalf("unexpected number of settings, %v != 1", len(keys)) - } else if keys[0] != "foo" { - t.Fatalf("unexpected key, %s != 'foo'", keys[0]) - } - // assert we can query the setting by key - if value, err := ss.Setting(ctx, "foo"); err != nil { + if value, err := ss.Setting(context.Background(), "foo"); err != nil { t.Fatal(err) } else if value != "bar" { t.Fatalf("unexpected value, %s != 'bar'", value) } // assert we can update the setting - if err := ss.UpdateSetting(ctx, "foo", "barbaz"); err != nil { + if err := ss.UpdateSetting(context.Background(), "foo", "barbaz"); err != nil { t.Fatal(err) - } else if value, err := ss.Setting(ctx, "foo"); err != nil { + } else if value, err := ss.Setting(context.Background(), "foo"); err != nil { t.Fatal(err) } else if value != "barbaz" { t.Fatalf("unexpected value, %s != 'barbaz'", value) } - - // delete the setting - if err := ss.DeleteSetting(ctx, "foo"); err != nil { - t.Fatal(err) - } else if _, err := ss.Setting(ctx, "foo"); !errors.Is(err, api.ErrSettingNotFound) { - t.Fatal("should fail with api.ErrSettingNotFound", err) - } else if keys, err := ss.Settings(ctx); err != nil { - t.Fatal(err) - } else if len(keys) != 0 { - t.Fatalf("unexpected number of settings, %v != 0", len(keys)) - } } diff --git a/stores/sql/database.go b/stores/sql/database.go index cc2aab0df..de0a45987 
100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -150,9 +150,6 @@ type ( // prefix and returns 'true' if any object was deleted. DeleteObjects(ctx context.Context, bucket, prefix string, limit int64) (bool, error) - // DeleteSettings deletes the settings with the given key. - DeleteSettings(ctx context.Context, key string) error - // DeleteWebhook deletes the webhook with the matching module, event and // URL of the provided webhook. If the webhook doesn't exist, // webhooks.ErrWebhookNotFound is returned. @@ -328,9 +325,6 @@ type ( // Setting returns the setting with the given key from the database. Setting(ctx context.Context, key string) (string, error) - // Settings returns all available settings from the database. - Settings(ctx context.Context) ([]string, error) - // Slab returns the slab with the given ID or api.ErrSlabNotFound. Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) diff --git a/stores/sql/main.go b/stores/sql/main.go index bb03bd86d..4dd8ee95c 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -541,13 +541,6 @@ func DeleteMetadata(ctx context.Context, tx sql.Tx, objID int64) error { return err } -func DeleteSettings(ctx context.Context, tx sql.Tx, key string) error { - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", key); err != nil { - return fmt.Errorf("failed to delete setting '%s': %w", key, err) - } - return nil -} - func DeleteWebhook(ctx context.Context, tx sql.Tx, wh webhooks.Webhook) error { res, err := tx.Exec(ctx, "DELETE FROM webhooks WHERE module = ? AND event = ? 
AND url = ?", wh.Module, wh.Event, wh.URL) if err != nil { @@ -2212,23 +2205,6 @@ func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) { return value, nil } -func Settings(ctx context.Context, tx sql.Tx) ([]string, error) { - rows, err := tx.Query(ctx, "SELECT `key` FROM settings") - if err != nil { - return nil, fmt.Errorf("failed to query settings: %w", err) - } - defer rows.Close() - var settings []string - for rows.Next() { - var setting string - if err := rows.Scan(&setting); err != nil { - return nil, fmt.Errorf("failed to scan setting key") - } - settings = append(settings, setting) - } - return settings, nil -} - func SetUncleanShutdown(ctx context.Context, tx sql.Tx) error { _, err := tx.Exec(ctx, "UPDATE ephemeral_accounts SET clean_shutdown = 0, requires_sync = 1") if err != nil { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 08ff0010e..b45a24a0f 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -340,10 +340,6 @@ func (tx *MainDatabaseTx) InsertMultipartUpload(ctx context.Context, bucket, key return ssql.InsertMultipartUpload(ctx, tx, bucket, key, ec, mimeType, metadata) } -func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error { - return ssql.DeleteSettings(ctx, tx, key) -} - func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { return ssql.DeleteWebhook(ctx, tx, wh) } @@ -823,10 +819,6 @@ func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, erro return ssql.Setting(ctx, tx, key) } -func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { - return ssql.Settings(ctx, tx) -} - func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { return ssql.SetUncleanShutdown(ctx, tx) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index b72ec5e8c..4a5e9481a 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -332,10 +332,6 @@ func (tx 
*MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicK return ssql.DeleteHostSector(ctx, tx, hk, root) } -func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error { - return ssql.DeleteSettings(ctx, tx, key) -} - func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { return ssql.DeleteWebhook(ctx, tx, wh) } @@ -822,10 +818,6 @@ func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, erro return ssql.Setting(ctx, tx, key) } -func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { - return ssql.Settings(ctx, tx) -} - func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { return ssql.SetUncleanShutdown(ctx, tx) } diff --git a/worker/s3/s3.go b/worker/s3/s3.go index d5cbb71a3..efa921030 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -43,7 +43,6 @@ type Bus interface { MultipartUploadParts(ctx context.Context, bucket, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) - UpdateSetting(ctx context.Context, key string, value interface{}) error UploadParams(ctx context.Context) (api.UploadParams, error) } From 7542b550a17607d657a0518cd2ec50dd941c4928 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 28 Aug 2024 13:49:51 +0200 Subject: [PATCH 14/98] bus: combine settings --- api/setting.go | 69 +++++++------ autopilot/autopilot.go | 14 +-- bus/bus.go | 154 +++--------------------------- bus/client/settings.go | 50 +++------- bus/routes.go | 82 +++++----------- cmd/renterd/node.go | 19 ++-- internal/bus/pinmanager.go | 8 +- internal/bus/pinmanager_test.go | 6 +- internal/test/config.go | 10 +- internal/test/e2e/cluster.go | 25 +++-- internal/test/e2e/cluster_test.go | 13 ++- internal/test/e2e/s3_test.go | 8 +- internal/worker/cache.go | 2 +- worker/mocks_test.go | 4 +- worker/s3/authentication.go | 4 
+- worker/s3/s3.go | 2 +- 16 files changed, 156 insertions(+), 314 deletions(-) diff --git a/api/setting.go b/api/setting.go index 34f5dfd31..3b81eb87e 100644 --- a/api/setting.go +++ b/api/setting.go @@ -10,12 +10,10 @@ import ( ) const ( - SettingContractSet = "contractset" - SettingGouging = "gouging" - SettingPricePinning = "pricepinning" - SettingRedundancy = "redundancy" - SettingS3Authentication = "s3authentication" - SettingUploadPacking = "uploadpacking" + SettingGouging = "gouging" + SettingPinned = "pinned" + SettingS3 = "s3" + SettingUploads = "uploads" ) const ( @@ -53,13 +51,18 @@ var ( // DefaultPricePinSettings define the default price pin settings the bus is // configured with on startup. These values can be adjusted using the // settings API. - DefaultPricePinSettings = PricePinSettings{ + DefaultPricePinSettings = PinnedSettings{ Enabled: false, Currency: "usd", ForexEndpointURL: "https://api.siascan.com/exchange-rate/siacoin", Threshold: 0.05, } + DefaultUploadSettings = UploadSettings{ + Packing: DefaultUploadPackingSettings, + Redundancy: DefaultRedundancySettings, + } + // DefaultUploadPackingSettings define the default upload packing settings // the bus is configured with on startup. DefaultUploadPackingSettings = UploadPackingSettings{ @@ -86,12 +89,6 @@ var ( ) type ( - // ContractSetSetting contains the default contract set used by the worker for - // uploads and migrations. - ContractSetSetting struct { - Default string `json:"default"` - } - // GougingSettings contain some price settings used in price gouging. GougingSettings struct { // MaxRPCPrice is the maximum allowed base price for RPCs @@ -132,10 +129,10 @@ type ( MigrationSurchargeMultiplier uint64 `json:"migrationSurchargeMultiplier"` } - // PricePinSettings holds the configuration for pinning certain settings to - // a specific currency (e.g., USD). 
It uses a Forex API to fetch the current + // PinnedSettings holds the configuration for pinning certain settings to a + // specific currency (e.g., USD). It uses a Forex API to fetch the current // exchange rate, allowing users to set prices in USD instead of SC. - PricePinSettings struct { + PinnedSettings struct { // Enabled can be used to either enable or temporarily disable price // pinning. If enabled, both the currency and the Forex endpoint URL // must be valid. @@ -160,6 +157,23 @@ type ( GougingSettingsPins GougingSettingsPins `json:"gougingSettingsPins"` } + // UploadSettings contains various settings related to uploads. + UploadSettings struct { + DefaultContractSet string `json:"defaultContractSet"` + Packing UploadPackingSettings `json:"packing"` + Redundancy RedundancySettings `json:"redundancy"` + } + + UploadPackingSettings struct { + Enabled bool `json:"enabled"` + SlabBufferMaxSizeSoft int64 `json:"slabBufferMaxSizeSoft"` + } + + RedundancySettings struct { + MinShards int `json:"minShards"` + TotalShards int `json:"totalShards"` + } + // AutopilotPins contains the available autopilot settings that can be // pinned. AutopilotPins struct { @@ -180,22 +194,15 @@ type ( Value float64 `json:"value"` } - // RedundancySettings contain settings that dictate an object's redundancy. - RedundancySettings struct { - MinShards int `json:"minShards"` - TotalShards int `json:"totalShards"` + // S3Settings contains various settings related to the S3 API. + S3Settings struct { + Authentication S3AuthenticationSettings `json:"authentication"` } // S3AuthenticationSettings contains S3 auth settings. S3AuthenticationSettings struct { V4Keypairs map[string]string `json:"v4Keypairs"` } - - // UploadPackingSettings contains upload packing settings. - UploadPackingSettings struct { - Enabled bool `json:"enabled"` - SlabBufferMaxSizeSoft int64 `json:"slabBufferMaxSizeSoft"` - } ) // IsPinned returns true if the pin is enabled and the value is greater than 0. 
@@ -204,7 +211,7 @@ func (p Pin) IsPinned() bool { } // Validate returns an error if the price pin settings are not considered valid. -func (pps PricePinSettings) Validate() error { +func (pps PinnedSettings) Validate() error { if pps.ForexEndpointURL == "" { return fmt.Errorf("price pin settings must have a forex endpoint URL") } @@ -281,8 +288,12 @@ func (rs RedundancySettings) Validate() error { // Validate returns an error if the authentication settings are not considered // valid. -func (s3as S3AuthenticationSettings) Validate() error { - for accessKeyID, secretAccessKey := range s3as.V4Keypairs { +func (s3s S3Settings) Validate() error { + return s3s.Authentication.Validate() +} + +func (s3a S3AuthenticationSettings) Validate() error { + for accessKeyID, secretAccessKey := range s3a.V4Keypairs { if accessKeyID == "" { return fmt.Errorf("AccessKeyID cannot be empty") } else if len(accessKeyID) < S3MinAccessKeyLen || len(accessKeyID) > S3MaxAccessKeyLen { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index fb554fb2c..00728d6ba 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -74,7 +74,7 @@ type Bus interface { // settings GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) - RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) + UploadSettings(ctx context.Context) (us api.UploadSettings, err error) // syncer SyncerPeers(ctx context.Context) (resp []string, err error) @@ -847,10 +847,10 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta return nil, fmt.Errorf("could not fetch consensus state, err: %v", err) } - // fetch redundancy settings - rs, err := ap.bus.RedundancySettings(ctx) + // fetch upload settings + us, err := ap.bus.UploadSettings(ctx) if err != nil { - return nil, fmt.Errorf("could not fetch redundancy settings, err: %v", err) + return nil, fmt.Errorf("could not fetch upload settings, err: %v", err) } // fetch gouging settings @@ -900,7 
+900,7 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta return &contractor.MaintenanceState{ GS: gs, - RS: rs, + RS: us.Redundancy, AP: autopilot, Address: address, @@ -935,9 +935,9 @@ func compatV105Host(ctx context.Context, cfg api.ContractsConfig, b Bus, hk type if err != nil { return fmt.Errorf("failed to fetch gouging settings from bus: %w", err) } - _, err = b.RedundancySettings(ctx) + _, err = b.UploadSettings(ctx) if err != nil { - return fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) + return fmt.Errorf("failed to fetch upload settings from bus: %w", err) } _, err = b.ConsensusState(ctx) if err != nil { diff --git a/bus/bus.go b/bus/bus.go index f5117825d..af64411fb 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -11,7 +11,6 @@ import ( "math/big" "net" "net/http" - "strings" "time" "go.sia.tech/core/consensus" @@ -357,11 +356,6 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa rhp2: rhp2.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l), } - // init settings - if err := b.initSettings(ctx); err != nil { - return nil, err - } - // create account manager b.accountsMgr, err = ibus.NewAccountManager(ctx, store, l) if err != nil { @@ -483,16 +477,14 @@ func (b *Bus) Handler() http.Handler { "DELETE /sectors/:hk/:root": b.sectorsHostRootHandlerDELETE, - "GET /settings/gouging": b.settingsGougingHandlerGET, - "PUT /settings/gouging": b.settingsGougingHandlerPUT, - "GET /settings/pinned": b.settingsPinnedHandlerGET, - "PUT /settings/pinned": b.settingsPinnedHandlerPUT, - "GET /settings/redundancy": b.settingsRedundancyHandlerGET, - "PUT /settings/redundancy": b.settingsRedundancyHandlerPUT, - "GET /settings/s3authentication": b.settingsS3AuthenticationHandlerGET, - "PUT /settings/s3authentication": b.settingsS3AuthenticationHandlerPUT, - "GET /settings/uploadpacking": b.settingsUploadPackingHandlerGET, - "PUT /settings/uploadpacking": b.settingsUploadPackingHandlerPUT, + 
"GET /settings/gouging": b.settingsGougingHandlerGET, + "PUT /settings/gouging": b.settingsGougingHandlerPUT, + "GET /settings/pinned": b.settingsPinnedHandlerGET, + "PUT /settings/pinned": b.settingsPinnedHandlerPUT, + "GET /settings/s3": b.settingsS3HandlerGET, + "PUT /settings/s3": b.settingsS3HandlerPUT, + "GET /settings/uploads": b.settingsRedundancyHandlerGET, + "PUT /settings/uploads": b.settingsRedundancyHandlerPUT, "POST /slabs/migration": b.slabsMigrationHandlerPOST, "GET /slabs/partial/:key": b.slabsPartialHandlerGET, @@ -611,120 +603,6 @@ func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings, return contract, nil } -// initSettings loads the default settings if the setting is not already set and -// ensures the settings are valid -func (b *Bus) initSettings(ctx context.Context) error { - // testnets have different redundancy settings - defaultRedundancySettings := api.DefaultRedundancySettings - if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { - defaultRedundancySettings = api.DefaultRedundancySettingsTestnet - } - - // load default settings if the setting is not already set - for key, value := range map[string]interface{}{ - api.SettingGouging: api.DefaultGougingSettings, - api.SettingPricePinning: api.DefaultPricePinSettings, - api.SettingRedundancy: defaultRedundancySettings, - api.SettingUploadPacking: api.DefaultUploadPackingSettings, - } { - if _, err := b.ss.Setting(ctx, key); errors.Is(err, api.ErrSettingNotFound) { - if bytes, err := json.Marshal(value); err != nil { - panic("failed to marshal default settings") // should never happen - } else if err := b.ss.UpdateSetting(ctx, key, string(bytes)); err != nil { - return err - } - } - } - - // check redundancy settings for validity - var rs api.RedundancySettings - if rss, err := b.ss.Setting(ctx, api.SettingRedundancy); err != nil { - return err - } else if err := json.Unmarshal([]byte(rss), &rs); err != nil { - return err - } else if err := 
rs.Validate(); err != nil { - b.logger.Warn(fmt.Sprintf("invalid redundancy setting found '%v', overwriting the redundancy settings with the default settings", rss)) - bytes, _ := json.Marshal(defaultRedundancySettings) - if err := b.ss.UpdateSetting(ctx, api.SettingRedundancy, string(bytes)); err != nil { - return err - } - } - - // check gouging settings for validity - var gs api.GougingSettings - if gss, err := b.ss.Setting(ctx, api.SettingGouging); err != nil { - return err - } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { - return err - } else if err := gs.Validate(); err != nil { - // compat: apply default EA gouging settings - gs.MinMaxEphemeralAccountBalance = api.DefaultGougingSettings.MinMaxEphemeralAccountBalance - gs.MinPriceTableValidity = api.DefaultGougingSettings.MinPriceTableValidity - gs.MinAccountExpiry = api.DefaultGougingSettings.MinAccountExpiry - if err := gs.Validate(); err == nil { - b.logger.Info(fmt.Sprintf("updating gouging settings with default EA settings: %+v", gs)) - bytes, _ := json.Marshal(gs) - if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil { - return err - } - } else { - // compat: apply default host block leeway settings - gs.HostBlockHeightLeeway = api.DefaultGougingSettings.HostBlockHeightLeeway - if err := gs.Validate(); err == nil { - b.logger.Info(fmt.Sprintf("updating gouging settings with default HostBlockHeightLeeway settings: %v", gs)) - bytes, _ := json.Marshal(gs) - if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil { - return err - } - } else { - b.logger.Warn(fmt.Sprintf("invalid gouging setting found '%v', overwriting the gouging settings with the default settings", gss)) - bytes, _ := json.Marshal(api.DefaultGougingSettings) - if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil { - return err - } - } - } - } - - // compat: default price pin settings - var pps api.PricePinSettings - if pss, err := 
b.ss.Setting(ctx, api.SettingPricePinning); err != nil { - return err - } else if err := json.Unmarshal([]byte(pss), &pps); err != nil { - return err - } else if err := pps.Validate(); err != nil { - // overwrite values with defaults - var updates []string - if pps.ForexEndpointURL == "" { - pps.ForexEndpointURL = api.DefaultPricePinSettings.ForexEndpointURL - updates = append(updates, fmt.Sprintf("set PricePinSettings.ForexEndpointURL to %v", pps.ForexEndpointURL)) - } - if pps.Currency == "" { - pps.Currency = api.DefaultPricePinSettings.Currency - updates = append(updates, fmt.Sprintf("set PricePinSettings.Currency to %v", pps.Currency)) - } - if pps.Threshold == 0 { - pps.Threshold = api.DefaultPricePinSettings.Threshold - updates = append(updates, fmt.Sprintf("set PricePinSettings.Threshold to %v", pps.Threshold)) - } - - var updated []byte - if err := pps.Validate(); err == nil { - b.logger.Info(fmt.Sprintf("updating price pinning settings with default values: %v", strings.Join(updates, ", "))) - updated, _ = json.Marshal(pps) - } else { - b.logger.Warn(fmt.Sprintf("updated price pinning settings are invalid (%v), they have been overwritten with the default settings", err)) - updated, _ = json.Marshal(api.DefaultPricePinSettings) - } - - if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { - return err - } - } - - return nil -} - func (b *Bus) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { seed := blake2b.Sum256(append(b.deriveSubKey("renterkey"), hostKey[:]...)) pk := types.NewPrivateKeyFromSeed(seed[:]) @@ -744,17 +622,15 @@ func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { } func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) error { - // testnets have different redundancy settings - defaultRedundancySettings := api.DefaultRedundancySettings - if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { - defaultRedundancySettings = 
api.DefaultRedundancySettingsTestnet + defaults := map[string]interface{}{ + api.SettingGouging: api.DefaultGougingSettings, + api.SettingPinned: api.DefaultPricePinSettings, + api.SettingUploads: api.DefaultUploadSettings, } - defaults := map[string]interface{}{ - api.SettingGouging: api.DefaultGougingSettings, - api.SettingPricePinning: api.DefaultPricePinSettings, - api.SettingRedundancy: defaultRedundancySettings, - api.SettingUploadPacking: api.DefaultUploadPackingSettings, + // testnets have different redundancy settings + if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { + defaults[api.SettingUploads] = api.DefaultRedundancySettingsTestnet } setting, err := b.ss.Setting(ctx, key) diff --git a/bus/client/settings.go b/bus/client/settings.go index e813a7417..35c48eac2 100644 --- a/bus/client/settings.go +++ b/bus/client/settings.go @@ -6,17 +6,6 @@ import ( "go.sia.tech/renterd/api" ) -// ContractSetSettings returns the contract set settings. -func (c *Client) ContractSetSettings(ctx context.Context) (css api.ContractSetSetting, err error) { - err = c.c.WithContext(ctx).GET("/setting/contractset", &css) - return -} - -// UpdateContractSetSetting updates the given setting. -func (c *Client) UpdateContractSetSetting(ctx context.Context, css api.ContractSetSetting) error { - return c.c.WithContext(ctx).PUT("/setting/contractset", css) -} - // GougingSettings returns the gouging settings. func (c *Client) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { err = c.c.WithContext(ctx).GET("/setting/gouging", &gs) @@ -29,45 +18,34 @@ func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettin } // PricePinningSettings returns the contract set settings. 
-func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PricePinSettings, err error) { +func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PinnedSettings, err error) { err = c.c.WithContext(ctx).GET("/setting/pinned", &pps) return } // UpdatePinnedSettings updates the given setting. -func (c *Client) UpdatePinnedSettings(ctx context.Context, pps api.PricePinSettings) error { +func (c *Client) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error { return c.c.WithContext(ctx).PUT("/setting/pinned", pps) } -// RedundancySettings returns the redundancy settings. -func (c *Client) RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/redundancy", &rs) - return -} - -// UpdateRedundancySettings updates the given setting. -func (c *Client) UpdateRedundancySettings(ctx context.Context, rs api.RedundancySettings) error { - return c.c.WithContext(ctx).PUT("/setting/redundancy", rs) -} - -// S3AuthenticationSettings returns the S3 authentication settings. -func (c *Client) S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/s3authentication", &as) +// S3Settings returns the S3 settings. +func (c *Client) S3Settings(ctx context.Context) (as api.S3Settings, err error) { + err = c.c.WithContext(ctx).GET("/setting/s3", &as) return } -// UpdateS3AuthenticationSettings updates the given setting. -func (c *Client) UpdateS3AuthenticationSettings(ctx context.Context, as api.S3AuthenticationSettings) error { - return c.c.WithContext(ctx).PUT("/setting/s3authentication", as) +// UpdateS3Settings updates the given setting. +func (c *Client) UpdateS3Settings(ctx context.Context, as api.S3Settings) error { + return c.c.WithContext(ctx).PUT("/setting/s3", as) } -// UploadPackingSettings returns the upload packing settings. 
-func (c *Client) UploadPackingSettings(ctx context.Context) (ups api.UploadPackingSettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/uploadpacking", &ups) +// UploadSettings returns the upload settings. +func (c *Client) UploadSettings(ctx context.Context) (css api.UploadSettings, err error) { + err = c.c.WithContext(ctx).GET("/setting/upload", &css) return } -// UpdateUploadPackingSettings updates the given setting. -func (c *Client) UpdateUploadPackingSettings(ctx context.Context, ups api.UploadPackingSettings) error { - return c.c.WithContext(ctx).PUT("/setting/uploadpacking", ups) +// UpdateUploadSettings update the given setting. +func (c *Client) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error { + return c.c.WithContext(ctx).PUT("/setting/upload", us) } diff --git a/bus/routes.go b/bus/routes.go index 76c7df4ec..a95d62ac3 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1284,8 +1284,8 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { - var pps api.PricePinSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingPricePinning, &pps); errors.Is(err, api.ErrSettingNotFound) { + var pps api.PinnedSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingPinned, &pps); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get price pinning settings", err) == nil { // populate the Autopilots map with the current autopilots @@ -1303,7 +1303,7 @@ func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { - var pps api.PricePinSettings + var pps api.PinnedSettings if jc.Decode(&pps) != nil { return } else if err := pps.Validate(); err != nil { @@ -1324,14 +1324,14 @@ func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { } // update the setting - if jc.Check("could not update price pinning settings", 
b.updateSetting(jc.Request.Context(), api.SettingPricePinning, string(data), true)) != nil { + if jc.Check("could not update price pinning settings", b.updateSetting(jc.Request.Context(), api.SettingPinned, string(data), true)) != nil { return } } func (b *Bus) settingsRedundancyHandlerGET(jc jape.Context) { var rs api.RedundancySettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingRedundancy, &rs); errors.Is(err, api.ErrSettingNotFound) { + if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &rs); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get redundancy settings", err) == nil { jc.Encode(rs) @@ -1355,22 +1355,22 @@ func (b *Bus) settingsRedundancyHandlerPUT(jc jape.Context) { } // update the setting - if jc.Check("could not update redundancy settings", b.updateSetting(jc.Request.Context(), api.SettingRedundancy, string(data), false)) != nil { + if jc.Check("could not update redundancy settings", b.updateSetting(jc.Request.Context(), api.SettingUploads, string(data), false)) != nil { return } } -func (b *Bus) settingsS3AuthenticationHandlerGET(jc jape.Context) { - var s3as api.S3AuthenticationSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingS3Authentication, &s3as); errors.Is(err, api.ErrSettingNotFound) { +func (b *Bus) settingsS3HandlerGET(jc jape.Context) { + var s3as api.S3Settings + if err := b.fetchSetting(jc.Request.Context(), api.SettingS3, &s3as); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get s3 authentication settings", err) == nil { jc.Encode(s3as) } } -func (b *Bus) settingsS3AuthenticationHandlerPUT(jc jape.Context) { - var s3as api.S3AuthenticationSettings +func (b *Bus) settingsS3HandlerPUT(jc jape.Context) { + var s3as api.S3Settings if jc.Decode(&s3as) != nil { return } else if err := s3as.Validate(); err != nil { @@ -1386,38 +1386,7 @@ func (b *Bus) 
settingsS3AuthenticationHandlerPUT(jc jape.Context) { } // update the setting - if jc.Check("could not update s3 authentication settings", b.updateSetting(jc.Request.Context(), api.SettingS3Authentication, string(data), false)) != nil { - return - } -} - -func (b *Bus) settingsUploadPackingHandlerGET(jc jape.Context) { - var ups api.UploadPackingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &ups); errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get upload packing settings", err) == nil { - jc.Encode(ups) - } -} - -func (b *Bus) settingsUploadPackingHandlerPUT(jc jape.Context) { - var ups api.UploadPackingSettings - if jc.Decode(&ups) != nil { - return - } else if err := ups.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update upload packing settings, error: %v", err), http.StatusBadRequest) - return - } - - // marshal the setting - data, err := json.Marshal(ups) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) - return - } - - // update the setting - if jc.Check("could not update upload packing settings", b.updateSetting(jc.Request.Context(), api.SettingUploadPacking, string(data), false)) != nil { + if jc.Check("could not update s3 authentication settings", b.updateSetting(jc.Request.Context(), api.SettingS3, string(data), false)) != nil { return } } @@ -1555,14 +1524,14 @@ func (b *Bus) slabsPartialHandlerPOST(jc jape.Context) { if jc.Check("failed to add partial slab", err) != nil { return } - var pus api.UploadPackingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &pus); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + var us api.UploadSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &us); err != nil && !errors.Is(err, api.ErrSettingNotFound) { jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), 
http.StatusInternalServerError) return } jc.Encode(api.AddPartialSlabResponse{ Slabs: slabs, - SlabBufferMaxSizeSoftReached: bufferSize >= pus.SlabBufferMaxSizeSoft, + SlabBufferMaxSizeSoftReached: bufferSize >= us.Packing.SlabBufferMaxSizeSoft, }) } @@ -1588,22 +1557,15 @@ func (b *Bus) paramsHandlerUploadGET(jc jape.Context) { return } - var contractSet string - var css api.ContractSetSetting - if err := b.fetchSetting(jc.Request.Context(), api.SettingContractSet, &css); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - jc.Error(fmt.Errorf("could not get contract set settings: %w", err), http.StatusInternalServerError) - return - } else if err == nil { - contractSet = css.Default - } - var uploadPacking bool - var pus api.UploadPackingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &pus); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), http.StatusInternalServerError) + var contractSet string + var us api.UploadSettings + if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &us); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + jc.Error(fmt.Errorf("could not get upload settings: %w", err), http.StatusInternalServerError) return } else if err == nil { - uploadPacking = pus.Enabled + contractSet = us.DefaultContractSet + uploadPacking = us.Packing.Enabled } jc.Encode(api.UploadParams{ @@ -1650,7 +1612,7 @@ func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { } var rs api.RedundancySettings - if rss, err := b.ss.Setting(ctx, api.SettingRedundancy); err != nil { + if rss, err := b.ss.Setting(ctx, api.SettingUploads); err != nil { return api.GougingParams{}, err } else if err := json.Unmarshal([]byte(rss), &rs); err != nil { b.logger.Panicf("failed to unmarshal redundancy settings '%s': %v", rss, err) diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 5ab3bf13d..28d8e791d 100644 --- 
a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -413,30 +413,31 @@ func (n *node) Run() error { // set initial S3 keys if n.cfg.S3.Enabled && !n.cfg.S3.DisableAuth { - as, err := n.bus.S3AuthenticationSettings(context.Background()) + s3s, err := n.bus.S3Settings(context.Background()) if err != nil && !strings.Contains(err.Error(), api.ErrSettingNotFound.Error()) { - return fmt.Errorf("failed to fetch S3 authentication settings: %w", err) - } else if as.V4Keypairs == nil { - as.V4Keypairs = make(map[string]string) + return fmt.Errorf("failed to fetch S3 settings: %w", err) + } else if s3s.Authentication.V4Keypairs == nil { + s3s.Authentication.V4Keypairs = make(map[string]string) } // S3 key pair validation was broken at one point, we need to remove the // invalid key pairs here to ensure we don't fail when we update the // setting below. - for k, v := range as.V4Keypairs { + for k, v := range s3s.Authentication.V4Keypairs { if err := (api.S3AuthenticationSettings{V4Keypairs: map[string]string{k: v}}).Validate(); err != nil { n.logger.Infof("removing invalid S3 keypair for AccessKeyID %s, reason: %v", k, err) - delete(as.V4Keypairs, k) + delete(s3s.Authentication.V4Keypairs, k) } } // merge keys for k, v := range n.cfg.S3.KeypairsV4 { - as.V4Keypairs[k] = v + s3s.Authentication.V4Keypairs[k] = v } + // update settings - if err := n.bus.UpdateS3AuthenticationSettings(context.Background(), as); err != nil { - return fmt.Errorf("failed to update S3 authentication settings: %w", err) + if err := n.bus.UpdateS3Settings(context.Background(), s3s); err != nil { + return fmt.Errorf("failed to update S3 settings: %w", err) } } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index c128a8392..9274f0920 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -103,10 +103,10 @@ func (pm *pinManager) averageRate() decimal.Decimal { return decimal.NewFromFloat(median) } -func (pm *pinManager) pinnedSettings(ctx context.Context) 
(api.PricePinSettings, error) { - var ps api.PricePinSettings - if pss, err := pm.s.Setting(ctx, api.SettingPricePinning); err != nil { - return api.PricePinSettings{}, err +func (pm *pinManager) pinnedSettings(ctx context.Context) (api.PinnedSettings, error) { + var ps api.PinnedSettings + if pss, err := pm.s.Setting(ctx, api.SettingPinned); err != nil { + return api.PinnedSettings{}, err } else if err := json.Unmarshal([]byte(pss), &ps); err != nil { pm.logger.Panicf("failed to unmarshal pinned settings '%s': %v", pss, err) } diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index e5158836d..33e4962eb 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -123,7 +123,7 @@ func newTestStore() *mockPinStore { // add default price pin - and gouging settings b, _ := json.Marshal(api.DefaultPricePinSettings) - s.settings[api.SettingPricePinning] = string(b) + s.settings[api.SettingPinned] = string(b) b, _ = json.Marshal(api.DefaultGougingSettings) s.settings[api.SettingGouging] = string(b) @@ -152,9 +152,9 @@ func (ms *mockPinStore) gougingSettings() api.GougingSettings { return gs } -func (ms *mockPinStore) updatPinnedSettings(pps api.PricePinSettings) { +func (ms *mockPinStore) updatPinnedSettings(pps api.PinnedSettings) { b, _ := json.Marshal(pps) - ms.UpdateSetting(context.Background(), api.SettingPricePinning, string(b)) + ms.UpdateSetting(context.Background(), api.SettingPinned, string(b)) time.Sleep(2 * testUpdateInterval) } diff --git a/internal/test/config.go b/internal/test/config.go index 1b5d926a0..877b6168e 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -33,10 +33,7 @@ var ( }, } - ContractSet = "testset" - ContractSetSettings = api.ContractSetSetting{ - Default: ContractSet, - } + ContractSet = "testset" GougingSettings = api.GougingSettings{ MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC @@ -59,6 +56,11 @@ var ( TotalShards: 3, } + UploadSettings = 
api.UploadSettings{ + DefaultContractSet: ContractSet, + Redundancy: RedundancySettings, + } + S3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" S3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" S3Credentials = credentials.NewStaticV4(S3AccessKeyID, S3SecretAccessKey, "") diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 189a92e43..d84b87b17 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -435,18 +435,25 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { })) } + // Build upload settings. + us := test.UploadSettings + us.Packing = api.UploadPackingSettings{ + Enabled: enableUploadPacking, + SlabBufferMaxSizeSoft: api.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft, + } + + // Build S3 settings. + s3 := api.S3Settings{ + Authentication: api.S3AuthenticationSettings{ + V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, + }, + } + // Update the bus settings. tt.OK(busClient.UpdateGougingSettings(ctx, test.GougingSettings)) - tt.OK(busClient.UpdateContractSetSetting(ctx, test.ContractSetSettings)) tt.OK(busClient.UpdatePinnedSettings(ctx, test.PricePinSettings)) - tt.OK(busClient.UpdateRedundancySettings(ctx, test.RedundancySettings)) - tt.OK(busClient.UpdateS3AuthenticationSettings(ctx, api.S3AuthenticationSettings{ - V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, - })) - tt.OK(busClient.UpdateUploadPackingSettings(ctx, api.UploadPackingSettings{ - Enabled: enableUploadPacking, - SlabBufferMaxSizeSoft: api.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft, - })) + tt.OK(busClient.UpdateUploadSettings(ctx, us)) + tt.OK(busClient.UpdateS3Settings(ctx, s3)) // Fund the bus. 
if funding { diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2c1c1e16e..902c922e4 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -178,10 +178,10 @@ func TestNewTestCluster(t *testing.T) { tt := cluster.tt // Upload packing should be disabled by default. - ups, err := b.UploadPackingSettings(context.Background()) + us, err := b.UploadSettings(context.Background()) tt.OK(err) - if ups.Enabled { - t.Fatalf("expected upload packing to be disabled by default, got %v", ups.Enabled) + if us.Packing.Enabled { + t.Fatalf("expected upload packing to be disabled by default, got %v", us.Packing.Enabled) } // PricePinningSettings should have default values @@ -1142,10 +1142,13 @@ func TestEphemeralAccounts(t *testing.T) { w := cluster.Worker tt := cluster.tt - tt.OK(b.UpdateRedundancySettings(context.Background(), api.RedundancySettings{ + us := test.UploadSettings + us.Redundancy = api.RedundancySettings{ MinShards: 1, TotalShards: 1, - })) + } + tt.OK(b.UpdateUploadSettings(context.Background(), us)) + // add a host hosts := cluster.AddHosts(1) h, err := b.Host(context.Background(), hosts[0].PublicKey()) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 5775d2292..272ead406 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -825,9 +825,11 @@ func TestS3SettingsValidate(t *testing.T) { }, } for i, test := range tests { - err := cluster.Bus.UpdateS3AuthenticationSettings(context.Background(), api.S3AuthenticationSettings{ - V4Keypairs: map[string]string{ - test.id: test.key, + err := cluster.Bus.UpdateS3Settings(context.Background(), api.S3Settings{ + Authentication: api.S3AuthenticationSettings{ + V4Keypairs: map[string]string{ + test.id: test.key, + }, }, }) if err != nil && !test.shouldFail { diff --git a/internal/worker/cache.go b/internal/worker/cache.go index 2ec207dc9..f6c1ea574 100644 --- a/internal/worker/cache.go +++ 
b/internal/worker/cache.go @@ -333,7 +333,7 @@ func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { gp.GougingSettings = gs c.cache.Set(cacheKeyGougingParams, gp) - case api.SettingRedundancy: + case api.SettingUploads: var rs api.RedundancySettings if err := json.Unmarshal(data, &rs); err != nil { return fmt.Errorf("couldn't update redundancy settings, invalid request body, %t", e.Update) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 13e5fd733..203afe7e1 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -678,8 +678,8 @@ func (*s3Mock) MultipartUploadParts(ctx context.Context, bucket, object string, return api.MultipartListPartsResponse{}, nil } -func (*s3Mock) S3AuthenticationSettings(context.Context) (as api.S3AuthenticationSettings, err error) { - return api.S3AuthenticationSettings{}, nil +func (*s3Mock) S3Settings(context.Context) (as api.S3Settings, err error) { + return api.S3Settings{}, nil } func (*s3Mock) UpdateSetting(context.Context, string, interface{}) error { diff --git a/worker/s3/authentication.go b/worker/s3/authentication.go index 58ebad677..066e27e53 100644 --- a/worker/s3/authentication.go +++ b/worker/s3/authentication.go @@ -117,11 +117,11 @@ func (b *authenticatedBackend) permsFromCtx(ctx context.Context, bucket string) } func (b *authenticatedBackend) reloadV4Keys(ctx context.Context) error { - as, err := b.backend.b.S3AuthenticationSettings(ctx) + s3, err := b.backend.b.S3Settings(ctx) if err != nil { return err } - signature.ReloadKeys(as.V4Keypairs) + signature.ReloadKeys(s3.Authentication.V4Keypairs) return nil } diff --git a/worker/s3/s3.go b/worker/s3/s3.go index efa921030..c0e2c054e 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -42,7 +42,7 @@ type Bus interface { MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucket, object 
string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) - S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) + S3Settings(ctx context.Context) (as api.S3Settings, err error) UploadParams(ctx context.Context) (api.UploadParams, error) } From 2cadb86682f8fd3289aea59e60e86eecf9811e82 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Aug 2024 17:38:49 +0200 Subject: [PATCH 15/98] bus: merge objects listing endpoints and add new endpoint to fetch object --- api/object.go | 58 +++++-------- bus/bus.go | 15 ++-- bus/client/objects.go | 26 +++--- bus/routes.go | 69 ++++----------- internal/test/e2e/cluster_test.go | 132 ++++++++++++----------------- internal/test/e2e/metadata_test.go | 8 +- internal/test/e2e/s3_test.go | 4 +- stores/metadata.go | 21 ++--- stores/metadata_test.go | 92 ++++++++++---------- stores/sql/database.go | 2 +- stores/sql/main.go | 46 +++++----- stores/sql/mysql/main.go | 4 +- stores/sql/sqlite/main.go | 4 +- worker/alerts.go | 4 +- worker/bench_test.go | 4 +- worker/client/client.go | 16 ---- worker/mocks_test.go | 10 +-- worker/s3/backend.go | 73 ++++------------ worker/s3/s3.go | 3 +- worker/upload_test.go | 40 ++++----- worker/worker.go | 65 +++++--------- 21 files changed, 269 insertions(+), 427 deletions(-) diff --git a/api/object.go b/api/object.go index a5cef0422..bb5944c2e 100644 --- a/api/object.go +++ b/api/object.go @@ -47,6 +47,10 @@ var ( // ErrSlabNotFound is returned when a slab can't be retrieved from the // database. ErrSlabNotFound = errors.New("slab not found") + + // ErrUnsupportedDelimiter is returned when an unsupported delimiter is + // provided. + ErrUnsupportedDelimiter = errors.New("unsupported delimiter") ) type ( @@ -99,16 +103,6 @@ type ( Metadata ObjectUserMetadata } - // ObjectsDeleteRequest is the request type for the /bus/objects/list endpoint. 
- ObjectsListRequest struct { - Bucket string `json:"bucket"` - Limit int `json:"limit"` - SortBy string `json:"sortBy"` - SortDir string `json:"sortDir"` - Prefix string `json:"prefix"` - Marker string `json:"marker"` - } - // ObjectsListResponse is the response type for the /bus/objects/list endpoint. ObjectsListResponse struct { HasMore bool `json:"hasMore"` @@ -207,32 +201,24 @@ type ( } HeadObjectOptions struct { - IgnoreDelim bool - Range *DownloadRange + Range *DownloadRange } DownloadObjectOptions struct { - GetObjectOptions Range *DownloadRange } GetObjectOptions struct { - Prefix string - Offset int - Limit int - IgnoreDelim bool - Marker string OnlyMetadata bool - SortBy string - SortDir string } ListObjectOptions struct { - Prefix string - Marker string - Limit int - SortBy string - SortDir string + Delimiter string + Limit int + Marker string + Prefix string + SortBy string + SortDir string } SearchObjectOptions struct { @@ -297,7 +283,6 @@ func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { } func (opts DownloadObjectOptions) ApplyValues(values url.Values) { - opts.GetObjectOptions.Apply(values) } func (opts DownloadObjectOptions) ApplyHeaders(h http.Header) { @@ -317,9 +302,6 @@ func (opts DeleteObjectOptions) Apply(values url.Values) { } func (opts HeadObjectOptions) Apply(values url.Values) { - if opts.IgnoreDelim { - values.Set("ignoreDelim", "true") - } } func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { @@ -333,23 +315,23 @@ func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { } func (opts GetObjectOptions) Apply(values url.Values) { - if opts.Prefix != "" { - values.Set("prefix", opts.Prefix) + if opts.OnlyMetadata { + values.Set("onlyMetadata", "true") } - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) +} + +func (opts ListObjectOptions) Apply(values url.Values) { + if opts.Delimiter != "" { + values.Set("delimiter", opts.Delimiter) } if opts.Limit != 0 { values.Set("limit", 
fmt.Sprint(opts.Limit)) } - if opts.IgnoreDelim { - values.Set("ignoreDelim", "true") - } if opts.Marker != "" { values.Set("marker", opts.Marker) } - if opts.OnlyMetadata { - values.Set("onlymetadata", "true") + if opts.Prefix != "" { + values.Set("prefix", opts.Prefix) } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) diff --git a/bus/bus.go b/bus/bus.go index 1807575e9..39cd4f409 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -226,10 +226,9 @@ type ( UpdateBucketPolicy(ctx context.Context, bucketName string, policy api.BucketPolicy) error CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) - ListObjects(ctx context.Context, bucketName, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) + ListObjects(ctx context.Context, bucketName, prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) Object(ctx context.Context, bucketName, path string) (api.Object, error) ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) - ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) RemoveObject(ctx context.Context, bucketName, path string) error @@ -440,12 +439,12 @@ func (b *Bus) Handler() http.Handler { "POST /multipart/listuploads": b.multipartHandlerListUploadsPOST, "POST /multipart/listparts": b.multipartHandlerListPartsPOST, - "GET /objects/*path": b.objectsHandlerGET, - "PUT /objects/*path": b.objectsHandlerPUT, - "DELETE /objects/*path": b.objectsHandlerDELETE, - "POST /objects/copy": b.objectsCopyHandlerPOST, - "POST /objects/rename": b.objectsRenameHandlerPOST, - "POST 
/objects/list": b.objectsListHandlerPOST, + "GET /object/*key": b.objectHandlerGET, + "GET /objects/*prefix": b.objectsHandlerPOST, + "PUT /objects/*path": b.objectsHandlerPUT, + "DELETE /objects/*path": b.objectsHandlerDELETE, + "POST /objects/copy": b.objectsCopyHandlerPOST, + "POST /objects/rename": b.objectsRenameHandlerPOST, "GET /params/gouging": b.paramsHandlerGougingGET, "GET /params/upload": b.paramsHandlerUploadGET, diff --git a/bus/client/objects.go b/bus/client/objects.go index fca893a49..0415e2831 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -49,21 +49,8 @@ func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api return } -// ListOBjects lists objects in the given bucket. -func (c *Client) ListObjects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) { - err = c.c.WithContext(ctx).POST("/objects/list", api.ObjectsListRequest{ - Bucket: bucket, - Limit: opts.Limit, - Prefix: opts.Prefix, - Marker: opts.Marker, - SortBy: opts.SortBy, - SortDir: opts.SortDir, - }, &resp) - return -} - // Objects returns the object at given path. -func (c *Client) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.ObjectsResponse, err error) { +func (c *Client) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.Object, err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) @@ -71,7 +58,16 @@ func (c *Client) Object(ctx context.Context, bucket, path string, opts api.GetOb path = api.ObjectPathEscape(path) path += "?" + values.Encode() - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", path), &res) + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/object/%s", path), &res) + return +} + +// Objects lists objects in the given bucket. 
+func (c *Client) Objects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) { + values := url.Values{} + values.Set("bucket", bucket) + opts.Apply(values) + err = c.c.WithContext(ctx).GET("/objects?"+values.Encode(), &resp) + return } diff --git a/bus/routes.go b/bus/routes.go index 4eee7ce44..799914492 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1035,16 +1035,8 @@ func (b *Bus) searchObjectsHandlerGET(jc jape.Context) { jc.Encode(keys) } -func (b *Bus) objectsHandlerGET(jc jape.Context) { - var ignoreDelim bool - if jc.DecodeForm("ignoreDelim", &ignoreDelim) != nil { - return - } - path := jc.PathParam("path") - if strings.HasSuffix(path, "/") && !ignoreDelim { - b.objectEntriesHandlerGET(jc, path) - return - } +func (b *Bus) objectHandlerGET(jc jape.Context) { + path := jc.PathParam("key") bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return @@ -1056,6 +1048,7 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) { var o api.Object var err error + if onlymetadata { o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path) } else { @@ -1070,48 +1063,40 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) { jc.Encode(api.ObjectsResponse{Object: &o}) } -func (b *Bus) objectEntriesHandlerGET(jc jape.Context, path string) { +func (b *Bus) objectsHandlerPOST(jc jape.Context) { + var limit int + var marker, delim, prefix, sortBy, sortDir string bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return } - - var prefix string - if jc.DecodeForm("prefix", &prefix) != nil { + if jc.DecodeForm("delimiter", &delim) != nil { return } - - var sortBy string - if jc.DecodeForm("sortBy", &sortBy) != nil { + if jc.DecodeForm("limit", &limit) != nil { return } - - var sortDir string - if jc.DecodeForm("sortDir", &sortDir) != nil { + if jc.DecodeForm("marker", &marker) != nil { return } - - var marker string - if jc.DecodeForm("marker", &marker) != nil { + if 
jc.DecodeForm("prefix", &prefix) != nil { return } - - var offset int - if jc.DecodeForm("offset", &offset) != nil { + if jc.DecodeForm("sortBy", &sortBy) != nil { return } - limit := -1 - if jc.DecodeForm("limit", &limit) != nil { + if jc.DecodeForm("sortDir", &sortDir) != nil { return } - // look for object entries - entries, hasMore, err := b.ms.ObjectEntries(jc.Request.Context(), bucket, path, prefix, sortBy, sortDir, marker, offset, limit) - if jc.Check("couldn't list object entries", err) != nil { + resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, prefix, delim, sortBy, sortDir, marker, limit) + if errors.Is(err, api.ErrUnsupportedDelimiter) { + jc.Error(err, http.StatusBadRequest) + return + } else if jc.Check("failed to query objects", err) != nil { return } - - jc.Encode(api.ObjectsResponse{Entries: entries, HasMore: hasMore}) + jc.Encode(resp) } func (b *Bus) objectsHandlerPUT(jc jape.Context) { @@ -1139,24 +1124,6 @@ func (b *Bus) objectsCopyHandlerPOST(jc jape.Context) { jc.Encode(om) } -func (b *Bus) objectsListHandlerPOST(jc jape.Context) { - var req api.ObjectsListRequest - if jc.Decode(&req) != nil { - return - } - if req.Bucket == "" { - req.Bucket = api.DefaultBucketName - } - resp, err := b.ms.ListObjects(jc.Request.Context(), req.Bucket, req.Prefix, req.SortBy, req.SortDir, req.Marker, req.Limit) - if errors.Is(err, api.ErrMarkerNotFound) { - jc.Error(err, http.StatusBadRequest) - return - } else if jc.Check("couldn't list objects", err) != nil { - return - } - jc.Encode(resp) -} - func (b *Bus) objectsRenameHandlerPOST(jc jape.Context) { var orr api.ObjectsRenameRequest if jc.Decode(&orr) != nil { diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 6d47d0590..ef527c975 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -117,7 +117,7 @@ func TestListObjects(t *testing.T) { } for _, test := range tests { // use the bus client - res, err := 
b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ Prefix: test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, @@ -136,7 +136,7 @@ func TestListObjects(t *testing.T) { if len(res.Objects) > 0 { marker := "" for offset := 0; offset < len(test.want); offset++ { - res, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ Prefix: test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, @@ -162,7 +162,7 @@ func TestListObjects(t *testing.T) { } // list invalid marker - _, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + _, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ Marker: "invalid", SortBy: api.ObjectSortByHealth, }) @@ -481,34 +481,37 @@ func TestObjectEntries(t *testing.T) { } for _, test := range tests { // use the bus client - res, err := b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{ - Prefix: test.prefix, - SortBy: test.sortBy, - SortDir: test.sortDir, + res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Delimiter: "/", + Prefix: test.path + test.prefix, + SortBy: test.sortBy, + SortDir: test.sortDir, }) if err != nil { t.Fatal(err, test.path) } - assertMetadata(res.Entries) + assertMetadata(res.Objects) - if !(len(res.Entries) == 0 && len(test.want) == 0) && !reflect.DeepEqual(res.Entries, test.want) { - t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Entries, test.want) + if !(len(res.Objects) == 0 && len(test.want) == 0) && !reflect.DeepEqual(res.Objects, test.want) { + t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, 
test.prefix, test.sortBy, test.sortDir, res.Objects, test.want) } + var marker string for offset := 0; offset < len(test.want); offset++ { - res, err := b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{ - Prefix: test.prefix, + res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Prefix: test.path + test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, - Offset: offset, + Marker: marker, Limit: 1, }) + marker = res.NextMarker if err != nil { t.Fatal(err) } - assertMetadata(res.Entries) + assertMetadata(res.Objects) - if len(res.Entries) != 1 || res.Entries[0] != test.want[offset] { - t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Entries, test.want[offset]) + if len(res.Objects) != 1 || res.Objects[0] != test.want[offset] { + t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Objects, test.want[offset]) } moreRemaining := len(test.want)-offset-1 > 0 if res.HasMore != moreRemaining { @@ -520,8 +523,8 @@ func TestObjectEntries(t *testing.T) { continue } - res, err = b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{ - Prefix: test.prefix, + res, err = b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Prefix: test.path + test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, Marker: test.want[offset].Name, @@ -530,10 +533,10 @@ func TestObjectEntries(t *testing.T) { if err != nil { t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %vmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, err) } - assertMetadata(res.Entries) + assertMetadata(res.Objects) - if len(res.Entries) != 1 || res.Entries[0] != test.want[offset+1] { - t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, 
test.prefix, test.want[offset].Name, res.Entries, test.want[offset+1]) + if len(res.Objects) != 1 || res.Objects[0] != test.want[offset+1] { + t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.want[offset].Name, res.Objects, test.want[offset+1]) } moreRemaining = len(test.want)-offset-2 > 0 @@ -541,31 +544,6 @@ func TestObjectEntries(t *testing.T) { t.Errorf("invalid value for hasMore (%t) at marker (%s) test (%+v)", res.HasMore, test.want[offset].Name, test) } } - - // use the worker client - got, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{ - Prefix: test.prefix, - SortBy: test.sortBy, - SortDir: test.sortDir, - }) - if err != nil { - t.Fatal(err) - } - assertMetadata(got) - - if !(len(got) == 0 && len(test.want) == 0) && !reflect.DeepEqual(got, test.want) { - t.Errorf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) - } - for _, entry := range got { - if !strings.HasSuffix(entry.Name, "/") { - buf := new(bytes.Buffer) - if err := w.DownloadObject(context.Background(), buf, api.DefaultBucketName, entry.Name, api.DownloadObjectOptions{}); err != nil { - t.Fatal(err) - } else if buf.Len() != int(entry.Size) { - t.Fatal("unexpected", buf.Len(), entry.Size) - } - } - } } // delete all uploads @@ -574,10 +552,12 @@ func TestObjectEntries(t *testing.T) { } // assert root dir is empty - if entries, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, "/", api.GetObjectOptions{}); err != nil { + if resp, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Prefix: "/", + }); err != nil { t.Fatal(err) - } else if len(entries) != 0 { - t.Fatal("there should be no entries left", entries) + } else if len(resp.Objects) != 0 { + t.Fatal("there should be no entries left", resp.Objects) } } @@ -788,37 +768,33 @@ func TestUploadDownloadExtended(t *testing.T) { 
tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(file2), api.DefaultBucketName, "fileÅ›/file2", api.UploadObjectOptions{})) // fetch all entries from the worker - entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "fileÅ›/", api.GetObjectOptions{}) + resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Delimiter: "/", + Prefix: "fileÅ›/", + }) tt.OK(err) - if len(entries) != 2 { - t.Fatal("expected two entries to be returned", len(entries)) + if len(resp.Objects) != 2 { + t.Fatal("expected two entries to be returned", len(resp.Objects)) } - for _, entry := range entries { + for _, entry := range resp.Objects { if entry.MimeType != "application/octet-stream" { t.Fatal("wrong mime type", entry.MimeType) } } // fetch entries with "file" prefix - res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, "fileÅ›/", api.GetObjectOptions{Prefix: "file"}) + res, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{Prefix: "file"}) tt.OK(err) - if len(res.Entries) != 2 { - t.Fatal("expected two entry to be returned", len(entries)) + if len(res.Objects) != 2 { + t.Fatal("expected two entry to be returned", len(res.Objects)) } // fetch entries with "fileÅ›" prefix - res, err = cluster.Bus.Object(context.Background(), api.DefaultBucketName, "fileÅ›/", api.GetObjectOptions{Prefix: "foo"}) + res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{Prefix: "fileÅ›/"}) tt.OK(err) - if len(res.Entries) != 0 { - t.Fatal("expected no entries to be returned", len(entries)) - } - - // fetch entries from the worker for unexisting path - entries, err = cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "bar/", api.GetObjectOptions{}) - tt.OK(err) - if len(entries) != 0 { - t.Fatal("expected no entries to be returned", len(entries)) + if len(res.Objects) != 0 { + 
t.Fatal("expected no entries to be returned", len(res.Objects)) } // prepare two files, a small one and a large one @@ -971,11 +947,9 @@ func TestUploadDownloadSpending(t *testing.T) { tt.OK(err) var found bool - for _, entry := range res.Entries { - if entry.Name == fmt.Sprintf("/%s", path) { - found = true - break - } + if res.Name == fmt.Sprintf("/%s", path) { + found = true + break } if !found { t.Fatal("uploaded object not found in bus") @@ -1422,7 +1396,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // upload 3 objects so every host has 3 sectors var err error - var res api.ObjectsResponse + var res api.Object shards := make(map[types.PublicKey][]object.Sector) for i := 0; i < 3; i++ { // upload object @@ -1448,7 +1422,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] - tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object.Object, api.AddObjectOptions{})) + tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object, api.AddObjectOptions{})) // assert we can download this object tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{})) @@ -1708,15 +1682,17 @@ func TestUploadPacking(t *testing.T) { if err != nil { t.Fatal(err) } - if res.Object.Size != int64(len(data)) { - t.Fatal("unexpected size after upload", res.Object.Size, len(data)) + if res.Size != int64(len(data)) { + t.Fatal("unexpected size after upload", res.Size, len(data)) } - entries, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, "/", api.GetObjectOptions{}) + resp, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Delimiter: "/", + }) if err != nil { t.Fatal(err) } var found bool - for _, entry := range 
entries { + for _, entry := range resp.Objects { if entry.Name == "/"+name { if entry.Size != int64(len(data)) { t.Fatal("unexpected size after upload", entry.Size, len(data)) @@ -1726,7 +1702,7 @@ func TestUploadPacking(t *testing.T) { } } if !found { - t.Fatal("object not found in list", name, entries) + t.Fatal("object not found in list", name, resp.Objects) } } diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index 4dd6c1229..cbbabf053 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -44,8 +44,8 @@ func TestObjectMetadata(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(or.Object.Metadata, opts.Metadata) { - t.Fatal("metadata mismatch", or.Object.Metadata) + if !reflect.DeepEqual(or.Metadata, opts.Metadata) { + t.Fatal("metadata mismatch", or.Metadata) } // get the object from the worker and assert it has the metadata @@ -62,7 +62,7 @@ func TestObjectMetadata(t *testing.T) { // HeadObject retrieves the modtime from a http header so it's not as // accurate as the modtime from the object GET endpoint which returns it in // the body. 
- orModtime, err := time.Parse(http.TimeFormat, or.Object.ModTime.Std().Format(http.TimeFormat)) + orModtime, err := time.Parse(http.TimeFormat, or.ModTime.Std().Format(http.TimeFormat)) if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestObjectMetadata(t *testing.T) { if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ - ContentType: or.Object.ContentType(), + ContentType: or.ContentType(), Etag: gor.Etag, LastModified: api.TimeRFC3339(orModtime), Range: &api.ContentRange{Offset: 1, Length: 1, Size: int64(len(data))}, diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 3f20e22ad..1e3ee8499 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -83,8 +83,8 @@ func TestS3Basic(t *testing.T) { tt.OK(err) if busObject.Object == nil { t.Fatal("expected object to exist") - } else if busObject.Object.ETag != uploadInfo.ETag { - t.Fatalf("expected ETag %q, got %q", uploadInfo.ETag, busObject.Object.ETag) + } else if busObject.ETag != uploadInfo.ETag { + t.Fatalf("expected ETag %q, got %q", uploadInfo.ETag, busObject.ETag) } _, err = s3.PutObject(context.Background(), bucket+"nonexistent", objPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}) diff --git a/stores/metadata.go b/stores/metadata.go index fe26bc29e..e716e09c7 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -318,14 +318,6 @@ func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, return } -func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) (metadata []api.ObjectMetadata, hasMore bool, err error) { - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - metadata, hasMore, err = tx.ObjectEntries(ctx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) - return err - }) - return -} - func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err 
error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { obj, err = tx.Object(ctx, bucket, path) @@ -794,12 +786,15 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []types } } -// TODO: we can use ObjectEntries instead of ListObject if we want to use '/' as -// a delimiter for now (see backend.go) but it would be interesting to have -// arbitrary 'delim' support in ListObjects. -func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { +func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - resp, err = tx.ListObjects(ctx, bucket, prefix, sortBy, sortDir, marker, limit) + if delim == "" { + resp, err = tx.ListObjects(ctx, bucket, prefix, sortBy, sortDir, marker, limit) + } else if delim == "/" { + resp, err = tx.ObjectEntries(ctx, bucket, prefix, sortBy, sortDir, marker, limit) + } else { + return fmt.Errorf("unsupported delimiter: '%s'", delim) + } return err }) return diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 6d0639e5d..35cce7796 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1379,7 +1379,8 @@ func TestObjectHealth(t *testing.T) { } // assert health is returned correctly by ObjectEntries - entries, _, err := ss.ObjectEntries(context.Background(), api.DefaultBucketName, "/", "", "", "", "", 0, -1) + resp, err := ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "", "", "", "", -1) + entries := resp.Objects if err != nil { t.Fatal(err) } else if len(entries) != 1 { @@ -1430,8 +1431,9 @@ func TestObjectHealth(t *testing.T) { } } -// TestObjectEntries is a test for the ObjectEntries method. 
-func TestObjectEntries(t *testing.T) { +// TestListObjectsWithPrefix is a test for the TestListObjects method +// with '/' as the prefix. +func TestListObjectsWithPrefix(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -1530,21 +1532,25 @@ func TestObjectEntries(t *testing.T) { {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { - got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } + got := resp.Objects assertMetadata(got) if !(len(got) == 0 && len(test.want) == 0) && !reflect.DeepEqual(got, test.want) { t.Fatalf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) } + var marker string for offset := 0; offset < len(test.want); offset++ { - got, hasMore, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", offset, 1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } + marker = resp.NextMarker + got := resp.Objects assertMetadata(got) if len(got) != 1 || got[0] != test.want[offset] { @@ -1552,8 +1558,8 @@ func TestObjectEntries(t *testing.T) { } moreRemaining := len(test.want)-offset-1 > 0 - if hasMore != moreRemaining { - t.Fatalf("invalid value for hasMore (%t) at offset (%d) test (%+v)", hasMore, offset, test) + if resp.HasMore != moreRemaining { + t.Fatalf("invalid value for hasMore (%t) at offset (%d) test (%+v)", resp.HasMore, offset, test) } // make sure we stay within bounds @@ -1561,7 +1567,7 @@ func TestObjectEntries(t *testing.T) { continue } 
- got, hasMore, err = ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, 0, 1) + resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, 1) if err != nil { t.Fatal(err) } @@ -1572,14 +1578,14 @@ func TestObjectEntries(t *testing.T) { } moreRemaining = len(test.want)-offset-2 > 0 - if hasMore != moreRemaining { - t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", hasMore, test.want[offset].Name, test) + if resp.HasMore != moreRemaining { + t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", resp.HasMore, test.want[offset].Name, test) } } } } -func TestObjectEntriesExplicitDir(t *testing.T) { +func TestListObjectsExplicitDir(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -1627,12 +1633,12 @@ func TestObjectEntriesExplicitDir(t *testing.T) { {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Name: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, } for _, test := range tests { - got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) + got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } - for i := range got { - got[i].ModTime = api.TimeRFC3339{} // ignore time for comparison + for i := range got.Objects { + got.Objects[i].ModTime = api.TimeRFC3339{} // ignore time for comparison } if !reflect.DeepEqual(got, test.want) { t.Fatalf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) @@ -3320,15 +3326,15 @@ func TestBucketObjects(t *testing.T) { } // List the objects in the buckets. 
- if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { + if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } else if entries[0].Size != 1 { t.Fatal("unexpected size", entries[0].Size) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } else if entries[0].Size != 2 { t.Fatal("unexpected size", entries[0].Size) @@ -3352,15 +3358,15 @@ func TestBucketObjects(t *testing.T) { // Rename object foo/bar in bucket 1 to foo/baz but not in bucket 2. 
if err := ss.RenameObjectBlocking(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/baz" { t.Fatal("unexpected name", entries[0].Name) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/bar" { t.Fatal("unexpected name", entries[0].Name) @@ -3369,15 +3375,15 @@ func TestBucketObjects(t *testing.T) { // Rename foo/bar in bucket 2 using the batch rename. 
if err := ss.RenameObjectsBlocking(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/baz" { t.Fatal("unexpected name", entries[0].Name) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/bam" { t.Fatal("unexpected name", entries[0].Name) @@ -3388,30 +3394,30 @@ func TestBucketObjects(t *testing.T) { t.Fatal(err) } else if err := ss.RemoveObjectBlocking(context.Background(), b1, "/foo/baz"); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) > 0 { + } else if entries := resp.Objects; len(entries) > 0 { t.Fatal("expected 0 entries", len(entries)) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } // Delete all files in 
bucket 2. - if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/", "", "", "", "", 0, -1); err != nil { + if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 2 { + } else if entries := resp.Objects; len(entries) != 2 { t.Fatal("expected 2 entries", len(entries)) } else if err := ss.RemoveObjectsBlocking(context.Background(), b2, "/"); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 0 { + } else if entries := resp.Objects; len(entries) != 0 { t.Fatal("expected 0 entries", len(entries)) - } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } @@ -3460,9 +3466,9 @@ func TestCopyObject(t *testing.T) { // Copy it within the same bucket. if om, err := ss.CopyObject(ctx, "src", "src", "/foo", "/bar", "", nil); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(ctx, "src", "/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(ctx, "src", "/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 2 { + } else if entries := resp.Objects; len(entries) != 2 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/bar" || entries[1].Name != "/foo" { t.Fatal("unexpected names", entries[0].Name, entries[1].Name) @@ -3473,9 +3479,9 @@ func TestCopyObject(t *testing.T) { // Copy it cross buckets. 
if om, err := ss.CopyObject(ctx, "src", "dst", "/foo", "/bar", "", nil); err != nil { t.Fatal(err) - } else if entries, _, err := ss.ObjectEntries(ctx, "dst", "/", "", "", "", "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(ctx, "dst", "/", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } else if entries[0].Name != "/bar" { t.Fatal("unexpected names", entries[0].Name, entries[1].Name) @@ -3559,7 +3565,7 @@ func TestMarkSlabUploadedAfterRenew(t *testing.T) { } } -func TestListObjects(t *testing.T) { +func TestListObjectsNoPrefix(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { @@ -3633,7 +3639,7 @@ func TestListObjects(t *testing.T) { } } for _, test := range tests { - res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, test.sortBy, test.sortDir, "", -1) + res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } @@ -3648,7 +3654,7 @@ func TestListObjects(t *testing.T) { if len(res.Objects) > 0 { marker := "" for offset := 0; offset < len(test.want); offset++ { - res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, test.sortBy, test.sortDir, marker, 1) + res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } diff --git a/stores/sql/database.go b/stores/sql/database.go index 3e1917502..e314d848f 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -218,7 +218,7 @@ type ( Object(ctx context.Context, bucket, key string) (api.Object, error) // ObjectEntries queries the database for objects in a given dir. 
- ObjectEntries(ctx context.Context, bucket, key, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) + ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) // ObjectMetadata returns an object's metadata. ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) diff --git a/stores/sql/main.go b/stores/sql/main.go index 655859bfc..d7241d7f4 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1353,17 +1353,15 @@ func dirID(ctx context.Context, tx sql.Tx, dirPath string) (int64, error) { return id, nil } -func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - // sanity check we are passing a directory - if !strings.HasSuffix(path, "/") { - panic("path must end in /") +func ObjectEntries(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // split prefix into path and object prefix + path := "/" // root of bucket + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + path = prefix[:idx+1] + prefix = prefix[idx+1:] } - - // sanity check we are passing sane paging parameters - usingMarker := marker != "" - usingOffset := offset > 0 - if usingMarker && usingOffset { - return nil, false, errors.New("fetching entries using a marker and an offset is not supported at the same time") + if !strings.HasSuffix(path, "/") { + panic("path must end with /") } // fetch one more to see if there are more entries @@ -1384,9 +1382,9 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor // fetch directory id dirID, err := dirID(ctx, tx, path) if errors.Is(err, dsql.ErrNoRows) { - return []api.ObjectMetadata{}, false, nil + return api.ObjectsListResponse{}, nil } else if err != nil { - return nil, false, fmt.Errorf("failed to fetch directory 
id: %w", err) + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch directory id: %w", err) } args := []any{ @@ -1443,7 +1441,7 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor } }) if err != nil { - return nil, false, fmt.Errorf("failed to query marker: %w", err) + return api.ObjectsListResponse{}, fmt.Errorf("failed to query marker: %w", err) } else if len(markerExprs) > 0 { whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") } @@ -1452,11 +1450,11 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor // apply sorting orderByExprs, err := orderByObject(sortBy, sortDir) if err != nil { - return nil, false, fmt.Errorf("failed to apply sorting: %w", err) + return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) } // apply offset and limit - args = append(args, limit, offset) + args = append(args, limit) // objectsQuery consists of 2 parts // 1. fetch all objects in requested directory @@ -1481,7 +1479,7 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor ) AS o %s ORDER BY %s - LIMIT ? OFFSET ? + LIMIT ? `, tx.SelectObjectMetadataExpr(), prefixExpr, @@ -1491,7 +1489,7 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor strings.Join(orderByExprs, ", "), ), args...) 
if err != nil { - return nil, false, fmt.Errorf("failed to fetch objects: %w", err) + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) } defer rows.Close() @@ -1499,19 +1497,27 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor for rows.Next() { om, err := tx.ScanObjectMetadata(rows) if err != nil { - return nil, false, fmt.Errorf("failed to scan object metadata: %w", err) + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) } objects = append(objects, om) } // trim last element if we have more var hasMore bool + var nextMarker string if len(objects) == limit { - hasMore = true objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Name + } } - return objects, hasMore, nil + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil } func ObjectMetadata(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index de8b97bfa..e8cf69780 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -530,8 +530,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - return ssql.ObjectEntries(ctx, tx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) +func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ObjectEntries(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { diff --git 
a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 739fb47f4..674bf8237 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -527,8 +527,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - return ssql.ObjectEntries(ctx, tx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) +func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ObjectEntries(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { diff --git a/worker/alerts.go b/worker/alerts.go index 5ab5092b7..664698fb6 100644 --- a/worker/alerts.go +++ b/worker/alerts.go @@ -13,7 +13,7 @@ func randomAlertID() types.Hash256 { return frand.Entropy256() } -func newDownloadFailedAlert(bucket, path, prefix, marker string, offset, length, contracts int64, err error) alerts.Alert { +func newDownloadFailedAlert(bucket, path string, offset, length, contracts int64, err error) alerts.Alert { return alerts.Alert{ ID: randomAlertID(), Severity: alerts.SeverityError, @@ -21,8 +21,6 @@ func newDownloadFailedAlert(bucket, path, prefix, marker string, offset, length, Data: map[string]any{ "bucket": bucket, "path": path, - "prefix": prefix, - "marker": marker, "offset": offset, "length": length, "contracts": contracts, diff --git a/worker/bench_test.go b/worker/bench_test.go index cc0034415..60a238dfc 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -42,10 +42,10 @@ func BenchmarkDownloaderSingleObject(b *testing.B) { b.Fatal(err) } - b.SetBytes(o.Object.Size) + b.SetBytes(o.Size) b.ResetTimer() for i := 0; 
i < b.N; i++ { - err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { b.Fatal(err) } diff --git a/worker/client/client.go b/worker/client/client.go index 7df0a6052..a65e7136e 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -2,7 +2,6 @@ package client import ( "context" - "encoding/json" "errors" "fmt" "io" @@ -178,21 +177,6 @@ func (c *Client) MigrateSlab(ctx context.Context, slab object.Slab, set string) return } -// ObjectEntries returns the entries at the given path, which must end in /. -func (c *Client) ObjectEntries(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (entries []api.ObjectMetadata, err error) { - path = api.ObjectPathEscape(path) - body, _, err := c.object(ctx, bucket, path, api.DownloadObjectOptions{ - GetObjectOptions: opts, - }) - if err != nil { - return nil, err - } - defer io.Copy(io.Discard, body) - defer body.Close() - err = json.NewDecoder(body).Decode(&entries) - return -} - // State returns the current state of the worker. 
func (c *Client) State() (state api.WorkerStateResponse, err error) { err = c.c.GET("/state", &state) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 0b0d53351..c861d6a51 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -429,18 +429,18 @@ func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minS return []object.SlabSlice{ss}, os.totalSlabBufferSize() > os.slabBufferMaxSizeSoft, nil } -func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) { +func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.Object, error) { os.mu.Lock() defer os.mu.Unlock() // check if the bucket exists if _, exists := os.objects[bucket]; !exists { - return api.ObjectsResponse{}, api.ErrBucketNotFound + return api.Object{}, api.ErrBucketNotFound } // check if the object exists if _, exists := os.objects[bucket][path]; !exists { - return api.ObjectsResponse{}, api.ErrObjectNotFound + return api.Object{}, api.ErrObjectNotFound } // clone to ensure the store isn't unwillingly modified @@ -451,10 +451,10 @@ func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts panic(err) } - return api.ObjectsResponse{Object: &api.Object{ + return api.Object{ ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, Object: &o, - }}, nil + }, nil } func (os *objectStoreMock) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) { diff --git a/worker/s3/backend.go b/worker/s3/backend.go index a8dd1cb22..88a880828 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -82,9 +82,6 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 return nil, gofakes3.ErrorMessage(gofakes3.ErrNotImplemented, "delimiter must be '/' but was "+prefix.Delimiter) } - // Workaround for empty prefix - prefix.HasPrefix = prefix.Prefix 
!= "" - // Adjust MaxKeys if page.MaxKeys == 0 { page.MaxKeys = maxKeysDefault @@ -95,59 +92,23 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 page.Marker = "/" + page.Marker } - var objects []api.ObjectMetadata - var err error - response := gofakes3.NewObjectList() - if prefix.HasDelimiter { - // Handle request with delimiter. - opts := api.GetObjectOptions{} - if page.HasMarker { - opts.Marker = page.Marker - opts.Limit = int(page.MaxKeys) - } - var path string // root of bucket - adjustedPrefix := prefix.Prefix - if idx := strings.LastIndex(adjustedPrefix, prefix.Delimiter); idx != -1 { - path = adjustedPrefix[:idx+1] - adjustedPrefix = adjustedPrefix[idx+1:] - } - if adjustedPrefix != "" { - opts.Prefix = adjustedPrefix - } - var res api.ObjectsResponse - res, err = s.b.Object(ctx, bucketName, path, opts) - if utils.IsErr(err, api.ErrBucketNotFound) { - return nil, gofakes3.BucketNotFound(bucketName) - } else if err != nil { - return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) - } - objects = res.Entries - response.IsTruncated = res.HasMore - if response.IsTruncated { - response.NextMarker = objects[len(objects)-1].Name - } - } else { - // Handle request without delimiter. 
- opts := api.ListObjectOptions{ - Limit: int(page.MaxKeys), - Marker: page.Marker, - Prefix: "/" + prefix.Prefix, - } - - var res api.ObjectsListResponse - res, err = s.b.ListObjects(ctx, bucketName, opts) - if utils.IsErr(err, api.ErrBucketNotFound) { - return nil, gofakes3.BucketNotFound(bucketName) - } else if err != nil { - return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) - } - objects = res.Objects - response.IsTruncated = res.HasMore - response.NextMarker = res.NextMarker - } - if err != nil { + resp, err := s.b.Objects(ctx, bucketName, api.ListObjectOptions{ + Delimiter: prefix.Delimiter, + Limit: int(page.MaxKeys), + Marker: page.Marker, + Prefix: prefix.Prefix, + }) + if utils.IsErr(err, api.ErrBucketNotFound) { + return nil, gofakes3.BucketNotFound(bucketName) + } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } + objects := resp.Objects + + // prepare response + response := gofakes3.NewObjectList() + response.IsTruncated = resp.HasMore + response.NextMarker = resp.NextMarker // Remove the leading slash from the marker since we also do that for the // name of each object @@ -306,9 +267,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // HeadObject should return a NotFound() error if the object does not // exist. 
func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{ - IgnoreDelim: true, - }) + res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{}) if utils.IsErr(err, api.ErrObjectNotFound) { return nil, gofakes3.KeyNotFound(objectName) } else if err != nil { diff --git a/worker/s3/s3.go b/worker/s3/s3.go index d5cbb71a3..77986b54f 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -33,8 +33,7 @@ type Bus interface { AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) - ListObjects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) - Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.ObjectsResponse, err error) + Objects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) diff --git a/worker/upload_test.go b/worker/upload_test.go index 5bad0941a..c67044101 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -54,13 +54,13 @@ func TestUpload(t *testing.T) { // build used hosts used := make(map[types.PublicKey]struct{}) - for _, shard := range o.Object.Object.Slabs[0].Shards { + for _, shard := range o.Object.Slabs[0].Shards { used[shard.LatestHost] = struct{}{} } // download the data 
and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -86,7 +86,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), filtered) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -103,7 +103,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it fails buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), filtered) if !errors.Is(err, errDownloadNotEnoughHosts) { t.Fatal("expected not enough hosts error", err) } @@ -165,7 +165,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -201,7 +201,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -294,10 +294,10 @@ func TestMigrateLostSector(t 
*testing.T) { o, err := os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab := o.Object.Object.Slabs[0] + slab := o.Object.Slabs[0] // build usedHosts hosts usedHosts := make(map[types.PublicKey]struct{}) @@ -320,7 +320,7 @@ func TestMigrateLostSector(t *testing.T) { } // encrypt the shards - o.Object.Object.Slabs[0].Slab.Encrypt(shards) + o.Object.Slabs[0].Slab.Encrypt(shards) // filter it down to the shards we need to migrate shards = shards[:1] @@ -336,7 +336,7 @@ func TestMigrateLostSector(t *testing.T) { // migrate the shard away from the bad host mem := mm.AcquireMemory(context.Background(), rhpv2.SectorSize) - err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, []int{0}, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + err = ul.UploadShards(context.Background(), o.Object.Slabs[0].Slab, []int{0}, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) if err != nil { t.Fatal(err) } @@ -345,10 +345,10 @@ func TestMigrateLostSector(t *testing.T) { o, err = os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab = o.Object.Object.Slabs[0] + slab = o.Object.Slabs[0] // assert the bad shard is on a good host now shard := slab.Shards[0] @@ -395,10 +395,10 @@ func TestUploadShards(t *testing.T) { o, err := os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab := o.Object.Object.Slabs[0] + slab := o.Object.Slabs[0] // build usedHosts hosts usedHosts := make(map[types.PublicKey]struct{}) @@ -423,7 
+423,7 @@ func TestUploadShards(t *testing.T) { } // encrypt the shards - o.Object.Object.Slabs[0].Slab.Encrypt(shards) + o.Object.Slabs[0].Slab.Encrypt(shards) // filter it down to the shards we need to migrate for i, si := range badIndices { @@ -443,7 +443,7 @@ func TestUploadShards(t *testing.T) { // migrate those shards away from bad hosts mem := mm.AcquireMemory(context.Background(), uint64(len(badIndices))*rhpv2.SectorSize) - err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + err = ul.UploadShards(context.Background(), o.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) if err != nil { t.Fatal(err) } @@ -452,10 +452,10 @@ func TestUploadShards(t *testing.T) { o, err = os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab = o.Object.Object.Slabs[0] + slab = o.Object.Slabs[0] // assert none of the shards are on bad hosts for i, shard := range slab.Shards { @@ -479,7 +479,7 @@ func TestUploadShards(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), contracts) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), contracts) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -621,7 +621,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, 
buf.Bytes()) { diff --git a/worker/worker.go b/worker/worker.go index 7502f646b..8d1166465 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -34,7 +34,6 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker/client" - "go.sia.tech/renterd/worker/s3" "go.uber.org/zap" "golang.org/x/crypto/blake2b" ) @@ -70,8 +69,6 @@ func NewClient(address, password string) *Client { type ( Bus interface { - s3.Bus - alerts.Alerter gouging.ConsensusState webhooks.Broadcaster @@ -122,7 +119,7 @@ type ( // NOTE: used by worker Bucket(_ context.Context, bucket string) (api.Bucket, error) - Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) + Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.Object, error) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) @@ -764,8 +761,7 @@ func (w *Worker) objectsHandlerHEAD(jc jape.Context) { // fetch object metadata hor, err := w.HeadObject(jc.Request.Context(), bucket, path, api.HeadObjectOptions{ - IgnoreDelim: ignoreDelim, - Range: &dr, + Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -820,27 +816,9 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { return } - opts := api.GetObjectOptions{ - Prefix: prefix, - Marker: marker, - Offset: off, - Limit: limit, - IgnoreDelim: ignoreDelim, - SortBy: sortBy, - SortDir: sortDir, - } - path := jc.PathParam("path") - if path == "" || strings.HasSuffix(path, "/") { - // list directory - res, err := w.bus.Object(ctx, bucket, path, opts) - if utils.IsErr(err, api.ErrObjectNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if 
jc.Check("couldn't get object or entries", err) != nil { - return - } - jc.Encode(res.Entries) + if path == "" { + jc.Error(errors.New("no path provided"), http.StatusBadRequest) return } @@ -857,8 +835,7 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { } gor, err := w.GetObject(ctx, bucket, path, api.DownloadObjectOptions{ - GetObjectOptions: opts, - Range: &dr, + Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -1387,16 +1364,15 @@ func isErrHostUnreachable(err error) bool { utils.IsErr(err, errors.New("cannot assign requested address")) } -func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.ObjectsResponse, error) { +func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.Object, error) { // fetch object res, err := w.bus.Object(ctx, bucket, path, api.GetObjectOptions{ - IgnoreDelim: opts.IgnoreDelim, OnlyMetadata: onlyMetadata, }) if err != nil { - return nil, api.ObjectsResponse{}, fmt.Errorf("couldn't fetch object: %w", err) + return nil, api.Object{}, fmt.Errorf("couldn't fetch object: %w", err) } else if res.Object == nil { - return nil, api.ObjectsResponse{}, errors.New("object is a directory") + return nil, api.Object{}, errors.New("object is a directory") } // adjust length @@ -1404,21 +1380,21 @@ func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetada opts.Range = &api.DownloadRange{Offset: 0, Length: -1} } if opts.Range.Length == -1 { - opts.Range.Length = res.Object.Size - opts.Range.Offset + opts.Range.Length = res.Size - opts.Range.Offset } // check size of object against range - if opts.Range.Offset+opts.Range.Length > res.Object.Size { - return nil, api.ObjectsResponse{}, http_range.ErrInvalid + if opts.Range.Offset+opts.Range.Length > res.Size { + return nil, api.Object{}, 
http_range.ErrInvalid } return &api.HeadObjectResponse{ - ContentType: res.Object.MimeType, - Etag: res.Object.ETag, - LastModified: res.Object.ModTime, - Range: opts.Range.ContentRange(res.Object.Size), - Size: res.Object.Size, - Metadata: res.Object.Metadata, + ContentType: res.MimeType, + Etag: res.ETag, + LastModified: res.ModTime, + Range: opts.Range.ContentRange(res.Size), + Size: res.Size, + Metadata: res.Metadata, }, res, nil } @@ -1459,13 +1435,12 @@ func (w *Worker) FundAccount(ctx context.Context, fcid types.FileContractID, hk func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { // head object hor, res, err := w.headObject(ctx, bucket, path, false, api.HeadObjectOptions{ - IgnoreDelim: opts.IgnoreDelim, - Range: opts.Range, + Range: opts.Range, }) if err != nil { return nil, fmt.Errorf("couldn't fetch object: %w", err) } - obj := *res.Object.Object + obj := *res.Object // adjust range if opts.Range == nil { @@ -1502,7 +1477,7 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) && !errors.Is(err, io.ErrClosedPipe) { - w.registerAlert(newDownloadFailedAlert(bucket, path, opts.Prefix, opts.Marker, offset, length, int64(len(contracts)), err)) + w.registerAlert(newDownloadFailedAlert(bucket, path, offset, length, int64(len(contracts)), err)) } return fmt.Errorf("failed to download object: %w", err) } From fe1564f58254f2421f1d7c2384d1ae35ff8c3654 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Aug 2024 18:03:52 +0200 Subject: [PATCH 16/98] bus: fix jape --- bus/bus.go | 12 ++++++------ bus/routes.go | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 39cd4f409..9249629c0 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -439,12 +439,12 @@ func (b *Bus) Handler() http.Handler { "POST /multipart/listuploads": 
b.multipartHandlerListUploadsPOST, "POST /multipart/listparts": b.multipartHandlerListPartsPOST, - "GET /object/*key": b.objectHandlerGET, - "GET /objects/*prefix": b.objectsHandlerPOST, - "PUT /objects/*path": b.objectsHandlerPUT, - "DELETE /objects/*path": b.objectsHandlerDELETE, - "POST /objects/copy": b.objectsCopyHandlerPOST, - "POST /objects/rename": b.objectsRenameHandlerPOST, + "GET /object/*key": b.objectHandlerGET, + "GET /objects": b.objectsHandlerGET, + "PUT /objects/*key": b.objectsHandlerPUT, + "DELETE /objects/*key": b.objectsHandlerDELETE, + "POST /objects/copy": b.objectsCopyHandlerPOST, + "POST /objects/rename": b.objectsRenameHandlerPOST, "GET /params/gouging": b.paramsHandlerGougingGET, "GET /params/upload": b.paramsHandlerUploadGET, diff --git a/bus/routes.go b/bus/routes.go index 799914492..13770da8c 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1060,10 +1060,10 @@ func (b *Bus) objectHandlerGET(jc jape.Context) { } else if jc.Check("couldn't load object", err) != nil { return } - jc.Encode(api.ObjectsResponse{Object: &o}) + jc.Encode(o) } -func (b *Bus) objectsHandlerPOST(jc jape.Context) { +func (b *Bus) objectsHandlerGET(jc jape.Context) { var limit int var marker, delim, prefix, sortBy, sortDir string bucket := api.DefaultBucketName @@ -1106,7 +1106,7 @@ func (b *Bus) objectsHandlerPUT(jc jape.Context) { } else if aor.Bucket == "" { aor.Bucket = api.DefaultBucketName } - jc.Check("couldn't store object", b.ms.UpdateObject(jc.Request.Context(), aor.Bucket, jc.PathParam("path"), aor.ContractSet, aor.ETag, aor.MimeType, aor.Metadata, aor.Object)) + jc.Check("couldn't store object", b.ms.UpdateObject(jc.Request.Context(), aor.Bucket, jc.PathParam("key"), aor.ContractSet, aor.ETag, aor.MimeType, aor.Metadata, aor.Object)) } func (b *Bus) objectsCopyHandlerPOST(jc jape.Context) { @@ -1165,9 +1165,9 @@ func (b *Bus) objectsHandlerDELETE(jc jape.Context) { } var err error if batch { - err = b.ms.RemoveObjects(jc.Request.Context(), bucket, 
jc.PathParam("path")) + err = b.ms.RemoveObjects(jc.Request.Context(), bucket, jc.PathParam("key")) } else { - err = b.ms.RemoveObject(jc.Request.Context(), bucket, jc.PathParam("path")) + err = b.ms.RemoveObject(jc.Request.Context(), bucket, jc.PathParam("key")) } if errors.Is(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) From f43d5d7b86b90421a81ccc9ce19602d157d0ee3d Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 09:34:12 +0200 Subject: [PATCH 17/98] stores: add compat code --- .github/ISSUE_TEMPLATE/bug_report.yml | 8 +- README.md | 40 +------ api/setting.go | 7 ++ bus/bus.go | 120 +++++++++++++------- bus/client/settings.go | 16 +-- bus/routes.go | 151 ++++++++++++-------------- internal/bus/pinmanager.go | 35 ++---- internal/bus/pinmanager_test.go | 72 ++++++------ stores/settingsdb.go | 115 +++++++++++++++++++- stores/settingsdb_test.go | 33 ------ stores/sql/database.go | 3 + stores/sql/main.go | 7 ++ stores/sql/mysql/main.go | 4 + stores/sql/sqlite/main.go | 4 + 14 files changed, 339 insertions(+), 276 deletions(-) delete mode 100644 stores/settingsdb_test.go diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 04504c086..5219b07cf 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -66,10 +66,10 @@ body: description: | The configuration of your bus ```bash - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/contractset - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/gouging - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/redundancy - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/uploadpacking + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/contractset + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/gouging + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/redundancy + curl -u ":[YOUR_PASSWORD]" 
http://localhost:9980/api/bus/settings/uploadpacking ``` placeholder: Paste the output of the above commands here validations: diff --git a/README.md b/README.md index c20749935..7508620c5 100644 --- a/README.md +++ b/README.md @@ -558,49 +558,13 @@ formed. } ``` -### Contract Set - -The contract set settings on the bus allow specifying a default contract set. -This contract set will be returned by the `bus` through the upload parameters, -and decides what contracts data is upload or migrated to by default. This -setting does not have a default value, it can be updated using the settings API: - -- `GET /api/bus/setting/contractset` -- `PUT /api/bus/setting/contractset` - -```json -{ - "default": "autopilot" -} -``` - -In most cases the default set should match the set from your autopilot -configuration in order for migrations to work properly. The contract set can be -overridden by passing it as a query string parameter to the worker's upload and -migrate endpoints. - -- `PUT /api/worker/objects/foo?contractset=foo` - -### Redundancy - -The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redundancy -can be updated using the settings API: - -- `GET /api/bus/setting/redundancy` -- `PUT /api/bus/setting/redundancy` - -The redundancy can also be passed through query string parameters on the upload -endpoint in the worker API: - -- `PUT /api/worker/objects/foo?minshards=2&totalshards=5` - ### Gouging The default gouging settings are listed below. 
The gouging settings can be updated using the settings API: -- `GET /api/bus/setting/gouging` -- `PUT /api/bus/setting/gouging` +- `GET /api/bus/settings/gouging` +- `PUT /api/bus/settings/gouging` ```json { diff --git a/api/setting.go b/api/setting.go index 3b81eb87e..f3e0f2b63 100644 --- a/api/setting.go +++ b/api/setting.go @@ -246,6 +246,13 @@ func (gs GougingSettings) Validate() error { return nil } +func (us UploadSettings) Validate() error { + return errors.Join( + us.Packing.Validate(), + us.Redundancy.Validate(), + ) +} + // Validate returns an error if the upload packing settings are not considered // valid. func (up UploadPackingSettings) Validate() error { diff --git a/bus/bus.go b/bus/bus.go index 81c52f99c..e1e5563f9 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -5,7 +5,6 @@ package bus import ( "context" - "encoding/json" "errors" "fmt" "net" @@ -278,8 +277,21 @@ type ( // A SettingStore stores settings. SettingStore interface { - Setting(ctx context.Context, key string) (string, error) - UpdateSetting(ctx context.Context, key, value string) error + GougingSettings(ctx context.Context) (api.GougingSettings, error) + UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error + + PinnedSettings(ctx context.Context) (api.PinnedSettings, error) + UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error + + UploadSettings(ctx context.Context) (api.UploadSettings, error) + UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error + + S3Settings(ctx context.Context) (api.S3Settings, error) + UpdateS3Settings(ctx context.Context, s3as api.S3Settings) error + + // required for compat + Setting(ctx context.Context, key string, out interface{}) error + DeleteSetting(ctx context.Context, key string) error } WalletMetricsRecorder interface { @@ -357,6 +369,11 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa // create wallet metrics recorder b.walletMetricsRecorder = 
ibus.NewWalletMetricRecorder(store, w, defaultWalletRecordMetricInterval, l) + // migrate settings to V2 types + if err := b.compatV2Settings(ctx); err != nil { + return nil, err + } + return b, nil } @@ -457,8 +474,8 @@ func (b *Bus) Handler() http.Handler { "PUT /settings/pinned": b.settingsPinnedHandlerPUT, "GET /settings/s3": b.settingsS3HandlerGET, "PUT /settings/s3": b.settingsS3HandlerPUT, - "GET /settings/uploads": b.settingsRedundancyHandlerGET, - "PUT /settings/uploads": b.settingsRedundancyHandlerPUT, + "GET /settings/uploads": b.settingsUploadsHandlerGET, + "PUT /settings/uploads": b.settingsUploadsHandlerPUT, "POST /slabs/migration": b.slabsMigrationHandlerPOST, "GET /slabs/partial/:key": b.slabsPartialHandlerGET, @@ -594,56 +611,75 @@ func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { return pk } -func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) error { - defaults := map[string]interface{}{ - api.SettingGouging: api.DefaultGougingSettings, - api.SettingPinned: api.DefaultPricePinSettings, - api.SettingUploads: api.DefaultUploadSettings, +func (b *Bus) compatV2Settings(ctx context.Context) error { + // escape early if all settings are present + if !errors.Is(errors.Join( + b.ss.Setting(ctx, api.SettingGouging, struct{}{}), + b.ss.Setting(ctx, api.SettingPinned, struct{}{}), + b.ss.Setting(ctx, api.SettingS3, struct{}{}), + b.ss.Setting(ctx, api.SettingUploads, struct{}{}), + ), api.ErrAutopilotNotFound) { + return nil } - // testnets have different redundancy settings - if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { - defaults[api.SettingUploads] = api.DefaultRedundancySettingsTestnet + // migrate S3 settings + var s3as api.S3AuthenticationSettings + if err := b.ss.Setting(ctx, "s3authentication", &s3as); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + return err + } else if err == nil { + s3s := api.S3Settings{Authentication: s3as} + if err := b.ss.UpdateS3Settings(ctx, s3s); 
err != nil { + return err + } } - setting, err := b.ss.Setting(ctx, key) - if errors.Is(err, api.ErrSettingNotFound) { - val, ok := defaults[key] - if !ok { - return fmt.Errorf("%w: unknown setting '%s'", api.ErrSettingNotFound, key) + // migrate pinned settings + var pps api.PinnedSettings + if err := b.ss.Setting(ctx, "pricepinning", &pps); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + return err + } else if errors.Is(err, api.ErrSettingNotFound) { + if err := b.ss.UpdatePinnedSettings(ctx, api.DefaultPricePinSettings); err != nil { + return err } - - bytes, _ := json.Marshal(val) - if err := b.ss.UpdateSetting(ctx, key, string(bytes)); err != nil { - b.logger.Warn(fmt.Sprintf("failed to update default setting '%s': %v", key, err)) + } else { + if err := b.ss.UpdatePinnedSettings(ctx, pps); err != nil { + return err } - return json.Unmarshal(bytes, &val) - } else if err != nil { - return err } - return json.Unmarshal([]byte(setting), &value) -} + // migrate upload settings + us := api.DefaultUploadSettings + var css struct { + Default string `json:"default"` + } -func (b *Bus) updateSetting(ctx context.Context, key string, value string, updatePinMgr bool) error { - err := b.ss.UpdateSetting(ctx, key, value) - if err != nil { + // override default contract set on default upload settings + if err := b.ss.Setting(ctx, "contractset", &css); err != nil && !errors.Is(err, api.ErrSettingNotFound) { return err + } else if err == nil { + us.DefaultContractSet = css.Default } - b.broadcastAction(webhooks.Event{ - Module: api.ModuleSetting, - Event: api.EventUpdate, - Payload: api.EventSettingUpdate{ - Key: key, - Update: value, - Timestamp: time.Now().UTC(), - }, - }) + // override redundancy settings on default upload settings + var rs api.RedundancySettings + if err := b.ss.Setting(ctx, "redundancy", &rs); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + return err + } else if errors.Is(err, api.ErrSettingNotFound) { + // default redundancy settings 
for testnet are different from mainnet + if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { + us.Redundancy = api.DefaultRedundancySettingsTestnet + } + } else { + us.Redundancy = rs + } - if updatePinMgr { - b.pinMgr.TriggerUpdate() + // override upload packing settings on default upload settings + var ups api.UploadPackingSettings + if err := b.ss.Setting(ctx, "uploadpacking", &ups); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + return err + } else if err == nil { + us.Packing = ups } - return nil + return b.ss.UpdateUploadSettings(ctx, us) } diff --git a/bus/client/settings.go b/bus/client/settings.go index 35c48eac2..5723c3cdd 100644 --- a/bus/client/settings.go +++ b/bus/client/settings.go @@ -8,44 +8,44 @@ import ( // GougingSettings returns the gouging settings. func (c *Client) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/gouging", &gs) + err = c.c.WithContext(ctx).GET("/settings/gouging", &gs) return } // UpdateGougingSettings updates the given setting. func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { - return c.c.WithContext(ctx).PUT("/setting/gouging", gs) + return c.c.WithContext(ctx).PUT("/settings/gouging", gs) } // PricePinningSettings returns the contract set settings. func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PinnedSettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/pinned", &pps) + err = c.c.WithContext(ctx).GET("/settings/pinned", &pps) return } // UpdatePinnedSettings updates the given setting. func (c *Client) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error { - return c.c.WithContext(ctx).PUT("/setting/pinned", pps) + return c.c.WithContext(ctx).PUT("/settings/pinned", pps) } // S3Settings returns the S3 settings. 
func (c *Client) S3Settings(ctx context.Context) (as api.S3Settings, err error) { - err = c.c.WithContext(ctx).GET("/setting/s3", &as) + err = c.c.WithContext(ctx).GET("/settings/s3", &as) return } // UpdateS3Settings updates the given setting. func (c *Client) UpdateS3Settings(ctx context.Context, as api.S3Settings) error { - return c.c.WithContext(ctx).PUT("/setting/s3", as) + return c.c.WithContext(ctx).PUT("/settings/s3", as) } // UploadSettings returns the upload settings. func (c *Client) UploadSettings(ctx context.Context) (css api.UploadSettings, err error) { - err = c.c.WithContext(ctx).GET("/setting/upload", &css) + err = c.c.WithContext(ctx).GET("/settings/upload", &css) return } // UpdateUploadSettings update the given setting. func (c *Client) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error { - return c.c.WithContext(ctx).PUT("/setting/upload", us) + return c.c.WithContext(ctx).PUT("/settings/upload", us) } diff --git a/bus/routes.go b/bus/routes.go index e4ba44a2d..593aa1112 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1253,8 +1253,7 @@ func (b *Bus) packedSlabsHandlerDonePOST(jc jape.Context) { } func (b *Bus) settingsGougingHandlerGET(jc jape.Context) { - var gs api.GougingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingGouging, &gs); errors.Is(err, api.ErrSettingNotFound) { + if gs, err := b.ss.GougingSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get gouging settings", err) == nil { jc.Encode(gs) @@ -1268,24 +1267,22 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { } else if err := gs.Validate(); err != nil { jc.Error(fmt.Errorf("couldn't update gouging settings, error: %v", err), http.StatusBadRequest) return - } - - // marshal the setting - data, err := json.Marshal(gs) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) - return - 
} - - // update the setting - if jc.Check("could not update gouging settings", b.updateSetting(jc.Request.Context(), api.SettingGouging, string(data), true)) != nil { - return + } else if jc.Check("could not update gouging settings", b.ss.UpdateGougingSettings(jc.Request.Context(), gs)) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: api.SettingGouging, + Update: gs, + Timestamp: time.Now().UTC(), + }, + }) + b.pinMgr.TriggerUpdate() } } func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { - var pps api.PinnedSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingPinned, &pps); errors.Is(err, api.ErrSettingNotFound) { + if pps, err := b.ss.PinnedSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get price pinning settings", err) == nil { // populate the Autopilots map with the current autopilots @@ -1315,79 +1312,73 @@ func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { return } } - - // marshal the setting - data, err := json.Marshal(pps) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) - return - } - - // update the setting - if jc.Check("could not update price pinning settings", b.updateSetting(jc.Request.Context(), api.SettingPinned, string(data), true)) != nil { - return + if jc.Check("could not update price pinning settings", b.ss.UpdatePinnedSettings(jc.Request.Context(), pps)) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: api.SettingPinned, + Update: pps, + Timestamp: time.Now().UTC(), + }, + }) + b.pinMgr.TriggerUpdate() } } -func (b *Bus) settingsRedundancyHandlerGET(jc jape.Context) { - var rs api.RedundancySettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &rs); 
errors.Is(err, api.ErrSettingNotFound) { +func (b *Bus) settingsUploadsHandlerGET(jc jape.Context) { + if us, err := b.ss.UploadSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get redundancy settings", err) == nil { - jc.Encode(rs) + } else if jc.Check("failed to get upload settings", err) == nil { + jc.Encode(us) } } -func (b *Bus) settingsRedundancyHandlerPUT(jc jape.Context) { - var rs api.RedundancySettings - if jc.Decode(&rs) != nil { - return - } else if err := rs.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update redundancy settings, error: %v", err), http.StatusBadRequest) - return - } - - // marshal the setting - data, err := json.Marshal(rs) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) +func (b *Bus) settingsUploadsHandlerPUT(jc jape.Context) { + var us api.UploadSettings + if jc.Decode(&us) != nil { return - } - - // update the setting - if jc.Check("could not update redundancy settings", b.updateSetting(jc.Request.Context(), api.SettingUploads, string(data), false)) != nil { + } else if err := us.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update upload settings, error: %v", err), http.StatusBadRequest) return + } else if jc.Check("could not update upload settings", b.ss.UpdateUploadSettings(jc.Request.Context(), us)) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: api.SettingUploads, + Update: us, + Timestamp: time.Now().UTC(), + }, + }) } } func (b *Bus) settingsS3HandlerGET(jc jape.Context) { - var s3as api.S3Settings - if err := b.fetchSetting(jc.Request.Context(), api.SettingS3, &s3as); errors.Is(err, api.ErrSettingNotFound) { + if s3s, err := b.ss.S3Settings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) - } else if 
jc.Check("failed to get s3 authentication settings", err) == nil { - jc.Encode(s3as) + } else if jc.Check("failed to get S3 settings", err) == nil { + jc.Encode(s3s) } } func (b *Bus) settingsS3HandlerPUT(jc jape.Context) { - var s3as api.S3Settings - if jc.Decode(&s3as) != nil { - return - } else if err := s3as.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update s3 authentication settings, error: %v", err), http.StatusBadRequest) - return - } - - // marshal the setting - data, err := json.Marshal(s3as) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) + var s3s api.S3Settings + if jc.Decode(&s3s) != nil { return - } - - // update the setting - if jc.Check("could not update s3 authentication settings", b.updateSetting(jc.Request.Context(), api.SettingS3, string(data), false)) != nil { + } else if err := s3s.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update S3 settings, error: %v", err), http.StatusBadRequest) return + } else if jc.Check("could not update S3 settings", b.ss.UpdateS3Settings(jc.Request.Context(), s3s)) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: api.SettingS3, + Update: s3s, + Timestamp: time.Now().UTC(), + }, + }) } } @@ -1524,8 +1515,8 @@ func (b *Bus) slabsPartialHandlerPOST(jc jape.Context) { if jc.Check("failed to add partial slab", err) != nil { return } - var us api.UploadSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &us); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + us, err := b.ss.UploadSettings(jc.Request.Context()) + if err != nil && !errors.Is(err, api.ErrSettingNotFound) { jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), http.StatusInternalServerError) return } @@ -1559,8 +1550,8 @@ func (b *Bus) paramsHandlerUploadGET(jc jape.Context) { var uploadPacking bool var contractSet string - 
var us api.UploadSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploads, &us); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + us, err := b.ss.UploadSettings(jc.Request.Context()) + if err != nil && !errors.Is(err, api.ErrSettingNotFound) { jc.Error(fmt.Errorf("could not get upload settings: %w", err), http.StatusInternalServerError) return } else if err == nil { @@ -1604,18 +1595,14 @@ func (b *Bus) paramsHandlerGougingGET(jc jape.Context) { } func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { - var gs api.GougingSettings - if gss, err := b.ss.Setting(ctx, api.SettingGouging); err != nil { + gs, err := b.ss.GougingSettings(ctx) + if err != nil { return api.GougingParams{}, err - } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { - b.logger.Panicf("failed to unmarshal gouging settings '%s': %v", gss, err) } - var rs api.RedundancySettings - if rss, err := b.ss.Setting(ctx, api.SettingUploads); err != nil { + us, err := b.ss.UploadSettings(ctx) + if err != nil { return api.GougingParams{}, err - } else if err := json.Unmarshal([]byte(rss), &rs); err != nil { - b.logger.Panicf("failed to unmarshal redundancy settings '%s': %v", rss, err) } cs, err := b.consensusState(ctx) @@ -1626,7 +1613,7 @@ func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { return api.GougingParams{ ConsensusState: cs, GougingSettings: gs, - RedundancySettings: rs, + RedundancySettings: us.Redundancy, TransactionFee: b.cm.RecommendedFee(), }, nil } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index ff51812dc..3bc579f28 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -2,7 +2,6 @@ package bus import ( "context" - "encoding/json" "errors" "fmt" "sync" @@ -20,9 +19,13 @@ import ( type ( Store interface { Autopilot(ctx context.Context, id string) (api.Autopilot, error) - Setting(ctx context.Context, key string) (string, error) UpdateAutopilot(ctx 
context.Context, ap api.Autopilot) error - UpdateSetting(ctx context.Context, key, value string) error + + GougingSettings(ctx context.Context) (api.GougingSettings, error) + UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error + + PinnedSettings(ctx context.Context) (api.PinnedSettings, error) + UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error } ) @@ -106,16 +109,6 @@ func (pm *pinManager) averageRate() decimal.Decimal { return decimal.NewFromFloat(median) } -func (pm *pinManager) pinnedSettings(ctx context.Context) (api.PinnedSettings, error) { - var ps api.PinnedSettings - if pss, err := pm.s.Setting(ctx, api.SettingPinned); err != nil { - return api.PinnedSettings{}, err - } else if err := json.Unmarshal([]byte(pss), &ps); err != nil { - pm.logger.Panicf("failed to unmarshal pinned settings '%s': %v", pss, err) - } - return ps, nil -} - func (pm *pinManager) rateExceedsThreshold(threshold float64) bool { pm.mu.Lock() defer pm.mu.Unlock() @@ -241,11 +234,8 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin var updated bool // fetch gouging settings - var gs api.GougingSettings - if gss, err := pm.s.Setting(ctx, api.SettingGouging); err != nil { - return err - } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { - pm.logger.Panicf("failed to unmarshal gouging settings '%s': %v", gss, err) + gs, err := pm.s.GougingSettings(ctx) + if err != nil { return err } @@ -292,15 +282,14 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin } // validate settings - err := gs.Validate() + err = gs.Validate() if err != nil { pm.logger.Warnw("failed to update gouging setting, new settings make the setting invalid", zap.Error(err)) return err } // update settings - bytes, _ := json.Marshal(gs) - err = pm.s.UpdateSetting(ctx, api.SettingGouging, string(bytes)) + err = pm.s.UpdateGougingSettings(ctx, gs) // broadcast event if err == nil { @@ -309,7 +298,7 @@ func 
(pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin Event: api.EventUpdate, Payload: api.EventSettingUpdate{ Key: api.SettingGouging, - Update: string(bytes), + Update: gs, Timestamp: time.Now().UTC(), }, }) @@ -322,7 +311,7 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { pm.logger.Debugw("updating prices", zap.Bool("forced", forced)) // fetch pinned settings - settings, err := pm.pinnedSettings(ctx) + settings, err := pm.s.PinnedSettings(ctx) if errors.Is(err, api.ErrSettingNotFound) { pm.logger.Debug("price pinning not configured, skipping price update") return nil diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 33e4962eb..37523dd6e 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -111,22 +111,18 @@ func (api *mockForexAPI) setUnreachable(unreachable bool) { type mockPinStore struct { mu sync.Mutex - settings map[string]string + gs api.GougingSettings + ps api.PinnedSettings autopilots map[string]api.Autopilot } func newTestStore() *mockPinStore { s := &mockPinStore{ autopilots: make(map[string]api.Autopilot), - settings: make(map[string]string), + gs: api.DefaultGougingSettings, + ps: api.DefaultPricePinSettings, } - // add default price pin - and gouging settings - b, _ := json.Marshal(api.DefaultPricePinSettings) - s.settings[api.SettingPinned] = string(b) - b, _ = json.Marshal(api.DefaultGougingSettings) - s.settings[api.SettingGouging] = string(b) - // add default autopilot s.autopilots[testAutopilotID] = api.Autopilot{ ID: testAutopilotID, @@ -140,34 +136,30 @@ func newTestStore() *mockPinStore { return s } -func (ms *mockPinStore) gougingSettings() api.GougingSettings { - val, err := ms.Setting(context.Background(), api.SettingGouging) - if err != nil { - panic(err) - } - var gs api.GougingSettings - if err := json.Unmarshal([]byte(val), &gs); err != nil { - panic(err) - } - return gs +func (ms *mockPinStore) GougingSettings(ctx 
context.Context) (api.GougingSettings, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + return ms.gs, nil } -func (ms *mockPinStore) updatPinnedSettings(pps api.PinnedSettings) { - b, _ := json.Marshal(pps) - ms.UpdateSetting(context.Background(), api.SettingPinned, string(b)) - time.Sleep(2 * testUpdateInterval) +func (ms *mockPinStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { + ms.mu.Lock() + defer ms.mu.Unlock() + ms.gs = gs + return nil } -func (ms *mockPinStore) Setting(ctx context.Context, key string) (string, error) { +func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, error) { ms.mu.Lock() defer ms.mu.Unlock() - return ms.settings[key], nil + return ms.ps, nil } -func (ms *mockPinStore) UpdateSetting(ctx context.Context, key, value string) error { +func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { ms.mu.Lock() defer ms.mu.Unlock() - ms.settings[key] = value + ms.ps = ps + time.Sleep(2 * testUpdateInterval) return nil } @@ -221,7 +213,7 @@ func TestPinManager(t *testing.T) { pps.Currency = "usd" pps.Threshold = 0.5 pps.ForexEndpointURL = forex.s.URL - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) // assert price manager is running now if cnt := len(rates()); cnt < 1 { @@ -230,30 +222,30 @@ func TestPinManager(t *testing.T) { // update exchange rate and fetch current gouging settings forex.setRate(2.5) - gs := ms.gougingSettings() + gs, _ := ms.GougingSettings(context.Background()) // configure all pins but disable them for now pps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) // assert gouging settings are unchanged - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) 
{ + if gss, _ := ms.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } // enable the max download pin, with the threshold at 0.5 it should remain unchanged pps.GougingSettingsPins.MaxDownload.Pinned = true - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + ms.UpdatePinnedSettings(context.Background(), pps) + if gss, _ := ms.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } // lower the threshold, gouging settings should be updated pps.Threshold = 0.05 - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { + ms.UpdatePinnedSettings(context.Background(), pps) + if gss, _ := ms.GougingSettings(context.Background()); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) } @@ -261,10 +253,10 @@ func TestPinManager(t *testing.T) { pps.GougingSettingsPins.MaxDownload.Pinned = true pps.GougingSettingsPins.MaxStorage.Pinned = true pps.GougingSettingsPins.MaxUpload.Pinned = true - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) // assert they're all updated - if gss := ms.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || + if gss, _ := ms.GougingSettings(context.Background()); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || gss.MaxStoragePrice.Equals(gs.MaxStoragePrice) || gss.MaxUploadPrice.Equals(gs.MaxUploadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss, gs) @@ -284,7 +276,7 @@ func TestPinManager(t *testing.T) { }, } pps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) // assert autopilot was not updated if 
app, _ := ms.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -294,7 +286,7 @@ func TestPinManager(t *testing.T) { // enable the pin pins.Allowance.Pinned = true pps.Autopilots[testAutopilotID] = pins - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) // assert autopilot was updated if app, _ := ms.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -305,7 +297,7 @@ func TestPinManager(t *testing.T) { forex.setUnreachable(true) // assert alert was registered - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) res, _ := a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) == 0 { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) @@ -315,7 +307,7 @@ func TestPinManager(t *testing.T) { forex.setUnreachable(false) // assert alert was dismissed - ms.updatPinnedSettings(pps) + ms.UpdatePinnedSettings(context.Background(), pps) res, _ = a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) != 0 { t.Fatalf("expected 0 alerts, got %d", len(a.alerts)) diff --git a/stores/settingsdb.go b/stores/settingsdb.go index ea31b25bd..407a53efd 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -2,14 +2,118 @@ package stores import ( "context" + "encoding/json" "fmt" + "go.sia.tech/renterd/api" sql "go.sia.tech/renterd/stores/sql" ) -// Setting implements the bus.SettingStore interface. -func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { - // Check cache first. 
+func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, _ error) { + value, err := s.fetchSetting(ctx, api.SettingGouging) + if err != nil { + return api.GougingSettings{}, err + } + + if err := json.Unmarshal([]byte(value), &gs); err != nil { + s.logger.Panicf("failed to unmarshal gouging settings '%s': %v", value, err) + return api.GougingSettings{}, err + } + return +} + +func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { + data, err := json.Marshal(gs) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + return s.updateSetting(ctx, api.SettingGouging, string(data)) +} + +func (s *SQLStore) PinnedSettings(ctx context.Context) (pps api.PinnedSettings, _ error) { + value, err := s.fetchSetting(ctx, api.SettingPinned) + if err != nil { + return api.PinnedSettings{}, err + } + + if err := json.Unmarshal([]byte(value), &pps); err != nil { + s.logger.Panicf("failed to unmarshal pinned settings '%s': %v", value, err) + return api.PinnedSettings{}, err + } + return +} + +func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error { + data, err := json.Marshal(pps) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + return s.updateSetting(ctx, api.SettingPinned, string(data)) +} + +func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, _ error) { + value, err := s.fetchSetting(ctx, api.SettingUploads) + if err != nil { + return api.UploadSettings{}, err + } + + if err := json.Unmarshal([]byte(value), &us); err != nil { + s.logger.Panicf("failed to unmarshal upload settings '%s': %v", value, err) + return api.UploadSettings{}, err + } + return +} + +func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error { + data, err := json.Marshal(us) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + 
return s.updateSetting(ctx, api.SettingUploads, string(data)) +} + +func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, _ error) { + value, err := s.fetchSetting(ctx, api.SettingS3) + if err != nil { + return api.S3Settings{}, err + } + + if err := json.Unmarshal([]byte(value), &ss); err != nil { + s.logger.Panicf("failed to unmarshal s3 settings '%s': %v", value, err) + return api.S3Settings{}, err + } + return +} + +func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) error { + data, err := json.Marshal(ss) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + return s.updateSetting(ctx, api.SettingS3, string(data)) +} + +func (s *SQLStore) DeleteSetting(ctx context.Context, key string) (err error) { + return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.DeleteSetting(ctx, key) + }) +} + +func (s *SQLStore) Setting(ctx context.Context, key string, out interface{}) (err error) { + var value string + err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + value, err = tx.Setting(ctx, key) + return err + }) + if err != nil { + return fmt.Errorf("failed to fetch setting from db: %w", err) + } + + return json.Unmarshal([]byte(value), &out) +} + +func (s *SQLStore) fetchSetting(ctx context.Context, key string) (string, error) { + // check cache first s.settingsMu.Lock() defer s.settingsMu.Unlock() value, ok := s.settings[key] @@ -17,7 +121,7 @@ func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { return value, nil } - // Check database. + // check database var err error err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { value, err = tx.Setting(ctx, key) @@ -30,8 +134,7 @@ func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { return value, nil } -// UpdateSetting implements the bus.SettingStore interface. 
-func (s *SQLStore) UpdateSetting(ctx context.Context, key, value string) error { +func (s *SQLStore) updateSetting(ctx context.Context, key, value string) error { // update db first s.settingsMu.Lock() defer s.settingsMu.Unlock() diff --git a/stores/settingsdb_test.go b/stores/settingsdb_test.go deleted file mode 100644 index 9eda8f546..000000000 --- a/stores/settingsdb_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package stores - -import ( - "context" - "testing" -) - -// TestSQLSettingStore tests the bus.SettingStore methods on the SQLSettingStore. -func TestSQLSettingStore(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - // add a setting - if err := ss.UpdateSetting(context.Background(), "foo", "bar"); err != nil { - t.Fatal(err) - } - - // assert we can query the setting by key - if value, err := ss.Setting(context.Background(), "foo"); err != nil { - t.Fatal(err) - } else if value != "bar" { - t.Fatalf("unexpected value, %s != 'bar'", value) - } - - // assert we can update the setting - if err := ss.UpdateSetting(context.Background(), "foo", "barbaz"); err != nil { - t.Fatal(err) - } else if value, err := ss.Setting(context.Background(), "foo"); err != nil { - t.Fatal(err) - } else if value != "barbaz" { - t.Fatalf("unexpected value, %s != 'barbaz'", value) - } -} diff --git a/stores/sql/database.go b/stores/sql/database.go index e4a8ab967..b1f45ef70 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -150,6 +150,9 @@ type ( // prefix and returns 'true' if any object was deleted. DeleteObjects(ctx context.Context, bucket, prefix string, limit int64) (bool, error) + // DeleteSetting deletes the setting with the given key. + DeleteSetting(ctx context.Context, key string) error + // DeleteWebhook deletes the webhook with the matching module, event and // URL of the provided webhook. If the webhook doesn't exist, // webhooks.ErrWebhookNotFound is returned. 
diff --git a/stores/sql/main.go b/stores/sql/main.go index e0d158552..02f876214 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -548,6 +548,13 @@ func DeleteMetadata(ctx context.Context, tx sql.Tx, objID int64) error { return err } +func DeleteSetting(ctx context.Context, tx sql.Tx, key string) error { + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", key); err != nil { + return fmt.Errorf("failed to delete setting '%s': %w", key, err) + } + return nil +} + func DeleteWebhook(ctx context.Context, tx sql.Tx, wh webhooks.Webhook) error { res, err := tx.Exec(ctx, "DELETE FROM webhooks WHERE module = ? AND event = ? AND url = ?", wh.Module, wh.Event, wh.URL) if err != nil { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 5be1a9c3f..0c5b0dce0 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -340,6 +340,10 @@ func (tx *MainDatabaseTx) InsertMultipartUpload(ctx context.Context, bucket, key return ssql.InsertMultipartUpload(ctx, tx, bucket, key, ec, mimeType, metadata) } +func (tx *MainDatabaseTx) DeleteSetting(ctx context.Context, key string) error { + return ssql.DeleteSetting(ctx, tx, key) +} + func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { return ssql.DeleteWebhook(ctx, tx, wh) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 50ea2619f..fa253a004 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -332,6 +332,10 @@ func (tx *MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicK return ssql.DeleteHostSector(ctx, tx, hk, root) } +func (tx *MainDatabaseTx) DeleteSetting(ctx context.Context, key string) error { + return ssql.DeleteSetting(ctx, tx, key) +} + func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { return ssql.DeleteWebhook(ctx, tx, wh) } From 0e43e8d03bf57b94401c790d8c352c9997b82d65 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 
09:42:44 +0200 Subject: [PATCH 18/98] all: update docs and defaults --- .github/ISSUE_TEMPLATE/bug_report.yml | 6 ++-- README.md | 23 -------------- api/setting.go | 45 ++++++++++----------------- bus/bus.go | 2 +- internal/bus/pinmanager_test.go | 4 +-- internal/test/config.go | 2 +- internal/test/e2e/cluster.go | 2 +- 7 files changed, 25 insertions(+), 59 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 5219b07cf..a5ccf759f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -66,10 +66,10 @@ body: description: | The configuration of your bus ```bash - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/contractset curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/gouging - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/redundancy - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/uploadpacking + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/pinned + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/s3 + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/uploads ``` placeholder: Paste the output of the above commands here validations: diff --git a/README.md b/README.md index 7508620c5..698a7c4b5 100644 --- a/README.md +++ b/README.md @@ -558,29 +558,6 @@ formed. } ``` -### Gouging - -The default gouging settings are listed below. 
The gouging settings can be -updated using the settings API: - -- `GET /api/bus/settings/gouging` -- `PUT /api/bus/settings/gouging` - -```json -{ - "hostBlockHeightLeeway": 6, // 6 blocks - "maxContractPrice": "15000000000000000000000000", // 15 SC per contract - "maxDownloadPrice": "3000000000000000000000000000", // 3000 SC per 1 TB - "maxRPCPrice": "1000000000000000000000", // 1mS per RPC - "maxStoragePrice": "631593542824", // 3000 SC per TB per month - "maxUploadPrice": "3000000000000000000000000000", // 3000 SC per 1 TB - "migrationSurchargeMultiplier": 10, // overpay up to 10x for sectors migrations on critical slabs - "minAccountExpiry": 86400000000000, // 1 day - "minMaxEphemeralAccountBalance": "1000000000000000000000000", // 1 SC - "minPriceTableValidity": 300000000000 // 5 minutes -} -``` - ### Blocklist Unfortunately the Sia blockchain is subject to hosts that announced themselves diff --git a/api/setting.go b/api/setting.go index f3e0f2b63..4627a31a7 100644 --- a/api/setting.go +++ b/api/setting.go @@ -32,9 +32,7 @@ var ( ErrSettingNotFound = errors.New("setting not found") // DefaultGougingSettings define the default gouging settings the bus is - // configured with on startup. These values can be adjusted using the - // settings API. - // + // configured with on startup. DefaultGougingSettings = GougingSettings{ MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(15), // 15 SC per contract @@ -48,40 +46,31 @@ var ( MigrationSurchargeMultiplier: 10, // 10x } - // DefaultPricePinSettings define the default price pin settings the bus is - // configured with on startup. These values can be adjusted using the - // settings API. - DefaultPricePinSettings = PinnedSettings{ + // DefaultPinnedSettings define the default pin settings the bus is + // configured with on startup. 
+ DefaultPinnedSettings = PinnedSettings{ Enabled: false, Currency: "usd", ForexEndpointURL: "https://api.siascan.com/exchange-rate/siacoin", Threshold: 0.05, } + // DefaultUploadSettings define the default upload settings the bus is + // configured with on startup. DefaultUploadSettings = UploadSettings{ - Packing: DefaultUploadPackingSettings, - Redundancy: DefaultRedundancySettings, + Packing: UploadPackingSettings{ + Enabled: true, + SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB + }, + Redundancy: RedundancySettings{ + MinShards: 10, + TotalShards: 30, + }, } - // DefaultUploadPackingSettings define the default upload packing settings - // the bus is configured with on startup. - DefaultUploadPackingSettings = UploadPackingSettings{ - Enabled: true, - SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB - } - - // DefaultRedundancySettings define the default redundancy settings the bus - // is configured with on startup. These values can be adjusted using the - // settings API. - // - // NOTE: default redundancy settings for testnet are different from mainnet. - DefaultRedundancySettings = RedundancySettings{ - MinShards: 10, - TotalShards: 30, - } - - // Same as DefaultRedundancySettings but for running on testnet networks due - // to their reduced number of hosts. + // DefaultRedundancySettingsTestnet defines redundancy settings for the + // testnet, these are lower due to the reduced number of hosts on the + // testnet. 
DefaultRedundancySettingsTestnet = RedundancySettings{ MinShards: 2, TotalShards: 6, diff --git a/bus/bus.go b/bus/bus.go index e1e5563f9..47de9c44f 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -638,7 +638,7 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { if err := b.ss.Setting(ctx, "pricepinning", &pps); err != nil && !errors.Is(err, api.ErrSettingNotFound) { return err } else if errors.Is(err, api.ErrSettingNotFound) { - if err := b.ss.UpdatePinnedSettings(ctx, api.DefaultPricePinSettings); err != nil { + if err := b.ss.UpdatePinnedSettings(ctx, api.DefaultPinnedSettings); err != nil { return err } } else { diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 37523dd6e..a90d76e38 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -120,7 +120,7 @@ func newTestStore() *mockPinStore { s := &mockPinStore{ autopilots: make(map[string]api.Autopilot), gs: api.DefaultGougingSettings, - ps: api.DefaultPricePinSettings, + ps: api.DefaultPinnedSettings, } // add default autopilot @@ -208,7 +208,7 @@ func TestPinManager(t *testing.T) { } // enable price pinning - pps := api.DefaultPricePinSettings + pps := api.DefaultPinnedSettings pps.Enabled = true pps.Currency = "usd" pps.Threshold = 0.5 diff --git a/internal/test/config.go b/internal/test/config.go index f55c8a9e0..b33c2b0d6 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -49,7 +49,7 @@ var ( MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC } - PricePinSettings = api.DefaultPricePinSettings + PricePinSettings = api.DefaultPinnedSettings RedundancySettings = api.RedundancySettings{ MinShards: 2, diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index ab057e445..e79a1e605 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -446,7 +446,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { us := test.UploadSettings us.Packing = 
api.UploadPackingSettings{ Enabled: enableUploadPacking, - SlabBufferMaxSizeSoft: api.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft, + SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB, } // Build S3 settings. From 8c76a212257f907fbd6507f90924f514dfc6c768 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 10:14:25 +0200 Subject: [PATCH 19/98] all: cleanup PR --- api/setting.go | 2 +- bus/bus.go | 8 ++++---- bus/routes.go | 9 ++++++--- internal/worker/cache.go | 14 +++++++------- stores/settingsdb.go | 4 ++-- 5 files changed, 20 insertions(+), 17 deletions(-) diff --git a/api/setting.go b/api/setting.go index 4627a31a7..dd2cb78af 100644 --- a/api/setting.go +++ b/api/setting.go @@ -13,7 +13,7 @@ const ( SettingGouging = "gouging" SettingPinned = "pinned" SettingS3 = "s3" - SettingUploads = "uploads" + SettingUpload = "upload" ) const ( diff --git a/bus/bus.go b/bus/bus.go index 47de9c44f..ff5320f70 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -474,8 +474,8 @@ func (b *Bus) Handler() http.Handler { "PUT /settings/pinned": b.settingsPinnedHandlerPUT, "GET /settings/s3": b.settingsS3HandlerGET, "PUT /settings/s3": b.settingsS3HandlerPUT, - "GET /settings/uploads": b.settingsUploadsHandlerGET, - "PUT /settings/uploads": b.settingsUploadsHandlerPUT, + "GET /settings/upload": b.settingsUploadHandlerGET, + "PUT /settings/upload": b.settingsUploadHandlerPUT, "POST /slabs/migration": b.slabsMigrationHandlerPOST, "GET /slabs/partial/:key": b.slabsPartialHandlerGET, @@ -617,8 +617,8 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { b.ss.Setting(ctx, api.SettingGouging, struct{}{}), b.ss.Setting(ctx, api.SettingPinned, struct{}{}), b.ss.Setting(ctx, api.SettingS3, struct{}{}), - b.ss.Setting(ctx, api.SettingUploads, struct{}{}), - ), api.ErrAutopilotNotFound) { + b.ss.Setting(ctx, api.SettingUpload, struct{}{}), + ), api.ErrSettingNotFound) { return nil } diff --git a/bus/routes.go b/bus/routes.go index 593aa1112..e681b1b93 100644 --- a/bus/routes.go +++ 
b/bus/routes.go @@ -1290,6 +1290,9 @@ func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { if jc.Check("failed to fetch autopilots", err) != nil { return } + if pps.Autopilots == nil { + pps.Autopilots = make(map[string]api.AutopilotPins) + } for _, ap := range aps { if _, exists := pps.Autopilots[ap.ID]; !exists { pps.Autopilots[ap.ID] = api.AutopilotPins{} @@ -1326,7 +1329,7 @@ func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { } } -func (b *Bus) settingsUploadsHandlerGET(jc jape.Context) { +func (b *Bus) settingsUploadHandlerGET(jc jape.Context) { if us, err := b.ss.UploadSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) } else if jc.Check("failed to get upload settings", err) == nil { @@ -1334,7 +1337,7 @@ func (b *Bus) settingsUploadsHandlerGET(jc jape.Context) { } } -func (b *Bus) settingsUploadsHandlerPUT(jc jape.Context) { +func (b *Bus) settingsUploadHandlerPUT(jc jape.Context) { var us api.UploadSettings if jc.Decode(&us) != nil { return @@ -1346,7 +1349,7 @@ func (b *Bus) settingsUploadsHandlerPUT(jc jape.Context) { Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingUploads, + Key: api.SettingUpload, Update: us, Timestamp: time.Now().UTC(), }, diff --git a/internal/worker/cache.go b/internal/worker/cache.go index f6c1ea574..ebb071acd 100644 --- a/internal/worker/cache.go +++ b/internal/worker/cache.go @@ -333,15 +333,15 @@ func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { gp.GougingSettings = gs c.cache.Set(cacheKeyGougingParams, gp) - case api.SettingUploads: - var rs api.RedundancySettings - if err := json.Unmarshal(data, &rs); err != nil { - return fmt.Errorf("couldn't update redundancy settings, invalid request body, %t", e.Update) - } else if err := rs.Validate(); err != nil { - return fmt.Errorf("couldn't update redundancy settings, error: %v", err) + case api.SettingUpload: + var us api.UploadSettings + 
if err := json.Unmarshal(data, &us); err != nil { + return fmt.Errorf("couldn't update upload settings, invalid request body, %t", e.Update) + } else if err := us.Validate(); err != nil { + return fmt.Errorf("couldn't update upload settings, error: %v", err) } - gp.RedundancySettings = rs + gp.RedundancySettings = us.Redundancy c.cache.Set(cacheKeyGougingParams, gp) default: } diff --git a/stores/settingsdb.go b/stores/settingsdb.go index 407a53efd..658b6de1f 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -52,7 +52,7 @@ func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSetti } func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, _ error) { - value, err := s.fetchSetting(ctx, api.SettingUploads) + value, err := s.fetchSetting(ctx, api.SettingUpload) if err != nil { return api.UploadSettings{}, err } @@ -69,7 +69,7 @@ func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettin if err != nil { return fmt.Errorf("couldn't marshal the given value, error: %v", err) } - return s.updateSetting(ctx, api.SettingUploads, string(data)) + return s.updateSetting(ctx, api.SettingUpload, string(data)) } func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, _ error) { From 36a8d70ff5d09100373eca6945cd47c48f5326db Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 09:51:08 +0200 Subject: [PATCH 20/98] stores: fix TestListObject tests --- stores/metadata_test.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 35cce7796..ba0dc4710 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1431,9 +1431,9 @@ func TestObjectHealth(t *testing.T) { } } -// TestListObjectsWithPrefix is a test for the TestListObjects method -// with '/' as the prefix. 
-func TestListObjectsWithPrefix(t *testing.T) { +// TestListObjectsWithDelimiterSlash is a test for the +// TestListObjects method with '/' as the prefix. +func TestListObjectsWithDelimiterSlash(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -1532,7 +1532,7 @@ func TestListObjectsWithPrefix(t *testing.T) { {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { - resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } @@ -1545,7 +1545,7 @@ func TestListObjectsWithPrefix(t *testing.T) { var marker string for offset := 0; offset < len(test.want); offset++ { - resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, marker, 1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } @@ -1567,10 +1567,11 @@ func TestListObjectsWithPrefix(t *testing.T) { continue } - resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, 1) + resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, test.want[offset].Name, 1) if err != nil { t.Fatal(err) } + got = resp.Objects assertMetadata(got) if len(got) != 1 || got[0] != test.want[offset+1] { @@ -1633,14 +1634,14 @@ func TestListObjectsExplicitDir(t *testing.T) { {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Name: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, } for _, test := range tests { - got, err := 
ss.ListObjects(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", -1) + got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } for i := range got.Objects { got.Objects[i].ModTime = api.TimeRFC3339{} // ignore time for comparison } - if !reflect.DeepEqual(got, test.want) { + if !reflect.DeepEqual(got.Objects, test.want) { t.Fatalf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) } } @@ -3565,7 +3566,7 @@ func TestMarkSlabUploadedAfterRenew(t *testing.T) { } } -func TestListObjectsNoPrefix(t *testing.T) { +func TestListObjectsNoDelimiter(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { From c45aa0beb485808e3d083fdb67465cd07675c12b Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 10:27:35 +0200 Subject: [PATCH 21/98] all: cleanup PR --- api/setting.go | 46 ++++++++++++++++++++++----------------------- bus/bus.go | 29 +++++++++++++++++++++++++--- cmd/renterd/node.go | 32 ------------------------------- 3 files changed, 48 insertions(+), 59 deletions(-) diff --git a/api/setting.go b/api/setting.go index dd2cb78af..237a96f5f 100644 --- a/api/setting.go +++ b/api/setting.go @@ -30,7 +30,9 @@ var ( // ErrSettingNotFound is returned if a requested setting is not present in the // database. ErrSettingNotFound = errors.New("setting not found") +) +var ( // DefaultGougingSettings define the default gouging settings the bus is // configured with on startup. DefaultGougingSettings = GougingSettings{ @@ -55,6 +57,22 @@ var ( Threshold: 0.05, } + // DefaultRedundancySettingsTestnet defines redundancy settings for the + // testnet, these are lower due to the reduced number of hosts on the + // testnet. 
+ DefaultRedundancySettingsTestnet = RedundancySettings{ + MinShards: 2, + TotalShards: 6, + } + + // DefaultS3Settings defines the 3 settings the bus is configured with on + // startup. + DefaultS3Settings = S3Settings{ + Authentication: S3AuthenticationSettings{ + V4Keypairs: map[string]string{}, + }, + } + // DefaultUploadSettings define the default upload settings the bus is // configured with on startup. DefaultUploadSettings = UploadSettings{ @@ -67,14 +85,6 @@ var ( TotalShards: 30, }, } - - // DefaultRedundancySettingsTestnet defines redundancy settings for the - // testnet, these are lower due to the reduced number of hosts on the - // testnet. - DefaultRedundancySettingsTestnet = RedundancySettings{ - MinShards: 2, - TotalShards: 6, - } ) type ( @@ -235,20 +245,12 @@ func (gs GougingSettings) Validate() error { return nil } +// Validate returns an error if the upload settings are not considered valid. func (us UploadSettings) Validate() error { - return errors.Join( - us.Packing.Validate(), - us.Redundancy.Validate(), - ) -} - -// Validate returns an error if the upload packing settings are not considered -// valid. -func (up UploadPackingSettings) Validate() error { - if up.Enabled && up.SlabBufferMaxSizeSoft <= 0 { + if us.Packing.Enabled && us.Packing.SlabBufferMaxSizeSoft <= 0 { return errors.New("SlabBufferMaxSizeSoft must be greater than zero when upload packing is enabled") } - return nil + return us.Redundancy.Validate() } // Redundancy returns the effective storage redundancy of the @@ -285,11 +287,7 @@ func (rs RedundancySettings) Validate() error { // Validate returns an error if the authentication settings are not considered // valid. 
func (s3s S3Settings) Validate() error { - return s3s.Authentication.Validate() -} - -func (s3a S3AuthenticationSettings) Validate() error { - for accessKeyID, secretAccessKey := range s3a.V4Keypairs { + for accessKeyID, secretAccessKey := range s3s.Authentication.V4Keypairs { if accessKeyID == "" { return fmt.Errorf("AccessKeyID cannot be empty") } else if len(accessKeyID) < S3MinAccessKeyLen || len(accessKeyID) > S3MaxAccessKeyLen { diff --git a/bus/bus.go b/bus/bus.go index ff5320f70..dc6dff267 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -622,13 +622,28 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { return nil } + // migrate gouging settings + if _, err := b.ss.GougingSettings(ctx); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + return err + } else if errors.Is(err, api.ErrSettingNotFound) { + if err := b.ss.UpdateGougingSettings(ctx, api.DefaultGougingSettings); err != nil { + return err + } + } + // migrate S3 settings var s3as api.S3AuthenticationSettings if err := b.ss.Setting(ctx, "s3authentication", &s3as); err != nil && !errors.Is(err, api.ErrSettingNotFound) { return err + } else if errors.Is(err, api.ErrSettingNotFound) { + if err := b.ss.UpdateS3Settings(ctx, api.DefaultS3Settings); err != nil { + return err + } } else if err == nil { s3s := api.S3Settings{Authentication: s3as} - if err := b.ss.UpdateS3Settings(ctx, s3s); err != nil { + if err := s3s.Validate(); err != nil { + return fmt.Errorf("failed to migrate S3 setting: %w", err) + } else if err := b.ss.UpdateS3Settings(ctx, s3s); err != nil { return err } } @@ -642,7 +657,9 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { return err } } else { - if err := b.ss.UpdatePinnedSettings(ctx, pps); err != nil { + if err := pps.Validate(); err != nil { + return fmt.Errorf("failed to migrate pinned setting: %w", err) + } else if err := b.ss.UpdatePinnedSettings(ctx, pps); err != nil { return err } } @@ -681,5 +698,11 @@ func (b *Bus) compatV2Settings(ctx 
context.Context) error { us.Packing = ups } - return b.ss.UpdateUploadSettings(ctx, us) + if err := us.Validate(); err != nil { + return fmt.Errorf("failed to migrate upload setting: %w", err) + } else if err := b.ss.UpdateUploadSettings(ctx, us); err != nil { + return err + } + + return nil } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index e87116c93..5510788bb 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -9,7 +9,6 @@ import ( "os" "path/filepath" "runtime" - "strings" "time" "go.sia.tech/core/consensus" @@ -21,7 +20,6 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" - "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus" @@ -411,36 +409,6 @@ func (n *node) Run() error { } } - // set initial S3 keys - if n.cfg.S3.Enabled && !n.cfg.S3.DisableAuth { - s3s, err := n.bus.S3Settings(context.Background()) - if err != nil && !strings.Contains(err.Error(), api.ErrSettingNotFound.Error()) { - return fmt.Errorf("failed to fetch S3 settings: %w", err) - } else if s3s.Authentication.V4Keypairs == nil { - s3s.Authentication.V4Keypairs = make(map[string]string) - } - - // S3 key pair validation was broken at one point, we need to remove the - // invalid key pairs here to ensure we don't fail when we update the - // setting below. 
- for k, v := range s3s.Authentication.V4Keypairs { - if err := (api.S3AuthenticationSettings{V4Keypairs: map[string]string{k: v}}).Validate(); err != nil { - n.logger.Infof("removing invalid S3 keypair for AccessKeyID %s, reason: %v", k, err) - delete(s3s.Authentication.V4Keypairs, k) - } - } - - // merge keys - for k, v := range n.cfg.S3.KeypairsV4 { - s3s.Authentication.V4Keypairs[k] = v - } - - // update settings - if err := n.bus.UpdateS3Settings(context.Background(), s3s); err != nil { - return fmt.Errorf("failed to update S3 settings: %w", err) - } - } - // start S3 server if n.s3Srv != nil { go n.s3Srv.Serve(n.s3Listener) From 7d7257b9a505eb3b4ca926514bd2c86db5d267a7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 10:42:28 +0200 Subject: [PATCH 22/98] sql: combine ObjectEntries and ListObjects in interface --- bus/routes.go | 4 +- internal/test/e2e/cluster_test.go | 34 +- stores/metadata.go | 8 +- stores/sql/database.go | 5 +- stores/sql/main.go | 687 +++++++++++++++--------------- stores/sql/mysql/main.go | 8 +- stores/sql/sqlite/main.go | 8 +- 7 files changed, 375 insertions(+), 379 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index 13770da8c..73c65cc72 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1064,15 +1064,15 @@ func (b *Bus) objectHandlerGET(jc jape.Context) { } func (b *Bus) objectsHandlerGET(jc jape.Context) { - var limit int var marker, delim, prefix, sortBy, sortDir string bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return } - if jc.DecodeForm("delimiter", &limit) != nil { + if jc.DecodeForm("delimiter", &delim) != nil { return } + limit := -1 if jc.DecodeForm("limit", &limit) != nil { return } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index ef527c975..b6f7da3f9 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -34,7 +34,7 @@ import ( "lukechampine.com/frand" ) -func TestListObjects(t 
*testing.T) { +func TestListObjectsWithNoDelimiter(t *testing.T) { if testing.Short() { t.SkipNow() } @@ -378,11 +378,11 @@ func TestNewTestCluster(t *testing.T) { } } -// TestObjectEntries is an integration test that verifies objects are uploaded, -// download and deleted from and to the paths we would expect. It is similar to -// the TestObjectEntries unit test, but uses the worker and bus client to verify -// paths are passed correctly. -func TestObjectEntries(t *testing.T) { +// TestListObjectsWithDelimiterSlash is an integration test that verifies +// objects are uploaded, download and deleted from and to the paths we +// would expect. It is similar to the TestObjectEntries unit test, but uses +// the worker and bus client to verify paths are passed correctly. +func TestListObjectsWithDelimiterSlash(t *testing.T) { if testing.Short() { t.SkipNow() } @@ -498,11 +498,12 @@ func TestObjectEntries(t *testing.T) { var marker string for offset := 0; offset < len(test.want); offset++ { res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.path + test.prefix, - SortBy: test.sortBy, - SortDir: test.sortDir, - Marker: marker, - Limit: 1, + Delimiter: "/", + Prefix: test.path + test.prefix, + SortBy: test.sortBy, + SortDir: test.sortDir, + Marker: marker, + Limit: 1, }) marker = res.NextMarker if err != nil { @@ -524,11 +525,12 @@ func TestObjectEntries(t *testing.T) { } res, err = b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.path + test.prefix, - SortBy: test.sortBy, - SortDir: test.sortDir, - Marker: test.want[offset].Name, - Limit: 1, + Delimiter: "/", + Prefix: test.path + test.prefix, + SortBy: test.sortBy, + SortDir: test.sortDir, + Marker: test.want[offset].Name, + Limit: 1, }) if err != nil { t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %vmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, err) diff --git 
a/stores/metadata.go b/stores/metadata.go index e716e09c7..c84e6c646 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -788,13 +788,7 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []types func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - if delim == "" { - resp, err = tx.ListObjects(ctx, bucket, prefix, sortBy, sortDir, marker, limit) - } else if delim == "/" { - resp, err = tx.ObjectEntries(ctx, bucket, prefix, sortBy, sortDir, marker, limit) - } else { - return fmt.Errorf("unsupported delimiter: '%s'", delim) - } + resp, err = tx.ListObjects(ctx, bucket, prefix, delim, sortBy, sortDir, marker, limit) return err }) return diff --git a/stores/sql/database.go b/stores/sql/database.go index e314d848f..a9d4e8a06 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -193,7 +193,7 @@ type ( ListBuckets(ctx context.Context) ([]api.Bucket, error) // ListObjects returns a list of objects from the given bucket. - ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) + ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) // MakeDirsForPath creates all directories for a given object's path. MakeDirsForPath(ctx context.Context, path string) (int64, error) @@ -217,9 +217,6 @@ type ( // Object returns an object from the database. Object(ctx context.Context, bucket, key string) (api.Object, error) - // ObjectEntries queries the database for objects in a given dir. - ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) - // ObjectMetadata returns an object's metadata. 
ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) diff --git a/stores/sql/main.go b/stores/sql/main.go index d7241d7f4..202108c9e 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1013,102 +1013,15 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) { return orderByExprs, nil } -func ListObjects(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - // fetch one more to see if there are more entries - if limit <= -1 { - limit = math.MaxInt - } else if limit != math.MaxInt { - limit++ - } - - // establish sane defaults for sorting - if sortBy == "" { - sortBy = api.ObjectSortByName - } - if sortDir == "" { - sortDir = api.ObjectSortDirAsc - } - - // filter by bucket - whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"} - whereArgs := []any{bucket} - - // apply prefix - if prefix != "" { - whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ?") - whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) - } - - // apply sorting - orderByExprs, err := orderByObject(sortBy, sortDir) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) - } - - // apply marker - markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { - err := tx.QueryRow(ctx, fmt.Sprintf(` - SELECT o.%s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE b.name = ? AND o.object_id = ? - `, col), bucket, marker).Scan(dst) - if errors.Is(err, dsql.ErrNoRows) { - return api.ErrMarkerNotFound - } else { - return err - } - }) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to get marker exprs: %w", err) - } - whereExprs = append(whereExprs, markerExprs...) - whereArgs = append(whereArgs, markerArgs...) 
- - // apply limit - whereArgs = append(whereArgs, limit) - - // run query - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM objects o - WHERE %s - ORDER BY %s - LIMIT ? - `, - tx.SelectObjectMetadataExpr(), - strings.Join(whereExprs, " AND "), - strings.Join(orderByExprs, ", ")), - whereArgs...) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - - var hasMore bool - var nextMarker string - if len(objects) == limit { - objects = objects[:len(objects)-1] - if len(objects) > 0 { - hasMore = true - nextMarker = objects[len(objects)-1].Name - } +func ListObjects(ctx context.Context, tx Tx, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { + if delim == "" { + resp, err = listObjectsNoDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) + } else if delim == "/" { + resp, err = listObjectsSlashDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) + } else { + err = fmt.Errorf("unsupported delimiter: '%s'", delim) } - - return api.ObjectsListResponse{ - HasMore: hasMore, - NextMarker: nextMarker, - Objects: objects, - }, nil + return } func MultipartUpload(ctx context.Context, tx sql.Tx, uploadID string) (api.MultipartUpload, error) { @@ -1353,266 +1266,99 @@ func dirID(ctx context.Context, tx sql.Tx, dirPath string) (int64, error) { return id, nil } -func ObjectEntries(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - // split prefix into path and object prefix - path := "/" // root of bucket - if idx := strings.LastIndex(prefix, "/"); idx != -1 { - path = prefix[:idx+1] - prefix = 
prefix[idx+1:] - } - if !strings.HasSuffix(path, "/") { - panic("path must end with /") - } - - // fetch one more to see if there are more entries - if limit <= -1 { - limit = math.MaxInt - } else if limit != math.MaxInt { - limit++ - } - - // establish sane defaults for sorting - if sortBy == "" { - sortBy = api.ObjectSortByName - } - if sortDir == "" { - sortDir = api.ObjectSortDirAsc +func ObjectMetadata(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) { + // fetch object id + var objID int64 + if err := tx.QueryRow(ctx, ` + SELECT o.id + FROM objects o + INNER JOIN buckets b ON b.id = o.db_bucket_id + WHERE o.object_id = ? AND b.name = ? + `, key, bucket).Scan(&objID); errors.Is(err, dsql.ErrNoRows) { + return api.Object{}, api.ErrObjectNotFound + } else if err != nil { + return api.Object{}, fmt.Errorf("failed to fetch object id: %w", err) } - // fetch directory id - dirID, err := dirID(ctx, tx, path) - if errors.Is(err, dsql.ErrNoRows) { - return api.ObjectsListResponse{}, nil - } else if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch directory id: %w", err) + // fetch metadata + om, err := tx.ScanObjectMetadata(tx.QueryRow(ctx, fmt.Sprintf(` + SELECT %s + FROM objects o + WHERE o.id = ? + `, tx.SelectObjectMetadataExpr()), objID)) + if err != nil { + return api.Object{}, fmt.Errorf("failed to fetch object metadata: %w", err) } - args := []any{ - path, - dirID, bucket, + // fetch user metadata + rows, err := tx.Query(ctx, ` + SELECT oum.key, oum.value + FROM object_user_metadata oum + WHERE oum.db_object_id = ? + ORDER BY oum.id ASC + `, objID) + if err != nil { + return api.Object{}, fmt.Errorf("failed to fetch user metadata: %w", err) } + defer rows.Close() - // apply prefix - var prefixExpr string - if prefix != "" { - prefixExpr = "AND SUBSTR(o.object_id, 1, ?) = ?" 
- args = append(args, - utf8.RuneCountInString(path+prefix), path+prefix, - utf8.RuneCountInString(path+prefix), path+prefix, - ) + // build object + metadata := make(api.ObjectUserMetadata) + for rows.Next() { + var key, value string + if err := rows.Scan(&key, &value); err != nil { + return api.Object{}, fmt.Errorf("failed to scan user metadata: %w", err) + } + metadata[key] = value } - args = append(args, - bucket, - path+"%", - utf8.RuneCountInString(path), path, - dirID, - ) + return api.Object{ + Metadata: metadata, + ObjectMetadata: om, + Object: nil, // only return metadata + }, nil +} - // apply marker - var whereExpr string - markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { - var groupFn string - switch col { - case "size": - groupFn = "SUM" - case "health": - groupFn = "MIN" - default: - return fmt.Errorf("unknown column: %v", col) - } - err := tx.QueryRow(ctx, fmt.Sprintf(` - SELECT o.%s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE b.name = ? AND o.object_id = ? - UNION ALL - SELECT %s(o.%s) - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name - WHERE b.name = ? AND d.name = ? - GROUP BY d.id - `, col, groupFn, col, tx.CharLengthExpr()), bucket, marker, bucket, marker).Scan(dst) +func ObjectsStats(ctx context.Context, tx sql.Tx, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + var args []any + var bucketExpr string + var bucketID int64 + if opts.Bucket != "" { + err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE name = ?", opts.Bucket). + Scan(&bucketID) if errors.Is(err, dsql.ErrNoRows) { - return api.ErrMarkerNotFound - } else { - return err + return api.ObjectsStatsResponse{}, api.ErrBucketNotFound + } else if err != nil { + return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch bucket id: %w", err) } - }) + bucketExpr = "WHERE db_bucket_id = ?" 
+ args = append(args, bucketID) + } + + // objects stats + var numObjects, totalObjectsSize uint64 + var minHealth float64 + err := tx.QueryRow(ctx, "SELECT COUNT(*), COALESCE(MIN(health), 1), COALESCE(SUM(size), 0) FROM objects "+bucketExpr, args...). + Scan(&numObjects, &minHealth, &totalObjectsSize) if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to query marker: %w", err) - } else if len(markerExprs) > 0 { - whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") + return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch objects stats: %w", err) } - args = append(args, markerArgs...) - // apply sorting - orderByExprs, err := orderByObject(sortBy, sortDir) + // multipart upload stats + var unfinishedObjects uint64 + err = tx.QueryRow(ctx, "SELECT COUNT(*) FROM multipart_uploads "+bucketExpr, args...). + Scan(&unfinishedObjects) if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) + return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload stats: %w", err) } - // apply offset and limit - args = append(args, limit) - - // objectsQuery consists of 2 parts - // 1. fetch all objects in requested directory - // 2. fetch all sub-directories - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM ( - SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag - FROM objects o - LEFT JOIN directories d ON d.name = o.object_id - WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) %s - AND d.id IS NULL - UNION ALL - SELECT d.name as object_id, SUM(o.size), MIN(o.health), '' as mime_type, MAX(o.created_at) as created_at, '' as etag - FROM objects o - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name %s - WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) - AND o.object_id LIKE ? - AND SUBSTR(o.object_id, 1, ?) = ? - AND d.db_parent_id = ? 
- GROUP BY d.id - ) AS o - %s - ORDER BY %s - LIMIT ? - `, - tx.SelectObjectMetadataExpr(), - prefixExpr, - tx.CharLengthExpr(), - prefixExpr, - whereExpr, - strings.Join(orderByExprs, ", "), - ), args...) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - - // trim last element if we have more - var hasMore bool - var nextMarker string - if len(objects) == limit { - objects = objects[:len(objects)-1] - if len(objects) > 0 { - hasMore = true - nextMarker = objects[len(objects)-1].Name - } - } - - return api.ObjectsListResponse{ - HasMore: hasMore, - NextMarker: nextMarker, - Objects: objects, - }, nil -} - -func ObjectMetadata(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) { - // fetch object id - var objID int64 - if err := tx.QueryRow(ctx, ` - SELECT o.id - FROM objects o - INNER JOIN buckets b ON b.id = o.db_bucket_id - WHERE o.object_id = ? AND b.name = ? - `, key, bucket).Scan(&objID); errors.Is(err, dsql.ErrNoRows) { - return api.Object{}, api.ErrObjectNotFound - } else if err != nil { - return api.Object{}, fmt.Errorf("failed to fetch object id: %w", err) - } - - // fetch metadata - om, err := tx.ScanObjectMetadata(tx.QueryRow(ctx, fmt.Sprintf(` - SELECT %s - FROM objects o - WHERE o.id = ? - `, tx.SelectObjectMetadataExpr()), objID)) - if err != nil { - return api.Object{}, fmt.Errorf("failed to fetch object metadata: %w", err) - } - - // fetch user metadata - rows, err := tx.Query(ctx, ` - SELECT oum.key, oum.value - FROM object_user_metadata oum - WHERE oum.db_object_id = ? 
- ORDER BY oum.id ASC - `, objID) - if err != nil { - return api.Object{}, fmt.Errorf("failed to fetch user metadata: %w", err) - } - defer rows.Close() - - // build object - metadata := make(api.ObjectUserMetadata) - for rows.Next() { - var key, value string - if err := rows.Scan(&key, &value); err != nil { - return api.Object{}, fmt.Errorf("failed to scan user metadata: %w", err) - } - metadata[key] = value - } - - return api.Object{ - Metadata: metadata, - ObjectMetadata: om, - Object: nil, // only return metadata - }, nil -} - -func ObjectsStats(ctx context.Context, tx sql.Tx, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { - var args []any - var bucketExpr string - var bucketID int64 - if opts.Bucket != "" { - err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE name = ?", opts.Bucket). - Scan(&bucketID) - if errors.Is(err, dsql.ErrNoRows) { - return api.ObjectsStatsResponse{}, api.ErrBucketNotFound - } else if err != nil { - return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch bucket id: %w", err) - } - bucketExpr = "WHERE db_bucket_id = ?" - args = append(args, bucketID) - } - - // objects stats - var numObjects, totalObjectsSize uint64 - var minHealth float64 - err := tx.QueryRow(ctx, "SELECT COUNT(*), COALESCE(MIN(health), 1), COALESCE(SUM(size), 0) FROM objects "+bucketExpr, args...). - Scan(&numObjects, &minHealth, &totalObjectsSize) - if err != nil { - return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch objects stats: %w", err) - } - - // multipart upload stats - var unfinishedObjects uint64 - err = tx.QueryRow(ctx, "SELECT COUNT(*) FROM multipart_uploads "+bucketExpr, args...). 
- Scan(&unfinishedObjects) - if err != nil { - return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload stats: %w", err) - } - - // multipart upload part stats - var totalUnfinishedObjectsSize uint64 - err = tx.QueryRow(ctx, "SELECT COALESCE(SUM(size), 0) FROM multipart_parts mp INNER JOIN multipart_uploads mu ON mp.db_multipart_upload_id = mu.id "+bucketExpr, args...). - Scan(&totalUnfinishedObjectsSize) - if err != nil { - return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload part stats: %w", err) - } + // multipart upload part stats + var totalUnfinishedObjectsSize uint64 + err = tx.QueryRow(ctx, "SELECT COALESCE(SUM(size), 0) FROM multipart_parts mp INNER JOIN multipart_uploads mu ON mp.db_multipart_upload_id = mu.id "+bucketExpr, args...). + Scan(&totalUnfinishedObjectsSize) + if err != nil { + return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload part stats: %w", err) + } // total sectors var whereExpr string @@ -2920,3 +2666,268 @@ func Object(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) }, }, nil } + +func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // fetch one more to see if there are more entries + if limit <= -1 { + limit = math.MaxInt + } else if limit != math.MaxInt { + limit++ + } + + // establish sane defaults for sorting + if sortBy == "" { + sortBy = api.ObjectSortByName + } + if sortDir == "" { + sortDir = api.ObjectSortDirAsc + } + + // filter by bucket + whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"} + whereArgs := []any{bucket} + + // apply prefix + if prefix != "" { + whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) 
= ?") + whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) + } + + // apply sorting + orderByExprs, err := orderByObject(sortBy, sortDir) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) + } + + // apply marker + markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { + err := tx.QueryRow(ctx, fmt.Sprintf(` + SELECT o.%s + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + WHERE b.name = ? AND o.object_id = ? + `, col), bucket, marker).Scan(dst) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMarkerNotFound + } else { + return err + } + }) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to get marker exprs: %w", err) + } + whereExprs = append(whereExprs, markerExprs...) + whereArgs = append(whereArgs, markerArgs...) + + // apply limit + whereArgs = append(whereArgs, limit) + + // run query + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT %s + FROM objects o + WHERE %s + ORDER BY %s + LIMIT ? + `, + tx.SelectObjectMetadataExpr(), + strings.Join(whereExprs, " AND "), + strings.Join(orderByExprs, ", ")), + whereArgs...) 
+ if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) + } + defer rows.Close() + + var objects []api.ObjectMetadata + for rows.Next() { + om, err := tx.ScanObjectMetadata(rows) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + objects = append(objects, om) + } + + var hasMore bool + var nextMarker string + if len(objects) == limit { + objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Name + } + } + + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil +} + +func listObjectsSlashDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // split prefix into path and object prefix + path := "/" // root of bucket + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + path = prefix[:idx+1] + prefix = prefix[idx+1:] + } + if !strings.HasSuffix(path, "/") { + panic("path must end with /") + } + + // fetch one more to see if there are more entries + if limit <= -1 { + limit = math.MaxInt + } else if limit != math.MaxInt { + limit++ + } + + // establish sane defaults for sorting + if sortBy == "" { + sortBy = api.ObjectSortByName + } + if sortDir == "" { + sortDir = api.ObjectSortDirAsc + } + + // fetch directory id + dirID, err := dirID(ctx, tx, path) + if errors.Is(err, dsql.ErrNoRows) { + return api.ObjectsListResponse{}, nil + } else if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch directory id: %w", err) + } + + args := []any{ + path, + dirID, bucket, + } + + // apply prefix + var prefixExpr string + if prefix != "" { + prefixExpr = "AND SUBSTR(o.object_id, 1, ?) = ?" 
+ args = append(args, + utf8.RuneCountInString(path+prefix), path+prefix, + utf8.RuneCountInString(path+prefix), path+prefix, + ) + } + + args = append(args, + bucket, + path+"%", + utf8.RuneCountInString(path), path, + dirID, + ) + + // apply marker + var whereExpr string + markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { + var groupFn string + switch col { + case "size": + groupFn = "SUM" + case "health": + groupFn = "MIN" + default: + return fmt.Errorf("unknown column: %v", col) + } + err := tx.QueryRow(ctx, fmt.Sprintf(` + SELECT o.%s + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + WHERE b.name = ? AND o.object_id = ? + UNION ALL + SELECT %s(o.%s) + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name + WHERE b.name = ? AND d.name = ? + GROUP BY d.id + `, col, groupFn, col, tx.CharLengthExpr()), bucket, marker, bucket, marker).Scan(dst) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMarkerNotFound + } else { + return err + } + }) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to query marker: %w", err) + } else if len(markerExprs) > 0 { + whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") + } + args = append(args, markerArgs...) + + // apply sorting + orderByExprs, err := orderByObject(sortBy, sortDir) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) + } + + // apply offset and limit + args = append(args, limit) + + // objectsQuery consists of 2 parts + // 1. fetch all objects in requested directory + // 2. fetch all sub-directories + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT %s + FROM ( + SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag + FROM objects o + LEFT JOIN directories d ON d.name = o.object_id + WHERE o.object_id != ? AND o.db_directory_id = ? 
AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) %s + AND d.id IS NULL + UNION ALL + SELECT d.name as object_id, SUM(o.size), MIN(o.health), '' as mime_type, MAX(o.created_at) as created_at, '' as etag + FROM objects o + INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name %s + WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) + AND o.object_id LIKE ? + AND SUBSTR(o.object_id, 1, ?) = ? + AND d.db_parent_id = ? + GROUP BY d.id + ) AS o + %s + ORDER BY %s + LIMIT ? + `, + tx.SelectObjectMetadataExpr(), + prefixExpr, + tx.CharLengthExpr(), + prefixExpr, + whereExpr, + strings.Join(orderByExprs, ", "), + ), args...) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) + } + defer rows.Close() + + var objects []api.ObjectMetadata + for rows.Next() { + om, err := tx.ScanObjectMetadata(rows) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + objects = append(objects, om) + } + + // trim last element if we have more + var hasMore bool + var nextMarker string + if len(objects) == limit { + objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Name + } + } + + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil +} diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index e8cf69780..fe39f09e3 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -470,8 +470,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, 
bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -530,10 +530,6 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ObjectEntries(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) -} - func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { return ssql.ObjectMetadata(ctx, tx, bucket, path) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 674bf8237..a3dee2af5 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -457,8 +457,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -527,10 +527,6 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) 
(api.ObjectsListResponse, error) { - return ssql.ObjectEntries(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) -} - func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { return ssql.ObjectMetadata(ctx, tx, bucket, path) } From 54b3d34d377a588661392087c5c1f7042765d508 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 10:49:30 +0200 Subject: [PATCH 23/98] api: get rid of setting constants --- api/events.go | 20 +++++++------- api/setting.go | 7 ----- bus/bus.go | 26 +++++++++++------- bus/client/settings.go | 8 +++--- bus/routes.go | 38 ++++++++++++-------------- internal/bus/pinmanager.go | 7 +++-- internal/test/e2e/events_test.go | 8 +----- internal/worker/cache.go | 46 ++++++++------------------------ stores/settingsdb.go | 31 ++++++++++++--------- 9 files changed, 82 insertions(+), 109 deletions(-) diff --git a/api/events.go b/api/events.go index 38d490506..bd6dcd776 100644 --- a/api/events.go +++ b/api/events.go @@ -50,22 +50,24 @@ type ( Timestamp time.Time `json:"timestamp"` } - EventHostUpdate struct { - HostKey types.PublicKey `json:"hostKey"` - NetAddr string `json:"netAddr"` - Timestamp time.Time `json:"timestamp"` - } - EventContractSetUpdate struct { Name string `json:"name"` ContractIDs []types.FileContractID `json:"contractIDs"` Timestamp time.Time `json:"timestamp"` } + EventHostUpdate struct { + HostKey types.PublicKey `json:"hostKey"` + NetAddr string `json:"netAddr"` + Timestamp time.Time `json:"timestamp"` + } + EventSettingUpdate struct { - Key string `json:"key"` - Update interface{} `json:"update"` - Timestamp time.Time `json:"timestamp"` + GougingSettings *GougingSettings `json:"gougingSettings,omitempty"` + PinnedSettings *PinnedSettings `json:"pinnedSettings,omitempty"` + S3Settings *S3Settings `json:"s3Settings,omitempty"` + UploadSettings *UploadSettings `json:"uploadSettings,omitempty"` + Timestamp time.Time `json:"timestamp"` } ) diff --git a/api/setting.go 
b/api/setting.go index 237a96f5f..e4741e5ff 100644 --- a/api/setting.go +++ b/api/setting.go @@ -9,13 +9,6 @@ import ( "go.sia.tech/core/types" ) -const ( - SettingGouging = "gouging" - SettingPinned = "pinned" - SettingS3 = "s3" - SettingUpload = "upload" -) - const ( S3MinAccessKeyLen = 16 S3MaxAccessKeyLen = 128 diff --git a/bus/bus.go b/bus/bus.go index dc6dff267..b7513177e 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -26,6 +26,7 @@ import ( "go.sia.tech/renterd/internal/rhp" rhp2 "go.sia.tech/renterd/internal/rhp/v2" "go.sia.tech/renterd/object" + "go.sia.tech/renterd/stores" "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" @@ -281,7 +282,7 @@ type ( UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error PinnedSettings(ctx context.Context) (api.PinnedSettings, error) - UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error + UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error UploadSettings(ctx context.Context) (api.UploadSettings, error) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error @@ -614,10 +615,10 @@ func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { func (b *Bus) compatV2Settings(ctx context.Context) error { // escape early if all settings are present if !errors.Is(errors.Join( - b.ss.Setting(ctx, api.SettingGouging, struct{}{}), - b.ss.Setting(ctx, api.SettingPinned, struct{}{}), - b.ss.Setting(ctx, api.SettingS3, struct{}{}), - b.ss.Setting(ctx, api.SettingUpload, struct{}{}), + b.ss.Setting(ctx, stores.SettingGouging, nil), + b.ss.Setting(ctx, stores.SettingPinned, nil), + b.ss.Setting(ctx, stores.SettingS3, nil), + b.ss.Setting(ctx, stores.SettingUpload, nil), ), api.ErrSettingNotFound) { return nil } @@ -649,17 +650,17 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { } // migrate pinned settings - var pps api.PinnedSettings - if err := b.ss.Setting(ctx, "pricepinning", &pps); err != nil && !errors.Is(err, 
api.ErrSettingNotFound) { + var ps api.PinnedSettings + if err := b.ss.Setting(ctx, "pricepinning", &ps); err != nil && !errors.Is(err, api.ErrSettingNotFound) { return err } else if errors.Is(err, api.ErrSettingNotFound) { if err := b.ss.UpdatePinnedSettings(ctx, api.DefaultPinnedSettings); err != nil { return err } } else { - if err := pps.Validate(); err != nil { + if err := ps.Validate(); err != nil { return fmt.Errorf("failed to migrate pinned setting: %w", err) - } else if err := b.ss.UpdatePinnedSettings(ctx, pps); err != nil { + } else if err := b.ss.UpdatePinnedSettings(ctx, ps); err != nil { return err } } @@ -704,5 +705,10 @@ func (b *Bus) compatV2Settings(ctx context.Context) error { return err } - return nil + // delete old settings + return errors.Join( + b.ss.DeleteSetting(ctx, "contractset"), + b.ss.DeleteSetting(ctx, "pricepinning"), + b.ss.DeleteSetting(ctx, "uploadpacking"), + ) } diff --git a/bus/client/settings.go b/bus/client/settings.go index 5723c3cdd..46d3e8708 100644 --- a/bus/client/settings.go +++ b/bus/client/settings.go @@ -18,14 +18,14 @@ func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettin } // PricePinningSettings returns the contract set settings. -func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PinnedSettings, err error) { - err = c.c.WithContext(ctx).GET("/settings/pinned", &pps) +func (c *Client) PricePinningSettings(ctx context.Context) (ps api.PinnedSettings, err error) { + err = c.c.WithContext(ctx).GET("/settings/pinned", &ps) return } // UpdatePinnedSettings updates the given setting. -func (c *Client) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error { - return c.c.WithContext(ctx).PUT("/settings/pinned", pps) +func (c *Client) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { + return c.c.WithContext(ctx).PUT("/settings/pinned", ps) } // S3Settings returns the S3 settings. 
diff --git a/bus/routes.go b/bus/routes.go index e681b1b93..284609981 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1272,9 +1272,8 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingGouging, - Update: gs, - Timestamp: time.Now().UTC(), + GougingSettings: &gs, + Timestamp: time.Now().UTC(), }, }) b.pinMgr.TriggerUpdate() @@ -1284,7 +1283,7 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { if pps, err := b.ss.PinnedSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get price pinning settings", err) == nil { + } else if jc.Check("failed to get pinned settings", err) == nil { // populate the Autopilots map with the current autopilots aps, err := b.as.Autopilots(jc.Request.Context()) if jc.Check("failed to fetch autopilots", err) != nil { @@ -1303,26 +1302,25 @@ func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { - var pps api.PinnedSettings - if jc.Decode(&pps) != nil { + var ps api.PinnedSettings + if jc.Decode(&ps) != nil { return - } else if err := pps.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, error: %v", err), http.StatusBadRequest) + } else if err := ps.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update pinned settings, error: %v", err), http.StatusBadRequest) return - } else if pps.Enabled { - if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) + } else if ps.Enabled { + if _, err := ibus.NewForexClient(ps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), 
ps.Currency); err != nil { + jc.Error(fmt.Errorf("couldn't update pinned settings, forex API unreachable,error: %v", err), http.StatusBadRequest) return } } - if jc.Check("could not update price pinning settings", b.ss.UpdatePinnedSettings(jc.Request.Context(), pps)) == nil { + if jc.Check("could not update pinned settings", b.ss.UpdatePinnedSettings(jc.Request.Context(), ps)) == nil { b.broadcastAction(webhooks.Event{ Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingPinned, - Update: pps, - Timestamp: time.Now().UTC(), + PinnedSettings: &ps, + Timestamp: time.Now().UTC(), }, }) b.pinMgr.TriggerUpdate() @@ -1349,9 +1347,8 @@ func (b *Bus) settingsUploadHandlerPUT(jc jape.Context) { Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingUpload, - Update: us, - Timestamp: time.Now().UTC(), + UploadSettings: &us, + Timestamp: time.Now().UTC(), }, }) } @@ -1377,9 +1374,8 @@ func (b *Bus) settingsS3HandlerPUT(jc jape.Context) { Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingS3, - Update: s3s, - Timestamp: time.Now().UTC(), + S3Settings: &s3s, + Timestamp: time.Now().UTC(), }, }) } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 3bc579f28..716068565 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -25,7 +25,7 @@ type ( UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error PinnedSettings(ctx context.Context) (api.PinnedSettings, error) - UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error + UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error } ) @@ -297,9 +297,8 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingGouging, - Update: gs, - Timestamp: time.Now().UTC(), + 
GougingSettings: &gs, + Timestamp: time.Now().UTC(), }, }) } diff --git a/internal/test/e2e/events_test.go b/internal/test/e2e/events_test.go index e1bc29df7..5f6d741ee 100644 --- a/internal/test/e2e/events_test.go +++ b/internal/test/e2e/events_test.go @@ -169,15 +169,9 @@ func TestEvents(t *testing.T) { t.Fatalf("unexpected event %+v", e) } case api.EventSettingUpdate: - if e.Key != api.SettingGouging || e.Timestamp.IsZero() { + if e.GougingSettings == nil || e.GougingSettings.HostBlockHeightLeeway != 100 || e.Timestamp.IsZero() { t.Fatalf("unexpected event %+v", e) } - var update api.GougingSettings - bytes, _ := json.Marshal(e.Update) - tt.OK(json.Unmarshal(bytes, &update)) - if update.HostBlockHeightLeeway != 100 { - t.Fatalf("unexpected update %+v", update) - } } } } diff --git a/internal/worker/cache.go b/internal/worker/cache.go index ebb071acd..1f5d28d22 100644 --- a/internal/worker/cache.go +++ b/internal/worker/cache.go @@ -181,8 +181,8 @@ func (c *cache) HandleEvent(event webhooks.Event) (err error) { log = log.With("hk", e.HostKey, "ts", e.Timestamp) c.handleHostUpdate(e) case api.EventSettingUpdate: - log = log.With("key", e.Key, "ts", e.Timestamp) - err = c.handleSettingUpdate(e) + log = log.With("gouging", e.GougingSettings != nil, "pinned", e.PinnedSettings != nil, "upload", e.UploadSettings != nil, "ts", e.Timestamp) + c.handleSettingUpdate(e) default: log.Info("unhandled event", e) return @@ -307,46 +307,22 @@ func (c *cache) handleHostUpdate(e api.EventHostUpdate) { c.cache.Set(cacheKeyDownloadContracts, contracts) } -func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { +func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) { // return early if the cache doesn't have gouging params to update value, found, _ := c.cache.Get(cacheKeyGougingParams) if !found { - return nil + return } - gp := value.(api.GougingParams) - // marshal the updated value - data, err := json.Marshal(e.Update) - if err != nil { - return 
fmt.Errorf("couldn't marshal the given value, error: %v", err) + // update the cache + gp := value.(api.GougingParams) + if e.GougingSettings != nil { + gp.GougingSettings = *e.GougingSettings } - - // unmarshal into the appropriated setting and update the cache - switch e.Key { - case api.SettingGouging: - var gs api.GougingSettings - if err := json.Unmarshal(data, &gs); err != nil { - return fmt.Errorf("couldn't update gouging settings, invalid request body, %t", e.Update) - } else if err := gs.Validate(); err != nil { - return fmt.Errorf("couldn't update gouging settings, error: %v", err) - } - - gp.GougingSettings = gs - c.cache.Set(cacheKeyGougingParams, gp) - case api.SettingUpload: - var us api.UploadSettings - if err := json.Unmarshal(data, &us); err != nil { - return fmt.Errorf("couldn't update upload settings, invalid request body, %t", e.Update) - } else if err := us.Validate(); err != nil { - return fmt.Errorf("couldn't update upload settings, error: %v", err) - } - - gp.RedundancySettings = us.Redundancy - c.cache.Set(cacheKeyGougingParams, gp) - default: + if e.UploadSettings != nil { + gp.RedundancySettings = e.UploadSettings.Redundancy } - - return nil + c.cache.Set(cacheKeyGougingParams, gp) } func contractsEqual(x, y []api.ContractMetadata) bool { diff --git a/stores/settingsdb.go b/stores/settingsdb.go index 658b6de1f..31190f769 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -9,8 +9,15 @@ import ( sql "go.sia.tech/renterd/stores/sql" ) +const ( + SettingGouging = "gouging" + SettingPinned = "pinned" + SettingS3 = "s3" + SettingUpload = "upload" +) + func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, _ error) { - value, err := s.fetchSetting(ctx, api.SettingGouging) + value, err := s.fetchSetting(ctx, SettingGouging) if err != nil { return api.GougingSettings{}, err } @@ -27,32 +34,32 @@ func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSett if err != nil { return 
fmt.Errorf("couldn't marshal the given value, error: %v", err) } - return s.updateSetting(ctx, api.SettingGouging, string(data)) + return s.updateSetting(ctx, SettingGouging, string(data)) } -func (s *SQLStore) PinnedSettings(ctx context.Context) (pps api.PinnedSettings, _ error) { - value, err := s.fetchSetting(ctx, api.SettingPinned) +func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, _ error) { + value, err := s.fetchSetting(ctx, SettingPinned) if err != nil { return api.PinnedSettings{}, err } - if err := json.Unmarshal([]byte(value), &pps); err != nil { + if err := json.Unmarshal([]byte(value), &ps); err != nil { s.logger.Panicf("failed to unmarshal pinned settings '%s': %v", value, err) return api.PinnedSettings{}, err } return } -func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, pps api.PinnedSettings) error { - data, err := json.Marshal(pps) +func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { + data, err := json.Marshal(ps) if err != nil { return fmt.Errorf("couldn't marshal the given value, error: %v", err) } - return s.updateSetting(ctx, api.SettingPinned, string(data)) + return s.updateSetting(ctx, SettingPinned, string(data)) } func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, _ error) { - value, err := s.fetchSetting(ctx, api.SettingUpload) + value, err := s.fetchSetting(ctx, SettingUpload) if err != nil { return api.UploadSettings{}, err } @@ -69,11 +76,11 @@ func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettin if err != nil { return fmt.Errorf("couldn't marshal the given value, error: %v", err) } - return s.updateSetting(ctx, api.SettingUpload, string(data)) + return s.updateSetting(ctx, SettingUpload, string(data)) } func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, _ error) { - value, err := s.fetchSetting(ctx, api.SettingS3) + value, err := s.fetchSetting(ctx, SettingS3) if err != nil { 
return api.S3Settings{}, err } @@ -90,7 +97,7 @@ func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) erro if err != nil { return fmt.Errorf("couldn't marshal the given value, error: %v", err) } - return s.updateSetting(ctx, api.SettingS3, string(data)) + return s.updateSetting(ctx, SettingS3, string(data)) } func (s *SQLStore) DeleteSetting(ctx context.Context, key string) (err error) { From ed1679034330e69e48b61dda1897fa17e9171bd7 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 10:52:08 +0200 Subject: [PATCH 24/98] testing: fix TestPinManager --- internal/bus/pinmanager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index a90d76e38..1798a92a1 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -157,8 +157,8 @@ func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { ms.mu.Lock() - defer ms.mu.Unlock() ms.ps = ps + ms.mu.Unlock() time.Sleep(2 * testUpdateInterval) return nil } From d320faf827e4bc49470cca108cb16dc5867ab7fd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 10:53:29 +0200 Subject: [PATCH 25/98] bus: make prefix path param --- bus/bus.go | 12 ++++++------ bus/client/objects.go | 14 +++++++++----- bus/routes.go | 7 ++----- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 9249629c0..6e3786b97 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -439,12 +439,12 @@ func (b *Bus) Handler() http.Handler { "POST /multipart/listuploads": b.multipartHandlerListUploadsPOST, "POST /multipart/listparts": b.multipartHandlerListPartsPOST, - "GET /object/*key": b.objectHandlerGET, - "GET /objects": b.objectsHandlerGET, - "PUT /objects/*key": b.objectsHandlerPUT, - "DELETE /objects/*key": b.objectsHandlerDELETE, - "POST /objects/copy": 
b.objectsCopyHandlerPOST, - "POST /objects/rename": b.objectsRenameHandlerPOST, + "GET /object/*key": b.objectHandlerGET, + "GET /objects/*prefix": b.objectsHandlerGET, + "PUT /objects/*key": b.objectsHandlerPUT, + "DELETE /objects/*key": b.objectsHandlerDELETE, + "POST /objects/copy": b.objectsCopyHandlerPOST, + "POST /objects/rename": b.objectsRenameHandlerPOST, "GET /params/gouging": b.paramsHandlerGougingGET, "GET /params/upload": b.paramsHandlerUploadGET, diff --git a/bus/client/objects.go b/bus/client/objects.go index 0415e2831..c6ed3423b 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -50,15 +50,15 @@ func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api } // Objects returns the object at given path. -func (c *Client) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.Object, err error) { +func (c *Client) Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (res api.Object, err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - path = api.ObjectPathEscape(path) - path += "?" + values.Encode() + key = api.ObjectPathEscape(key) + key += "?" + values.Encode() - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/object/%s", path), &res) + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/object/%s", key), &res) return } @@ -67,7 +67,11 @@ func (c *Client) Objects(ctx context.Context, bucket string, opts api.ListObject values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - err = c.c.WithContext(ctx).GET("/objects?"+values.Encode(), &resp) + + opts.Prefix = api.ObjectPathEscape(opts.Prefix) + opts.Prefix += "?" 
+ values.Encode() + + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", opts.Prefix), &resp) return } diff --git a/bus/routes.go b/bus/routes.go index 73c65cc72..7df2619fc 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1064,7 +1064,7 @@ func (b *Bus) objectHandlerGET(jc jape.Context) { } func (b *Bus) objectsHandlerGET(jc jape.Context) { - var marker, delim, prefix, sortBy, sortDir string + var marker, delim, sortBy, sortDir string bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return @@ -1079,9 +1079,6 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) { if jc.DecodeForm("marker", &marker) != nil { return } - if jc.DecodeForm("prefix", &prefix) != nil { - return - } if jc.DecodeForm("sortBy", &sortBy) != nil { return } @@ -1089,7 +1086,7 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) { return } - resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, prefix, delim, sortBy, sortDir, marker, limit) + resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, jc.PathParam("prefix"), delim, sortBy, sortDir, marker, limit) if errors.Is(err, api.ErrUnsupportedDelimiter) { jc.Error(err, http.StatusBadRequest) return From 45ad7777796841286df9c278727da992d0de0f87 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 11:07:23 +0200 Subject: [PATCH 26/98] e2e: fix TestUploadDownloadExtended --- internal/test/e2e/cluster_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index b6f7da3f9..be1ba87cb 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -785,15 +785,21 @@ func TestUploadDownloadExtended(t *testing.T) { } } - // fetch entries with "file" prefix - res, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{Prefix: "file"}) + // fetch entries in /fileÅ› starting with "file" + res, err := 
cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Delimiter: "/", + Prefix: "fileÅ›/file", + }) tt.OK(err) if len(res.Objects) != 2 { t.Fatal("expected two entry to be returned", len(res.Objects)) } - // fetch entries with "fileÅ›" prefix - res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{Prefix: "fileÅ›/"}) + // fetch entries in /fileÅ› starting with "foo" + res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Delimiter: "/", + Prefix: "fileÅ›/foo", + }) tt.OK(err) if len(res.Objects) != 0 { t.Fatal("expected no entries to be returned", len(res.Objects)) From f21c2cbe4a3a64f0b06c7f72f33d7992ab58696f Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Aug 2024 11:09:24 +0200 Subject: [PATCH 27/98] testing: fix race in TestPinManager --- internal/bus/pinmanager_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 1798a92a1..8716197ee 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -156,8 +156,16 @@ func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, } func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { + b, err := json.Marshal(ps) + if err != nil { + return err + } + var cloned api.PinnedSettings + if err := json.Unmarshal(b, &cloned); err != nil { + return err + } ms.mu.Lock() - ms.ps = ps + ms.ps = cloned ms.mu.Unlock() time.Sleep(2 * testUpdateInterval) return nil From d4e4e01aecef9b508659cecace5ceb02bb9e7caf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 11:11:50 +0200 Subject: [PATCH 28/98] bus: make prefix a mandatory arg --- api/object.go | 4 ---- bus/client/objects.go | 8 ++++---- internal/test/e2e/cluster_test.go | 32 +++++++++++-------------------- worker/s3/backend.go | 3 +-- worker/s3/s3.go 
| 2 +- 5 files changed, 17 insertions(+), 32 deletions(-) diff --git a/api/object.go b/api/object.go index bb5944c2e..bcd7bfb5c 100644 --- a/api/object.go +++ b/api/object.go @@ -216,7 +216,6 @@ type ( Delimiter string Limit int Marker string - Prefix string SortBy string SortDir string } @@ -330,9 +329,6 @@ func (opts ListObjectOptions) Apply(values url.Values) { if opts.Marker != "" { values.Set("marker", opts.Marker) } - if opts.Prefix != "" { - values.Set("prefix", opts.Prefix) - } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) } diff --git a/bus/client/objects.go b/bus/client/objects.go index c6ed3423b..3a081a9bd 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -63,15 +63,15 @@ func (c *Client) Object(ctx context.Context, bucket, key string, opts api.GetObj } // Objects lists objects in the given bucket. -func (c *Client) Objects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) { +func (c *Client) Objects(ctx context.Context, bucket string, prefix string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - opts.Prefix = api.ObjectPathEscape(opts.Prefix) - opts.Prefix += "?" + values.Encode() + prefix = api.ObjectPathEscape(prefix) + prefix += "?" 
+ values.Encode() - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", opts.Prefix), &resp) + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", prefix), &resp) return } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index be1ba87cb..a0f2b1f35 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -117,8 +117,7 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { } for _, test := range tests { // use the bus client - res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.prefix, + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.prefix, api.ListObjectOptions{ SortBy: test.sortBy, SortDir: test.sortDir, Limit: -1, @@ -136,8 +135,7 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { if len(res.Objects) > 0 { marker := "" for offset := 0; offset < len(test.want); offset++ { - res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.prefix, + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.prefix, api.ListObjectOptions{ SortBy: test.sortBy, SortDir: test.sortDir, Marker: marker, @@ -162,7 +160,7 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { } // list invalid marker - _, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + _, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{ Marker: "invalid", SortBy: api.ObjectSortByHealth, }) @@ -481,9 +479,8 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { } for _, test := range tests { // use the bus client - res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{ Delimiter: "/", - Prefix: test.path + test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, }) @@ 
-497,9 +494,8 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { } var marker string for offset := 0; offset < len(test.want); offset++ { - res, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{ Delimiter: "/", - Prefix: test.path + test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, Marker: marker, @@ -524,9 +520,8 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { continue } - res, err = b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err = b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{ Delimiter: "/", - Prefix: test.path + test.prefix, SortBy: test.sortBy, SortDir: test.sortDir, Marker: test.want[offset].Name, @@ -554,9 +549,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { } // assert root dir is empty - if resp, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: "/", - }); err != nil { + if resp, err := b.Objects(context.Background(), api.DefaultBucketName, "/", api.ListObjectOptions{}); err != nil { t.Fatal(err) } else if len(resp.Objects) != 0 { t.Fatal("there should be no entries left", resp.Objects) @@ -770,9 +763,8 @@ func TestUploadDownloadExtended(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(file2), api.DefaultBucketName, "fileÅ›/file2", api.UploadObjectOptions{})) // fetch all entries from the worker - resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileÅ›/", api.ListObjectOptions{ Delimiter: "/", - Prefix: "fileÅ›/", }) tt.OK(err) @@ -786,9 +778,8 @@ func TestUploadDownloadExtended(t *testing.T) { } // fetch entries in /fileÅ› starting with "file" - res, err := 
cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileÅ›/file", api.ListObjectOptions{ Delimiter: "/", - Prefix: "fileÅ›/file", }) tt.OK(err) if len(res.Objects) != 2 { @@ -796,9 +787,8 @@ func TestUploadDownloadExtended(t *testing.T) { } // fetch entries in /fileÅ› starting with "foo" - res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileÅ›/foo", api.ListObjectOptions{ Delimiter: "/", - Prefix: "fileÅ›/foo", }) tt.OK(err) if len(res.Objects) != 0 { @@ -1693,7 +1683,7 @@ func TestUploadPacking(t *testing.T) { if res.Size != int64(len(data)) { t.Fatal("unexpected size after upload", res.Size, len(data)) } - resp, err := b.Objects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + resp, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{ Delimiter: "/", }) if err != nil { diff --git a/worker/s3/backend.go b/worker/s3/backend.go index 88a880828..57f166b38 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -92,11 +92,10 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 page.Marker = "/" + page.Marker } - resp, err := s.b.Objects(ctx, bucketName, api.ListObjectOptions{ + resp, err := s.b.Objects(ctx, bucketName, prefix.Prefix, api.ListObjectOptions{ Delimiter: prefix.Delimiter, Limit: int(page.MaxKeys), Marker: page.Marker, - Prefix: prefix.Prefix, }) if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) diff --git a/worker/s3/s3.go b/worker/s3/s3.go index 77986b54f..8c334fa16 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -33,7 +33,7 @@ type Bus interface { AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) 
CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) - Objects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) + Objects(ctx context.Context, bucket, prefix string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) From b0e71522a9083dd3ac488504f13488cf1af94883 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 14:02:41 +0200 Subject: [PATCH 29/98] e2e: fix TestUploadDownloadSpending --- internal/test/e2e/cluster_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index a0f2b1f35..28d0203d2 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -941,18 +941,9 @@ func TestUploadDownloadSpending(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) // Should be registered in bus. 
- res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, "", api.GetObjectOptions{}) + _, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) tt.OK(err) - var found bool - if res.Name == fmt.Sprintf("/%s", path) { - found = true - break - } - if !found { - t.Fatal("uploaded object not found in bus") - } - // download the data var buffer bytes.Buffer tt.OK(w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{})) From 2161db9d20ad420612cb1a1b0d5ea4c5251c4763 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Aug 2024 14:41:45 +0200 Subject: [PATCH 30/98] bus: rename objects to listobjects endpoint --- bus/bus.go | 12 ++++++------ bus/client/objects.go | 4 ++-- worker/worker.go | 24 ++++++++---------------- 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 6e3786b97..471e968b7 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -439,12 +439,12 @@ func (b *Bus) Handler() http.Handler { "POST /multipart/listuploads": b.multipartHandlerListUploadsPOST, "POST /multipart/listparts": b.multipartHandlerListPartsPOST, - "GET /object/*key": b.objectHandlerGET, - "GET /objects/*prefix": b.objectsHandlerGET, - "PUT /objects/*key": b.objectsHandlerPUT, - "DELETE /objects/*key": b.objectsHandlerDELETE, - "POST /objects/copy": b.objectsCopyHandlerPOST, - "POST /objects/rename": b.objectsRenameHandlerPOST, + "GET /listobjects/*prefix": b.objectsHandlerGET, + "GET /objects/*key": b.objectHandlerGET, + "PUT /objects/*key": b.objectsHandlerPUT, + "DELETE /objects/*key": b.objectsHandlerDELETE, + "POST /objects/copy": b.objectsCopyHandlerPOST, + "POST /objects/rename": b.objectsRenameHandlerPOST, "GET /params/gouging": b.paramsHandlerGougingGET, "GET /params/upload": b.paramsHandlerUploadGET, diff --git a/bus/client/objects.go b/bus/client/objects.go index 3a081a9bd..de3aeb702 100644 --- a/bus/client/objects.go +++ 
b/bus/client/objects.go @@ -58,7 +58,7 @@ func (c *Client) Object(ctx context.Context, bucket, key string, opts api.GetObj key = api.ObjectPathEscape(key) key += "?" + values.Encode() - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/object/%s", key), &res) + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", key), &res) return } @@ -71,7 +71,7 @@ func (c *Client) Objects(ctx context.Context, bucket string, prefix string, opts prefix = api.ObjectPathEscape(prefix) prefix += "?" + values.Encode() - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", prefix), &resp) + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/listobjects/%s", prefix), &resp) return } diff --git a/worker/worker.go b/worker/worker.go index 8d1166465..2464d3f62 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -726,17 +726,9 @@ func (w *Worker) objectsHandlerHEAD(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } - var ignoreDelim bool - if jc.DecodeForm("ignoreDelim", &ignoreDelim) != nil { - return - } // parse path - path := jc.PathParam("path") - if !ignoreDelim && (path == "" || strings.HasSuffix(path, "/")) { - jc.Error(errors.New("HEAD requests can only be performed on objects, not directories"), http.StatusBadRequest) - return - } + path := jc.PathParam("key") var off int if jc.DecodeForm("offset", &off) != nil { @@ -816,7 +808,7 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { return } - path := jc.PathParam("path") + path := jc.PathParam("key") if path == "" { jc.Error(errors.New("no path provided"), http.StatusBadRequest) return @@ -857,7 +849,7 @@ func (w *Worker) objectsHandlerPUT(jc jape.Context) { ctx := jc.Request.Context() // grab the path - path := jc.PathParam("path") + path := jc.PathParam("key") // decode the contract set from the query string var contractset string @@ -1020,7 +1012,7 @@ func (w *Worker) objectsHandlerDELETE(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } - err := 
w.bus.DeleteObject(jc.Request.Context(), bucket, jc.PathParam("path"), api.DeleteObjectOptions{Batch: batch}) + err := w.bus.DeleteObject(jc.Request.Context(), bucket, jc.PathParam("key"), api.DeleteObjectOptions{Batch: batch}) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return @@ -1213,10 +1205,10 @@ func (w *Worker) Handler() http.Handler { "GET /stats/uploads": w.uploadsStatsHandlerGET, "POST /slab/migrate": w.slabMigrateHandler, - "HEAD /objects/*path": w.objectsHandlerHEAD, - "GET /objects/*path": w.objectsHandlerGET, - "PUT /objects/*path": w.objectsHandlerPUT, - "DELETE /objects/*path": w.objectsHandlerDELETE, + "HEAD /objects/*key": w.objectsHandlerHEAD, + "GET /objects/*key": w.objectsHandlerGET, + "PUT /objects/*key": w.objectsHandlerPUT, + "DELETE /objects/*key": w.objectsHandlerDELETE, "PUT /multipart/*path": w.multipartUploadHandlerPUT, From b414531f9ce44eab0033635d05b140dd68b77252 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 30 Aug 2024 16:03:54 +0200 Subject: [PATCH 31/98] bus: remove ApplyValues --- api/object.go | 4 ---- internal/test/e2e/cluster_test.go | 4 +--- worker/client/client.go | 1 - 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/api/object.go b/api/object.go index bcd7bfb5c..9bcd8aaaf 100644 --- a/api/object.go +++ b/api/object.go @@ -280,10 +280,6 @@ func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { values.Set("contractset", opts.ContractSet) } } - -func (opts DownloadObjectOptions) ApplyValues(values url.Values) { -} - func (opts DownloadObjectOptions) ApplyHeaders(h http.Header) { if opts.Range != nil { if opts.Range.Length == -1 { diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 28d0203d2..ed658052d 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1674,9 +1674,7 @@ func TestUploadPacking(t *testing.T) { if res.Size != int64(len(data)) { t.Fatal("unexpected size after 
upload", res.Size, len(data)) } - resp, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{ - Delimiter: "/", - }) + resp, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{}) if err != nil { t.Fatal(err) } diff --git a/worker/client/client.go b/worker/client/client.go index a65e7136e..6ea6172cd 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -274,7 +274,6 @@ func (c *Client) NotifyEvent(ctx context.Context, e webhooks.Event) (err error) func (c *Client) object(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) - opts.ApplyValues(values) path += "?" + values.Encode() c.c.Custom("GET", fmt.Sprintf("/objects/%s", path), nil, (*[]api.ObjectMetadata)(nil)) From 88e13f9e5b052a1abbd7679288e9b39335e4d9eb Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 30 Aug 2024 16:46:29 +0200 Subject: [PATCH 32/98] bus: add substring parameter to Objects endpoint --- api/object.go | 4 ++ bus/bus.go | 6 +-- bus/client/objects.go | 10 ----- bus/routes.go | 25 +++-------- internal/test/e2e/cluster_test.go | 40 +++++++++-------- stores/metadata.go | 12 +---- stores/metadata_test.go | 75 ++++++++++++++++--------------- stores/sql/database.go | 6 +-- stores/sql/main.go | 41 ++++------------- stores/sql/mysql/main.go | 8 +--- stores/sql/sqlite/main.go | 8 +--- 11 files changed, 88 insertions(+), 147 deletions(-) diff --git a/api/object.go b/api/object.go index 9bcd8aaaf..f3d345c03 100644 --- a/api/object.go +++ b/api/object.go @@ -218,6 +218,7 @@ type ( Marker string SortBy string SortDir string + Substring string } SearchObjectOptions struct { @@ -331,6 +332,9 @@ func (opts ListObjectOptions) Apply(values url.Values) { if opts.SortDir != "" { values.Set("sortDir", opts.SortDir) } + if opts.Substring != "" { + values.Set("substring", opts.Substring) + } 
} func (opts SearchObjectOptions) Apply(values url.Values) { diff --git a/bus/bus.go b/bus/bus.go index 092f109ee..3ec713524 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -231,7 +231,7 @@ type ( UpdateBucketPolicy(ctx context.Context, bucketName string, policy api.BucketPolicy) error CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) - ListObjects(ctx context.Context, bucketName, prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) + ListObjects(ctx context.Context, bucketName, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) Object(ctx context.Context, bucketName, path string) (api.Object, error) ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) @@ -240,7 +240,6 @@ type ( RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error RenameObjects(ctx context.Context, bucketName, from, to string, force bool) error - SearchObjects(ctx context.Context, bucketName, substring string, offset, limit int) ([]api.ObjectMetadata, error) UpdateObject(ctx context.Context, bucketName, path, contractSet, ETag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error AbortMultipartUpload(ctx context.Context, bucketName, path string, uploadID string) (err error) @@ -461,8 +460,7 @@ func (b *Bus) Handler() http.Handler { "POST /slabbuffer/done": b.packedSlabsHandlerDonePOST, "POST /slabbuffer/fetch": b.packedSlabsHandlerFetchPOST, - "POST /search/hosts": b.searchHostsHandlerPOST, - "GET /search/objects": b.searchObjectsHandlerGET, + "POST /search/hosts": b.searchHostsHandlerPOST, "DELETE /sectors/:hk/:root": b.sectorsHostRootHandlerDELETE, diff --git 
a/bus/client/objects.go b/bus/client/objects.go index de3aeb702..6a8482160 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -103,16 +103,6 @@ func (c *Client) RenameObjects(ctx context.Context, bucket, from, to string, for return c.renameObjects(ctx, bucket, from, to, api.ObjectsRenameModeMulti, force) } -// SearchObjects returns all objects that contains a sub-string in their key. -func (c *Client) SearchObjects(ctx context.Context, bucket string, opts api.SearchObjectOptions) (entries []api.ObjectMetadata, err error) { - values := url.Values{} - values.Set("bucket", bucket) - opts.Apply(values) - - err = c.c.WithContext(ctx).GET("/search/objects?"+values.Encode(), &entries) - return -} - func (c *Client) renameObjects(ctx context.Context, bucket, from, to, mode string, force bool) (err error) { err = c.c.WithContext(ctx).POST("/objects/rename", api.ObjectsRenameRequest{ Bucket: bucket, diff --git a/bus/routes.go b/bus/routes.go index 7860c77ca..2d4b7a729 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1029,24 +1029,6 @@ func (b *Bus) contractsAllHandlerDELETE(jc jape.Context) { jc.Check("couldn't remove contracts", b.ms.ArchiveAllContracts(jc.Request.Context(), api.ContractArchivalReasonRemoved)) } -func (b *Bus) searchObjectsHandlerGET(jc jape.Context) { - offset := 0 - limit := -1 - var key string - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("key", &key) != nil { - return - } - bucket := api.DefaultBucketName - if jc.DecodeForm("bucket", &bucket) != nil { - return - } - keys, err := b.ms.SearchObjects(jc.Request.Context(), bucket, key, offset, limit) - if jc.Check("couldn't list objects", err) != nil { - return - } - jc.Encode(keys) -} - func (b *Bus) objectHandlerGET(jc jape.Context) { path := jc.PathParam("key") bucket := api.DefaultBucketName @@ -1076,7 +1058,7 @@ func (b *Bus) objectHandlerGET(jc jape.Context) { } func (b *Bus) objectsHandlerGET(jc jape.Context) { - var marker, 
delim, sortBy, sortDir string + var marker, delim, sortBy, sortDir, substring string bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return @@ -1097,8 +1079,11 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) { if jc.DecodeForm("sortDir", &sortDir) != nil { return } + if jc.DecodeForm("substring", &substring) != nil { + return + } - resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, jc.PathParam("prefix"), delim, sortBy, sortDir, marker, limit) + resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, jc.PathParam("prefix"), substring, delim, sortBy, sortDir, marker, limit) if errors.Is(err, api.ErrUnsupportedDelimiter) { jc.Error(err, http.StatusBadRequest) return diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index ed658052d..cdcb922e4 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -959,20 +959,20 @@ func TestUploadDownloadSpending(t *testing.T) { uploadDownload() // Fuzzy search for uploaded data in various ways. 
- objects, err := cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{}) + resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{}) tt.OK(err) - if len(objects) != 2 { - t.Fatalf("should have 2 objects but got %v", len(objects)) + if len(resp.Objects) != 2 { + t.Fatalf("should have 2 objects but got %v", len(resp.Objects)) } - objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "ata"}) + resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "ata"}) tt.OK(err) - if len(objects) != 2 { - t.Fatalf("should have 2 objects but got %v", len(objects)) + if len(resp.Objects) != 2 { + t.Fatalf("should have 2 objects but got %v", len(resp.Objects)) } - objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "1258"}) + resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "1258"}) tt.OK(err) - if len(objects) != 1 { - t.Fatalf("should have 1 objects but got %v", len(objects)) + if len(resp.Objects) != 1 { + t.Fatalf("should have 1 objects but got %v", len(resp.Objects)) } // renew contracts. @@ -1204,28 +1204,29 @@ func TestParallelUpload(t *testing.T) { wg.Wait() // Check if objects exist. - objects, err := cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/dir/", Limit: 100}) + resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/dir/", Limit: 100}) tt.OK(err) - if len(objects) != 3 { - t.Fatal("wrong number of objects", len(objects)) + if len(resp.Objects) != 3 { + t.Fatal("wrong number of objects", len(resp.Objects)) } // Upload one more object. 
tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader([]byte("data")), api.DefaultBucketName, "/foo", api.UploadObjectOptions{})) - objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100}) + resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100}) tt.OK(err) - if len(objects) != 4 { - t.Fatal("wrong number of objects", len(objects)) + if len(resp.Objects) != 4 { + t.Fatal("wrong number of objects", len(resp.Objects)) } // Delete all objects under /dir/. if err := cluster.Bus.DeleteObject(context.Background(), api.DefaultBucketName, "/dir/", api.DeleteObjectOptions{Batch: true}); err != nil { t.Fatal(err) } - objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100}) + resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100}) + cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100}) tt.OK(err) - if len(objects) != 1 { + if len(resp.Objects) != 1 { t.Fatal("objects weren't deleted") } @@ -1233,9 +1234,10 @@ func TestParallelUpload(t *testing.T) { if err := cluster.Bus.DeleteObject(context.Background(), api.DefaultBucketName, "/", api.DeleteObjectOptions{Batch: true}); err != nil { t.Fatal(err) } - objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100}) + resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100}) + cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100}) tt.OK(err) - if len(objects) != 0 { + if len(resp.Objects) != 0 { t.Fatal("objects weren't deleted") } } diff --git 
a/stores/metadata.go b/stores/metadata.go index c84e6c646..44b2406d2 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -310,14 +310,6 @@ func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileCo return } -func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) (objects []api.ObjectMetadata, err error) { - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - objects, err = tx.SearchObjects(ctx, bucket, substring, offset, limit) - return err - }) - return -} - func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { obj, err = tx.Object(ctx, bucket, path) @@ -786,9 +778,9 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []types } } -func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { +func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - resp, err = tx.ListObjects(ctx, bucket, prefix, delim, sortBy, sortDir, marker, limit) + resp, err = tx.ListObjects(ctx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) return err }) return diff --git a/stores/metadata_test.go b/stores/metadata_test.go index ba0dc4710..15e595729 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1379,7 +1379,7 @@ func TestObjectHealth(t *testing.T) { } // assert health is returned correctly by ObjectEntries - resp, err := ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "", "", "", "", -1) + resp, err := ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "", "", "", "", "", -1) entries := resp.Objects if err != nil { t.Fatal(err) @@ 
-1390,10 +1390,10 @@ func TestObjectHealth(t *testing.T) { } // assert health is returned correctly by SearchObject - entries, err = ss.SearchObjects(context.Background(), api.DefaultBucketName, "foo", 0, -1) + resp, err = ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "foo", "", "", "", "", -1) if err != nil { t.Fatal(err) - } else if len(entries) != 1 { + } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("wrong number of entries", len(entries)) } else if entries[0].Health != expectedHealth { t.Fatal("wrong health", entries[0].Health) @@ -1532,7 +1532,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { - resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, "", -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } @@ -1545,7 +1545,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { var marker string for offset := 0; offset < len(test.want); offset++ { - resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, marker, 1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } @@ -1567,7 +1567,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { continue } - resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, test.want[offset].Name, 1) + resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, test.want[offset].Name, 1) if err != nil { t.Fatal(err) } @@ 
-1634,7 +1634,7 @@ func TestListObjectsExplicitDir(t *testing.T) { {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Name: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, } for _, test := range tests { - got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "/", test.sortBy, test.sortDir, "", -1) + got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } @@ -1647,8 +1647,9 @@ func TestListObjectsExplicitDir(t *testing.T) { } } -// TestSearchObjects is a test for the SearchObjects method. -func TestSearchObjects(t *testing.T) { +// TestListObjectsSubstring is a test for the ListObjects fuzzy +// search via the "substring" argument. +func TestListObjectsSubstring(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { @@ -1701,18 +1702,22 @@ func TestSearchObjects(t *testing.T) { {"uu", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, } for _, test := range tests { - got, err := ss.SearchObjects(ctx, api.DefaultBucketName, test.path, 0, -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.path, "", "", "", "", -1) if err != nil { t.Fatal(err) } + got := resp.Objects assertEqual(got, test.want) + var marker string for offset := 0; offset < len(test.want); offset++ { - if got, err := ss.SearchObjects(ctx, api.DefaultBucketName, test.path, offset, 1); err != nil { + if resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.path, "", "", "", marker, 1); err != nil { t.Fatal(err) - } else if len(got) != 1 { + } else if got := resp.Objects; len(got) != 1 { t.Errorf("\nkey: %v unexpected number of objects, %d != 1", test.path, len(got)) } else if !metadataEquals(got[0], test.want[offset]) { t.Errorf("\nkey: %v\ngot: %v\nwant: %v", test.path, got, 
test.want[offset]) + } else { + marker = resp.NextMarker } } } @@ -2550,16 +2555,16 @@ func TestRenameObjects(t *testing.T) { } // Assert that number of objects matches. - objs, err := ss.SearchObjects(ctx, api.DefaultBucketName, "/", 0, 100) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", "/", "", "", "", "", 100) if err != nil { t.Fatal(err) } - if len(objs) != len(objectsAfter) { - t.Fatal("unexpected number of objects", len(objs), len(objectsAfter)) + if len(resp.Objects) != len(objectsAfter) { + t.Fatal("unexpected number of objects", len(resp.Objects), len(objectsAfter)) } // Assert paths are correct. - for _, obj := range objs { + for _, obj := range resp.Objects { if _, exists := objectsAfterMap[obj.Name]; !exists { t.Fatal("unexpected path", obj.Name) } @@ -3327,13 +3332,13 @@ func TestBucketObjects(t *testing.T) { } // List the objects in the buckets. - if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { + if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } else if entries[0].Size != 1 { t.Fatal("unexpected size", entries[0].Size) - } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) @@ -3342,15 +3347,15 @@ func TestBucketObjects(t *testing.T) { } // Search the objects in the buckets. 
- if objects, err := ss.SearchObjects(context.Background(), b1, "", 0, -1); err != nil { + if resp, err := ss.ListObjects(context.Background(), b1, "", "", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(objects) != 2 { + } else if objects := resp.Objects; len(objects) != 2 { t.Fatal("expected 2 objects", len(objects)) } else if objects[0].Size != 3 || objects[1].Size != 1 { t.Fatal("unexpected size", objects[0].Size, objects[1].Size) - } else if objects, err := ss.SearchObjects(context.Background(), b2, "", 0, -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "", "", "", "", "", "", -1); err != nil { t.Fatal(err) - } else if len(objects) != 2 { + } else if objects := resp.Objects; len(objects) != 2 { t.Fatal("expected 2 objects", len(objects)) } else if objects[0].Size != 4 || objects[1].Size != 2 { t.Fatal("unexpected size", objects[0].Size, objects[1].Size) @@ -3359,13 +3364,13 @@ func TestBucketObjects(t *testing.T) { // Rename object foo/bar in bucket 1 to foo/baz but not in bucket 2. 
if err := ss.RenameObjectBlocking(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/baz" { t.Fatal("unexpected name", entries[0].Name) - } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) @@ -3376,13 +3381,13 @@ func TestBucketObjects(t *testing.T) { // Rename foo/bar in bucket 2 using the batch rename. if err := ss.RenameObjectsBlocking(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) } else if entries[0].Name != "/foo/baz" { t.Fatal("unexpected name", entries[0].Name) - } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) @@ -3395,28 +3400,28 @@ func TestBucketObjects(t *testing.T) { t.Fatal(err) } else if err := 
ss.RemoveObjectBlocking(context.Background(), b1, "/foo/baz"); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) > 0 { t.Fatal("expected 0 entries", len(entries)) - } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) } // Delete all files in bucket 2. - if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", -1); err != nil { + if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 2 { t.Fatal("expected 2 entries", len(entries)) } else if err := ss.RemoveObjectsBlocking(context.Background(), b2, "/"); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 0 { t.Fatal("expected 0 entries", len(entries)) - } else if resp, err := ss.ListObjects(context.Background(), b1, "/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(context.Background(), b1, "/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) @@ -3467,7 +3472,7 @@ func TestCopyObject(t *testing.T) { // Copy it within the same bucket. 
if om, err := ss.CopyObject(ctx, "src", "src", "/foo", "/bar", "", nil); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(ctx, "src", "/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(ctx, "src", "/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 2 { t.Fatal("expected 2 entries", len(entries)) @@ -3480,7 +3485,7 @@ func TestCopyObject(t *testing.T) { // Copy it cross buckets. if om, err := ss.CopyObject(ctx, "src", "dst", "/foo", "/bar", "", nil); err != nil { t.Fatal(err) - } else if resp, err := ss.ListObjects(ctx, "dst", "/", "", "", "", "", -1); err != nil { + } else if resp, err := ss.ListObjects(ctx, "dst", "/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) @@ -3640,7 +3645,7 @@ func TestListObjectsNoDelimiter(t *testing.T) { } } for _, test := range tests { - res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", test.sortBy, test.sortDir, "", -1) + res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", "", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } @@ -3655,7 +3660,7 @@ func TestListObjectsNoDelimiter(t *testing.T) { if len(res.Objects) > 0 { marker := "" for offset := 0; offset < len(test.want); offset++ { - res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", test.sortBy, test.sortDir, marker, 1) + res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", "", test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } diff --git a/stores/sql/database.go b/stores/sql/database.go index a9d4e8a06..c127a4fb9 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -193,7 +193,7 @@ type ( ListBuckets(ctx context.Context) ([]api.Bucket, error) // ListObjects returns a list of objects from the given bucket. 
- ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) + ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) // MakeDirsForPath creates all directories for a given object's path. MakeDirsForPath(ctx context.Context, path string) (int64, error) @@ -310,10 +310,6 @@ type ( // SearchHosts returns a list of hosts that match the provided filters SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) - // SearchObjects returns a list of objects that contain the provided - // substring. - SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) - // SetContractSet creates the contract set with the given name and // associates it with the provided contract IDs. 
SetContractSet(ctx context.Context, name string, contractIds []types.FileContractID) error diff --git a/stores/sql/main.go b/stores/sql/main.go index 202108c9e..51277b7dd 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1013,9 +1013,9 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) { return orderByExprs, nil } -func ListObjects(ctx context.Context, tx Tx, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { +func ListObjects(ctx context.Context, tx Tx, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { if delim == "" { - resp, err = listObjectsNoDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) + resp, err = listObjectsNoDelim(ctx, tx, bucket, prefix, substring, sortBy, sortDir, marker, limit) } else if delim == "/" { resp, err = listObjectsSlashDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) } else { @@ -2369,35 +2369,6 @@ func scanStateElement(s Scanner) (types.StateElement, error) { }, nil } -func SearchObjects(ctx context.Context, tx Tx, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - if limit <= -1 { - limit = math.MaxInt - } - - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE INSTR(o.object_id, ?) > 0 AND b.name = ? - ORDER BY o.object_id ASC - LIMIT ? OFFSET ? 
- `, tx.SelectObjectMetadataExpr()), substring, bucket, limit, offset) - if err != nil { - return nil, fmt.Errorf("failed to search objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return nil, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - return objects, nil -} - func ObjectsBySlabKey(ctx context.Context, tx Tx, bucket string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) { rows, err := tx.Query(ctx, fmt.Sprintf(` SELECT %s @@ -2667,7 +2638,7 @@ func Object(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) }, nil } -func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { +func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, substring, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { // fetch one more to see if there are more entries if limit <= -1 { limit = math.MaxInt @@ -2693,6 +2664,12 @@ func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, sort whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) } + // apply substring + if prefix != "" { + whereExprs = append(whereExprs, "INSTR(o.object_id, ?) 
> 0") + whereArgs = append(whereArgs, substring) + } + // apply sorting orderByExprs, err := orderByObject(sortBy, sortDir) if err != nil { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index fe39f09e3..09b420345 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -470,8 +470,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, delim, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -756,10 +756,6 @@ func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMo return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) } -func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) -} - func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag" } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index a3dee2af5..6596848d3 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -457,8 +457,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, 
prefix, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, delim, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -757,10 +757,6 @@ func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMo return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) } -func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) -} - func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, DATETIME(o.created_at), o.etag" } From 35e1345c872738117ab55e46b8c099751358324c Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 3 Sep 2024 15:09:37 +0200 Subject: [PATCH 33/98] config: remove deprecated fields --- README.md | 5 --- api/worker.go | 3 -- bus/routes.go | 37 ++-------------- cmd/renterd/config.go | 82 +----------------------------------- cmd/renterd/logger.go | 4 -- cmd/renterd/node.go | 31 -------------- config/config.go | 14 +++--- internal/test/e2e/cluster.go | 4 +- worker/worker.go | 1 - 9 files changed, 13 insertions(+), 168 deletions(-) diff --git a/README.md b/README.md index c20749935..ccac14ab1 100644 --- a/README.md +++ b/README.md @@ -54,10 +54,6 @@ overview of all settings configurable through the CLI. | `Log.Database.Level` | Logger level for database queries (info\|warn\|error). 
Defaults to 'warn' | `warn` | `--log.database.level` | `RENTERD_LOG_DATABASE_LEVEL`, `RENTERD_LOG_LEVEL` | `log.database.level` | | `Log.Database.IgnoreRecordNotFoundError` | Enable ignoring 'not found' errors resulting from database queries. Defaults to 'true' | `true` | `--log.database.ignoreRecordNotFoundError` | `RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR` | `log.database.ignoreRecordNotFoundError` | | `Log.Database.SlowThreshold` | Threshold for slow queries in logger. Defaults to 100ms | `100ms` | `--log.database.slowThreshold` | `RENTERD_LOG_DATABASE_SLOW_THRESHOLD` | `log.database.slowThreshold` | -| `Log.Database.Level (DEPRECATED)` | Logger level | `warn` | `--db.logger.logLevel` | `RENTERD_DB_LOGGER_LOG_LEVEL` | `log.database.level` | -| `Log.Database.IgnoreRecordNotFoundError (DEPRECATED)` | Ignores 'not found' errors in logger | `true` | `--db.logger.ignoreNotFoundError`| `RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR` | `log.ignoreRecordNotFoundError` | -| `Log.Database.SlowThreshold (DEPRECATED)` | Threshold for slow queries in logger | `100ms` | `--db.logger.slowThreshold` | `RENTERD_DB_LOGGER_SLOW_THRESHOLD` | `log.slowThreshold` | -| `Log.Path (DEPRECATED)` | Path to directory for logs | - | `--log-path` | `RENTERD_LOG_PATH` | `log.path` | | `Database.MySQL.URI` | Database URI for the bus | - | `--db.uri` | `RENTERD_DB_URI` | `database.mysql.uri` | | `Database.MySQL.User` | Database username for the bus | `renterd` | `--db.user` | `RENTERD_DB_USER` | `database.mysql.user` | | `Database.MySQL.Password` | Database password for the bus | - | - | `RENTERD_DB_PASSWORD` | `database.mysql.password` | @@ -102,7 +98,6 @@ overview of all settings configurable through the CLI. 
| `S3.Enabled` | Enables/disables S3 API | `true` | `--s3.enabled` | `RENTERD_S3_ENABLED` | `s3.enabled` | | `S3.HostBucketBases` | Enables bucket rewriting in the router for the provided bases | - | `--s3.hostBucketBases` | `RENTERD_S3_HOST_BUCKET_BASES` | `s3.hostBucketBases` | | `S3.HostBucketEnabled` | Enables bucket rewriting in the router | - | `--s3.hostBucketEnabled` | `RENTERD_S3_HOST_BUCKET_ENABLED` | `s3.hostBucketEnabled` | -| `S3.KeypairsV4 (DEPRECATED)` | V4 keypairs for S3 | - | - | - | `s3.keypairsV4` | ### Single-Node Setup diff --git a/api/worker.go b/api/worker.go index ab4aec5dd..7a6dd8dc8 100644 --- a/api/worker.go +++ b/api/worker.go @@ -58,9 +58,6 @@ type ( ContractsResponse struct { Contracts []Contract `json:"contracts"` Errors map[types.PublicKey]string `json:"errors,omitempty"` - - // deprecated - Error string `json:"error,omitempty"` } MemoryResponse struct { diff --git a/bus/routes.go b/bus/routes.go index 92fe0843b..a904333aa 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -290,13 +290,6 @@ func (b *Bus) walletTransactionsHandler(jc jape.Context) { return } - // TODO: deprecate these parameters when moving to v2.0.0 - var before, since time.Time - if jc.DecodeForm("before", (*api.TimeRFC3339)(&before)) != nil || - jc.DecodeForm("since", (*api.TimeRFC3339)(&since)) != nil { - return - } - // convertToTransaction converts wallet event data to a Transaction. 
convertToTransaction := func(kind string, data wallet.EventData) (txn types.Transaction, ok bool) { ok = true @@ -339,32 +332,10 @@ func (b *Bus) walletTransactionsHandler(jc jape.Context) { return transactions } - if before.IsZero() && since.IsZero() { - events, err := b.w.Events(offset, limit) - if jc.Check("couldn't load transactions", err) == nil { - jc.Encode(convertToTransactions(events)) - } - return - } - - // TODO: remove this when 'before' and 'since' are deprecated, until then we - // fetch all transactions and paginate manually if either is specified - events, err := b.w.Events(0, -1) - if jc.Check("couldn't load transactions", err) != nil { - return - } - filtered := events[:0] - for _, txn := range events { - if (before.IsZero() || txn.Timestamp.Before(before)) && - (since.IsZero() || txn.Timestamp.After(since)) { - filtered = append(filtered, txn) - } - } - events = filtered - if limit == 0 || limit == -1 { - jc.Encode(convertToTransactions(events[offset:])) - } else { - jc.Encode(convertToTransactions(events[offset : offset+limit])) + // fetch events + events, err := b.w.Events(offset, limit) + if jc.Check("couldn't load transactions", err) == nil { + jc.Encode(convertToTransactions(events)) } } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index e1200f121..40f651086 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "encoding/hex" "errors" "flag" "fmt" @@ -20,10 +19,8 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/api" "go.sia.tech/renterd/config" - "go.sia.tech/renterd/worker/s3" "golang.org/x/term" "gopkg.in/yaml.v3" - "lukechampine.com/frand" ) // TODO: handle RENTERD_S3_HOST_BUCKET_BASES correctly @@ -45,7 +42,6 @@ var ( enableANSI = runtime.GOOS != "windows" hostBasesStr string - keyPairsV4 string workerRemotePassStr string workerRemoteAddrsStr string ) @@ -69,7 +65,6 @@ func defaultConfig() config.Config { }, }, Log: config.Log{ - Path: "", // 
deprecated. included for compatibility. Level: "", File: config.LogFile{ Enabled: true, @@ -127,7 +122,6 @@ func defaultConfig() config.Config { Address: "localhost:8080", Enabled: true, DisableAuth: false, - KeypairsV4: nil, }, } } @@ -219,17 +213,6 @@ func sanitizeConfig(cfg *config.Config) error { } } - // parse S3 auth keys - if cfg.S3.Enabled { - if !cfg.S3.DisableAuth && keyPairsV4 != "" { - var err error - cfg.S3.KeypairsV4, err = s3.Parsev4AuthKeys(strings.Split(keyPairsV4, ";")) - if err != nil { - return fmt.Errorf("failed to parse keypairs: %v", err) - } - } - } - // default log levels if cfg.Log.Level == "" { cfg.Log.Level = "info" @@ -267,12 +250,6 @@ func parseYamlConfig(cfg *config.Config) { } func parseCLIFlags(cfg *config.Config) { - // deprecated - these go first so that they can be overwritten by the non-deprecated flags - flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Database.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") - flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "(deprecated) Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") - flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "(deprecated) Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") - flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "(deprecated) Path to directory for logs (overrides with RENTERD_LOG_PATH)") - // node flag.StringVar(&cfg.HTTP.Address, "http", cfg.HTTP.Address, "Address for serving the API") flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") @@ -303,7 +280,6 @@ func parseCLIFlags(cfg *config.Config) { flag.Uint64Var(&cfg.Bus.AnnouncementMaxAgeHours, "bus.announcementMaxAgeHours", cfg.Bus.AnnouncementMaxAgeHours, "Max age for 
announcements") flag.BoolVar(&cfg.Bus.Bootstrap, "bus.bootstrap", cfg.Bus.Bootstrap, "Bootstraps gateway and consensus modules") flag.StringVar(&cfg.Bus.GatewayAddr, "bus.gatewayAddr", cfg.Bus.GatewayAddr, "Address for Sia peer connections (overrides with RENTERD_BUS_GATEWAY_ADDR)") - flag.DurationVar(&cfg.Bus.PersistInterval, "bus.persistInterval", cfg.Bus.PersistInterval, "(deprecated) Interval for persisting consensus updates") flag.DurationVar(&cfg.Bus.UsedUTXOExpiry, "bus.usedUTXOExpiry", cfg.Bus.UsedUTXOExpiry, "Expiry for used UTXOs in transactions") flag.Int64Var(&cfg.Bus.SlabBufferCompletionThreshold, "bus.slabBufferCompletionThreshold", cfg.Bus.SlabBufferCompletionThreshold, "Threshold for slab buffer upload (overrides with RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD)") @@ -374,10 +350,6 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_DB_NAME", &cfg.Database.MySQL.Database) parseEnvVar("RENTERD_DB_METRICS_NAME", &cfg.Database.MySQL.MetricsDatabase) - parseEnvVar("RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR", &cfg.Database.Log.IgnoreRecordNotFoundError) - parseEnvVar("RENTERD_DB_LOGGER_LOG_LEVEL", &cfg.Log.Level) - parseEnvVar("RENTERD_DB_LOGGER_SLOW_THRESHOLD", &cfg.Database.Log.SlowThreshold) - parseEnvVar("RENTERD_WORKER_ENABLED", &cfg.Worker.Enabled) parseEnvVar("RENTERD_WORKER_ID", &cfg.Worker.ID) parseEnvVar("RENTERD_WORKER_UNAUTHENTICATED_DOWNLOADS", &cfg.Worker.AllowUnauthenticatedDownloads) @@ -395,7 +367,6 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_S3_HOST_BUCKET_ENABLED", &cfg.S3.HostBucketEnabled) parseEnvVar("RENTERD_S3_HOST_BUCKET_BASES", &cfg.S3.HostBucketBases) - parseEnvVar("RENTERD_LOG_PATH", &cfg.Log.Path) parseEnvVar("RENTERD_LOG_LEVEL", &cfg.Log.Level) parseEnvVar("RENTERD_LOG_FILE_ENABLED", &cfg.Log.File.Enabled) parseEnvVar("RENTERD_LOG_FILE_FORMAT", &cfg.Log.File.Format) @@ -410,8 +381,6 @@ func parseEnvironmentVariables(cfg *config.Config) { 
parseEnvVar("RENTERD_WORKER_REMOTE_ADDRS", &workerRemoteAddrsStr) parseEnvVar("RENTERD_WORKER_API_PASSWORD", &workerRemotePassStr) - - parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) } // readPasswordInput reads a password from stdin. @@ -689,53 +658,6 @@ func setS3Config(cfg *config.Config) { fmt.Println("It should not be exposed to the public internet without setting up a reverse proxy.") setListenAddress("S3 Address", &cfg.S3.Address, true) - // s3 access key - if len(cfg.S3.KeypairsV4) != 0 { - fmt.Println("") - fmt.Println("A S3 keypair has already been created.") - fmt.Println("If you change your S3 key pair, you will need to update any scripts or applications that use the S3 API.") - if !promptYesNo("Would you like to change your S3 key pair?") { - return - } - } - - cfg.S3.KeypairsV4 = make(map[string]string) - - fmt.Println("") - answer := promptQuestion("Would you like to automatically generate a new S3 key pair or set your own?", []string{"auto", "manual"}) - if strings.EqualFold(answer, "auto") { - // generate a new key pair - accessKey := hex.EncodeToString(frand.Bytes(20)) - secretKey := hex.EncodeToString(frand.Bytes(20)) - cfg.S3.KeypairsV4[accessKey] = secretKey - fmt.Println("") - fmt.Println("A new S3 key pair has been generated below.") - fmt.Println(wrapANSI("\033[34;1m", "Access Key:", "\033[0m"), accessKey) - fmt.Println(wrapANSI("\033[34;1m", "Secret Key:", "\033[0m"), secretKey) - fmt.Println("") - return - } - - var accessKey, secretKey string - for { - fmt.Println("") - fmt.Println("Enter your S3 access key. It must between 16 and 128 characters long.") - accessKey = readInput("Enter access key") - if len(accessKey) >= 16 && len(accessKey) <= 128 { - break - } - fmt.Println(wrapANSI("\033[31m", "Access key must be between 16 and 128 characters!", "\033[0m")) - } - - for { - fmt.Println("") - fmt.Println("Enter your S3 secret key. 
It must be 40 characters long.") - secretKey = readInput("Enter secret key") - if len(secretKey) == 40 { - break - } - fmt.Println(wrapANSI("\033[31m", "Secret key must be be 40 characters!", "\033[0m")) - } - - cfg.S3.KeypairsV4[accessKey] = secretKey + // s3 keypairs + fmt.Println("The S3 keypairs need to be configured through the 's3' setting.") } diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go index d107cc4a0..d53bdd709 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -14,10 +14,6 @@ import ( func NewLogger(dir, filename string, cfg config.Log) (*zap.Logger, func(context.Context) error, error) { // path path := filepath.Join(dir, filename) - if cfg.Path != "" { - path = filepath.Join(cfg.Path, filename) - } - if cfg.File.Path != "" { path = cfg.File.Path } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index a9758439c..5510788bb 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -9,7 +9,6 @@ import ( "os" "path/filepath" "runtime" - "strings" "time" "go.sia.tech/core/consensus" @@ -21,7 +20,6 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" - "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus" @@ -411,35 +409,6 @@ func (n *node) Run() error { } } - // set initial S3 keys - if n.cfg.S3.Enabled && !n.cfg.S3.DisableAuth { - as, err := n.bus.S3AuthenticationSettings(context.Background()) - if err != nil && !strings.Contains(err.Error(), api.ErrSettingNotFound.Error()) { - return fmt.Errorf("failed to fetch S3 authentication settings: %w", err) - } else if as.V4Keypairs == nil { - as.V4Keypairs = make(map[string]string) - } - - // S3 key pair validation was broken at one point, we need to remove the - // invalid key pairs here to ensure we don't fail when we update the - // setting below. 
- for k, v := range as.V4Keypairs { - if err := (api.S3AuthenticationSettings{V4Keypairs: map[string]string{k: v}}).Validate(); err != nil { - n.logger.Infof("removing invalid S3 keypair for AccessKeyID %s, reason: %v", k, err) - delete(as.V4Keypairs, k) - } - } - - // merge keys - for k, v := range n.cfg.S3.KeypairsV4 { - as.V4Keypairs[k] = v - } - // update settings - if err := n.bus.UpdateSetting(context.Background(), api.SettingS3Authentication, as); err != nil { - return fmt.Errorf("failed to update S3 authentication settings: %w", err) - } - } - // start S3 server if n.s3Srv != nil { go n.s3Srv.Serve(n.s3Listener) diff --git a/config/config.go b/config/config.go index 6755d3869..89d950cce 100644 --- a/config/config.go +++ b/config/config.go @@ -41,7 +41,6 @@ type ( } Database struct { - Log DatabaseLog `yaml:"log,omitempty"` // deprecated. included for compatibility. // optional fields depending on backend MySQL MySQL `yaml:"mysql,omitempty"` } @@ -55,7 +54,6 @@ type ( RemotePassword string `yaml:"remotePassword,omitempty"` UsedUTXOExpiry time.Duration `yaml:"usedUtxoExpiry,omitempty"` SlabBufferCompletionThreshold int64 `yaml:"slabBufferCompleionThreshold,omitempty"` - PersistInterval time.Duration `yaml:"persistInterval,omitempty"` // deprecated } // LogFile configures the file output of the logger. @@ -76,7 +74,6 @@ type ( } Log struct { - Path string `yaml:"path,omitempty"` // deprecated. included for compatibility. Level string `yaml:"level,omitempty"` // global log level StdOut StdOut `yaml:"stdout,omitempty"` File LogFile `yaml:"file,omitempty"` @@ -104,12 +101,11 @@ type ( } S3 struct { - Address string `yaml:"address,omitempty"` - DisableAuth bool `yaml:"disableAuth,omitempty"` - Enabled bool `yaml:"enabled,omitempty"` - KeypairsV4 map[string]string `yaml:"keypairsV4,omitempty"` // deprecated. included for compatibility. 
- HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` - HostBucketBases []string `yaml:"hostBucketBases,omitempty"` + Address string `yaml:"address,omitempty"` + DisableAuth bool `yaml:"disableAuth,omitempty"` + Enabled bool `yaml:"enabled,omitempty"` + HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` + HostBucketBases []string `yaml:"hostBucketBases,omitempty"` } // Worker contains the configuration for a worker. diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 3b09b672e..639911471 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -752,8 +752,8 @@ func (c *TestCluster) WaitForContracts() []api.Contract { // fetch all contracts resp, err := c.Worker.Contracts(context.Background(), time.Minute) c.tt.OK(err) - if resp.Error != "" { - c.tt.Fatal(resp.Error) + if len(resp.Errors) > 0 { + c.tt.Fatal(resp.Errors) } return resp.Contracts } diff --git a/worker/worker.go b/worker/worker.go index be27f2cc6..f75884790 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1015,7 +1015,6 @@ func (w *Worker) rhpContractsHandlerGET(jc jape.Context) { contracts, errs := w.fetchContracts(ctx, busContracts, hosttimeout) resp := api.ContractsResponse{Contracts: contracts} if errs != nil { - resp.Error = errs.Error() resp.Errors = make(map[types.PublicKey]string) for pk, err := range errs { resp.Errors[pk] = err.Error() From b2c5eeff4b6dacd8d8c387719fe5f335d75dc26e Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 3 Sep 2024 15:33:19 +0200 Subject: [PATCH 34/98] stores: unmarshal in store --- stores/settingsdb.go | 81 ++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 55 deletions(-) diff --git a/stores/settingsdb.go b/stores/settingsdb.go index 31190f769..e4eeb20c9 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -16,16 +16,8 @@ const ( SettingUpload = "upload" ) -func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, _ error) { - value, 
err := s.fetchSetting(ctx, SettingGouging) - if err != nil { - return api.GougingSettings{}, err - } - - if err := json.Unmarshal([]byte(value), &gs); err != nil { - s.logger.Panicf("failed to unmarshal gouging settings '%s': %v", value, err) - return api.GougingSettings{}, err - } +func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { + err = s.fetchSetting(ctx, SettingGouging, &gs) return } @@ -37,16 +29,8 @@ func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSett return s.updateSetting(ctx, SettingGouging, string(data)) } -func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, _ error) { - value, err := s.fetchSetting(ctx, SettingPinned) - if err != nil { - return api.PinnedSettings{}, err - } - - if err := json.Unmarshal([]byte(value), &ps); err != nil { - s.logger.Panicf("failed to unmarshal pinned settings '%s': %v", value, err) - return api.PinnedSettings{}, err - } +func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) { + err = s.fetchSetting(ctx, SettingPinned, ps) return } @@ -58,16 +42,8 @@ func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettin return s.updateSetting(ctx, SettingPinned, string(data)) } -func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, _ error) { - value, err := s.fetchSetting(ctx, SettingUpload) - if err != nil { - return api.UploadSettings{}, err - } - - if err := json.Unmarshal([]byte(value), &us); err != nil { - s.logger.Panicf("failed to unmarshal upload settings '%s': %v", value, err) - return api.UploadSettings{}, err - } +func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) { + err = s.fetchSetting(ctx, SettingUpload, us) return } @@ -79,16 +55,8 @@ func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettin return s.updateSetting(ctx, SettingUpload, string(data)) } -func (s *SQLStore) 
S3Settings(ctx context.Context) (ss api.S3Settings, _ error) { - value, err := s.fetchSetting(ctx, SettingS3) - if err != nil { - return api.S3Settings{}, err - } - - if err := json.Unmarshal([]byte(value), &ss); err != nil { - s.logger.Panicf("failed to unmarshal s3 settings '%s': %v", value, err) - return api.S3Settings{}, err - } +func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, err error) { + err = s.fetchSetting(ctx, SettingS3, ss) return } @@ -119,26 +87,29 @@ func (s *SQLStore) Setting(ctx context.Context, key string, out interface{}) (er return json.Unmarshal([]byte(value), &out) } -func (s *SQLStore) fetchSetting(ctx context.Context, key string) (string, error) { - // check cache first +func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{}) error { s.settingsMu.Lock() defer s.settingsMu.Unlock() - value, ok := s.settings[key] - if ok { - return value, nil - } - // check database - var err error - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - value, err = tx.Setting(ctx, key) + value, ok := s.settings[key] + if !ok { + var err error + if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + value, err = tx.Setting(ctx, key) + return err + }); err != nil { + return fmt.Errorf("failed to fetch setting from db: %w", err) + } + s.settings[key] = value + } + + // unmarshal setting + if err := json.Unmarshal([]byte(value), &out); err != nil { + s.logger.Panicf("failed to unmarshal %s setting '%s': %v", key, value, err) return err - }) - if err != nil { - return "", fmt.Errorf("failed to fetch setting from db: %w", err) } - s.settings[key] = value - return value, nil + + return nil } func (s *SQLStore) updateSetting(ctx context.Context, key, value string) error { From 8a8e78313bf115086ed2243d8b3a73aad5c95bd0 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 3 Sep 2024 16:12:52 +0200 Subject: [PATCH 35/98] gouging: change gouging settings to base unit --- api/setting.go | 4 ++-- 
internal/bus/pinmanager.go | 8 ++++---- internal/gouging/gouging.go | 27 ++++++++------------------- 3 files changed, 14 insertions(+), 25 deletions(-) diff --git a/api/setting.go b/api/setting.go index 5976b00b2..c9601ccae 100644 --- a/api/setting.go +++ b/api/setting.go @@ -40,8 +40,8 @@ var ( DefaultGougingSettings = GougingSettings{ MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(15), // 15 SC per contract - MaxDownloadPrice: types.Siacoins(3000), // 3000 SC per 1 TB - MaxUploadPrice: types.Siacoins(3000), // 3000 SC per 1 TB + MaxDownloadPrice: types.Siacoins(3000).Div64(1e12), // 3000 SC per 1 TB + MaxUploadPrice: types.Siacoins(3000).Div64(1e12), // 3000 SC per 1 TB MaxStoragePrice: types.Siacoins(3000).Div64(1e12).Div64(144 * 30), // 3000 SC per TB per month HostBlockHeightLeeway: 6, // 6 blocks MinPriceTableValidity: 5 * time.Minute, // 5 minutes diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index c128a8392..07e8d7b4f 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -253,10 +253,10 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin // update max download price if pins.MaxDownload.IsPinned() { - update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxDownload.Value), rate) + maxDownloadCurr, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxDownload.Value), rate) if err != nil { pm.logger.Warn("failed to convert max download price to currency") - } else if !gs.MaxDownloadPrice.Equals(update) { + } else if update := maxDownloadCurr.Div64(1e12); !gs.MaxDownloadPrice.Equals(update) { gs.MaxDownloadPrice = update pm.logger.Infow("updating max download price", "old", gs.MaxDownloadPrice, "new", update, "rate", rate) updated = true @@ -277,10 +277,10 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin // update max upload price if pins.MaxUpload.IsPinned() { - update, err := 
convertCurrencyToSC(decimal.NewFromFloat(pins.MaxUpload.Value), rate) + maxUploadCurr, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxUpload.Value), rate) if err != nil { pm.logger.Warnw("failed to convert max upload price to currency", zap.Error(err)) - } else if !gs.MaxUploadPrice.Equals(update) { + } else if update := maxUploadCurr.Div64(1e12); !gs.MaxUploadPrice.Equals(update) { pm.logger.Infow("updating max upload price", "old", gs.MaxUploadPrice, "new", update, "rate", rate) gs.MaxUploadPrice = update updated = true diff --git a/internal/gouging/gouging.go b/internal/gouging/gouging.go index 8e729247d..5620498e2 100644 --- a/internal/gouging/gouging.go +++ b/internal/gouging/gouging.go @@ -13,8 +13,6 @@ import ( ) const ( - bytesPerTB = 1e12 - // maxBaseRPCPriceVsBandwidth is the max ratio for sane pricing between the // MinBaseRPCPrice and the MinDownloadBandwidthPrice. This ensures that 1 // million base RPC charges are at most 1% of the cost to download 4TB. This @@ -190,7 +188,7 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t } // check LatestRevisionCost - expect sane value - maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(gs.MaxDownloadPrice.Div64(bytesPerTB).Mul64(2048)) + maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(gs.MaxDownloadPrice.Mul64(2048)) if overflow { maxRevisionCost = types.MaxCurrency } @@ -292,12 +290,9 @@ func checkPruneGougingRHPv2(gs api.GougingSettings, hs *rhpv2.HostSettings) erro if overflow { return fmt.Errorf("%w: overflow detected when computing sector download price", errHostSettingsGouging) } - dpptb, overflow := sectorDownloadPrice.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TB - if overflow { - return fmt.Errorf("%w: overflow detected when computing download price per TiB", errHostSettingsGouging) - } - if !gs.MaxDownloadPrice.IsZero() && dpptb.Cmp(gs.MaxDownloadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max dl 
price: %v > %v", errHostSettingsGouging, dpptb, gs.MaxDownloadPrice) + dppb := sectorDownloadPrice.Div64(rhpv2.SectorSize) + if !gs.MaxDownloadPrice.IsZero() && dppb.Cmp(gs.MaxDownloadPrice) > 0 { + return fmt.Errorf("%w: cost per byte exceeds max dl price: %v > %v", errHostSettingsGouging, dppb, gs.MaxDownloadPrice) } return nil } @@ -310,12 +305,9 @@ func checkDownloadGougingRHPv3(gs api.GougingSettings, pt *rhpv3.HostPriceTable) if overflow { return fmt.Errorf("%w: overflow detected when computing sector download price", ErrPriceTableGouging) } - dpptb, overflow := sectorDownloadPrice.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TiB - if overflow { - return fmt.Errorf("%w: overflow detected when computing download price per TiB", ErrPriceTableGouging) - } - if !gs.MaxDownloadPrice.IsZero() && dpptb.Cmp(gs.MaxDownloadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max dl price: %v > %v", ErrPriceTableGouging, dpptb, gs.MaxDownloadPrice) + dppb := sectorDownloadPrice.Div64(rhpv2.SectorSize) + if !gs.MaxDownloadPrice.IsZero() && dppb.Cmp(gs.MaxDownloadPrice) > 0 { + return fmt.Errorf("%w: cost per byte exceeds max dl price: %v > %v", ErrPriceTableGouging, dppb, gs.MaxDownloadPrice) } return nil } @@ -328,10 +320,7 @@ func checkUploadGougingRHPv3(gs api.GougingSettings, pt *rhpv3.HostPriceTable) e if overflow { return fmt.Errorf("%w: overflow detected when computing sector price", ErrPriceTableGouging) } - uploadPrice, overflow := sectorUploadPricePerMonth.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TiB - if overflow { - return fmt.Errorf("%w: overflow detected when computing upload price per TiB", ErrPriceTableGouging) - } + uploadPrice := sectorUploadPricePerMonth.Div64(rhpv2.SectorSize) if !gs.MaxUploadPrice.IsZero() && uploadPrice.Cmp(gs.MaxUploadPrice) > 0 { return fmt.Errorf("%w: cost per TiB exceeds max ul price: %v > %v", ErrPriceTableGouging, uploadPrice, gs.MaxUploadPrice) } From 
1c4648d63a16b664285e52224082251bc5e07f28 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 3 Sep 2024 17:16:07 +0200 Subject: [PATCH 36/98] settings: update store --- api/setting.go | 15 +- bus/bus.go | 116 +-------------- bus/routes.go | 27 ++-- cmd/renterd/node.go | 2 +- internal/bus/pinmanager.go | 5 +- internal/test/e2e/cluster.go | 4 +- stores/settings.go | 281 +++++++++++++++++++++++++++++++++++ stores/settingsdb.go | 130 ---------------- stores/sql.go | 5 +- stores/sql/main.go | 7 +- stores/sql_test.go | 3 +- 11 files changed, 319 insertions(+), 276 deletions(-) create mode 100644 stores/settings.go delete mode 100644 stores/settingsdb.go diff --git a/api/setting.go b/api/setting.go index e4741e5ff..a9443d6be 100644 --- a/api/setting.go +++ b/api/setting.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "go.sia.tech/core/consensus" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" ) @@ -19,10 +20,6 @@ var ( // ErrInvalidRedundancySettings is returned if the redundancy settings are // not valid ErrInvalidRedundancySettings = errors.New("invalid redundancy settings") - - // ErrSettingNotFound is returned if a requested setting is not present in the - // database. - ErrSettingNotFound = errors.New("setting not found") ) var ( @@ -65,10 +62,12 @@ var ( V4Keypairs: map[string]string{}, }, } +) - // DefaultUploadSettings define the default upload settings the bus is - // configured with on startup. - DefaultUploadSettings = UploadSettings{ +// DefaultUploadSettings define the default upload settings the bus is +// configured with on startup. +func DefaultUploadSettings(network *consensus.Network) UploadSettings { + return UploadSettings{ Packing: UploadPackingSettings{ Enabled: true, SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB @@ -78,7 +77,7 @@ var ( TotalShards: 30, }, } -) +} type ( // GougingSettings contain some price settings used in price gouging. 
diff --git a/bus/bus.go b/bus/bus.go index 718b4a091..4bb509d73 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -31,7 +31,6 @@ import ( rhp3 "go.sia.tech/renterd/internal/rhp/v3" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" - "go.sia.tech/renterd/stores" "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" @@ -297,9 +296,7 @@ type ( S3Settings(ctx context.Context) (api.S3Settings, error) UpdateS3Settings(ctx context.Context, s3as api.S3Settings) error - // required for compat - Setting(ctx context.Context, key string, out interface{}) error - DeleteSetting(ctx context.Context, key string) error + MigrateV2Settings(ctx context.Context) error } WalletMetricsRecorder interface { @@ -364,6 +361,11 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa rhp3: rhp3.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l), } + // migrate settings store + if err := store.MigrateV2Settings(ctx); err != nil { + return nil, err + } + // create contract locker b.contractLocker = ibus.NewContractLocker() @@ -379,11 +381,6 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa // create wallet metrics recorder b.walletMetricsRecorder = ibus.NewWalletMetricRecorder(store, w, defaultWalletRecordMetricInterval, l) - // migrate settings to V2 types - if err := b.compatV2Settings(ctx); err != nil { - return nil, err - } - return b, nil } @@ -574,107 +571,6 @@ func (b *Bus) addRenewedContract(ctx context.Context, renewedFrom types.FileCont return r, nil } -func (b *Bus) compatV2Settings(ctx context.Context) error { - // escape early if all settings are present - if !errors.Is(errors.Join( - b.ss.Setting(ctx, stores.SettingGouging, nil), - b.ss.Setting(ctx, stores.SettingPinned, nil), - b.ss.Setting(ctx, stores.SettingS3, nil), - b.ss.Setting(ctx, stores.SettingUpload, nil), - ), api.ErrSettingNotFound) { - return nil - } - - // migrate gouging settings - if _, err := 
b.ss.GougingSettings(ctx); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if errors.Is(err, api.ErrSettingNotFound) { - if err := b.ss.UpdateGougingSettings(ctx, api.DefaultGougingSettings); err != nil { - return err - } - } - - // migrate S3 settings - var s3as api.S3AuthenticationSettings - if err := b.ss.Setting(ctx, "s3authentication", &s3as); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if errors.Is(err, api.ErrSettingNotFound) { - if err := b.ss.UpdateS3Settings(ctx, api.DefaultS3Settings); err != nil { - return err - } - } else if err == nil { - s3s := api.S3Settings{Authentication: s3as} - if err := s3s.Validate(); err != nil { - return fmt.Errorf("failed to migrate S3 setting: %w", err) - } else if err := b.ss.UpdateS3Settings(ctx, s3s); err != nil { - return err - } - } - - // migrate pinned settings - var ps api.PinnedSettings - if err := b.ss.Setting(ctx, "pricepinning", &ps); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if errors.Is(err, api.ErrSettingNotFound) { - if err := b.ss.UpdatePinnedSettings(ctx, api.DefaultPinnedSettings); err != nil { - return err - } - } else { - if err := ps.Validate(); err != nil { - return fmt.Errorf("failed to migrate pinned setting: %w", err) - } else if err := b.ss.UpdatePinnedSettings(ctx, ps); err != nil { - return err - } - } - - // migrate upload settings - us := api.DefaultUploadSettings - var css struct { - Default string `json:"default"` - } - - // override default contract set on default upload settings - if err := b.ss.Setting(ctx, "contractset", &css); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if err == nil { - us.DefaultContractSet = css.Default - } - - // override redundancy settings on default upload settings - var rs api.RedundancySettings - if err := b.ss.Setting(ctx, "redundancy", &rs); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if 
errors.Is(err, api.ErrSettingNotFound) { - // default redundancy settings for testnet are different from mainnet - if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name { - us.Redundancy = api.DefaultRedundancySettingsTestnet - } - } else { - us.Redundancy = rs - } - - // override upload packing settings on default upload settings - var ups api.UploadPackingSettings - if err := b.ss.Setting(ctx, "uploadpacking", &ups); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - return err - } else if err == nil { - us.Packing = ups - } - - if err := us.Validate(); err != nil { - return fmt.Errorf("failed to migrate upload setting: %w", err) - } else if err := b.ss.UpdateUploadSettings(ctx, us); err != nil { - return err - } - - // delete old settings - return errors.Join( - b.ss.DeleteSetting(ctx, "contractset"), - b.ss.DeleteSetting(ctx, "pricepinning"), - b.ss.DeleteSetting(ctx, "uploadpacking"), - ) -} - func (b *Bus) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { seed := blake2b.Sum256(append(b.deriveSubKey("renterkey"), hostKey[:]...)) pk := types.NewPrivateKeyFromSeed(seed[:]) diff --git a/bus/routes.go b/bus/routes.go index e4159dbeb..52b32ab5e 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1348,9 +1348,8 @@ func (b *Bus) packedSlabsHandlerDonePOST(jc jape.Context) { } func (b *Bus) settingsGougingHandlerGET(jc jape.Context) { - if gs, err := b.ss.GougingSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get gouging settings", err) == nil { + gs, err := b.ss.GougingSettings(jc.Request.Context()) + if jc.Check("failed to get gouging settings", err) == nil { jc.Encode(gs) } } @@ -1376,9 +1375,8 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { - if pps, err := b.ss.PinnedSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, 
http.StatusNotFound) - } else if jc.Check("failed to get pinned settings", err) == nil { + pps, err := b.ss.PinnedSettings(jc.Request.Context()) + if jc.Check("failed to get pinned settings", err) == nil { // populate the Autopilots map with the current autopilots aps, err := b.as.Autopilots(jc.Request.Context()) if jc.Check("failed to fetch autopilots", err) != nil { @@ -1423,9 +1421,8 @@ func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { } func (b *Bus) settingsUploadHandlerGET(jc jape.Context) { - if us, err := b.ss.UploadSettings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get upload settings", err) == nil { + us, err := b.ss.UploadSettings(jc.Request.Context()) + if jc.Check("failed to get upload settings", err) == nil { jc.Encode(us) } } @@ -1450,9 +1447,8 @@ func (b *Bus) settingsUploadHandlerPUT(jc jape.Context) { } func (b *Bus) settingsS3HandlerGET(jc jape.Context) { - if s3s, err := b.ss.S3Settings(jc.Request.Context()); errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - } else if jc.Check("failed to get S3 settings", err) == nil { + s3s, err := b.ss.S3Settings(jc.Request.Context()) + if jc.Check("failed to get S3 settings", err) == nil { jc.Encode(s3s) } } @@ -1610,7 +1606,7 @@ func (b *Bus) slabsPartialHandlerPOST(jc jape.Context) { return } us, err := b.ss.UploadSettings(jc.Request.Context()) - if err != nil && !errors.Is(err, api.ErrSettingNotFound) { + if err != nil { jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), http.StatusInternalServerError) return } @@ -1645,10 +1641,7 @@ func (b *Bus) paramsHandlerUploadGET(jc jape.Context) { var uploadPacking bool var contractSet string us, err := b.ss.UploadSettings(jc.Request.Context()) - if err != nil && !errors.Is(err, api.ErrSettingNotFound) { - jc.Error(fmt.Errorf("could not get upload settings: %w", err), http.StatusInternalServerError) - return - } else 
if err == nil { + if jc.Check("could not get upload settings", err) == nil { contractSet = us.DefaultContractSet uploadPacking = us.Packing.Enabled } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 5510788bb..6721408ca 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -264,7 +264,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network if err != nil { return nil, nil, err } - sqlStore, err := stores.NewSQLStore(storeCfg) + sqlStore, err := stores.NewSQLStore(storeCfg, network) if err != nil { return nil, nil, err } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 716068565..8929b9064 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -311,10 +311,7 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { // fetch pinned settings settings, err := pm.s.PinnedSettings(ctx) - if errors.Is(err, api.ErrSettingNotFound) { - pm.logger.Debug("price pinning not configured, skipping price update") - return nil - } else if err != nil { + if err != nil { return fmt.Errorf("failed to fetch pinned settings: %w", err) } else if !settings.Enabled { pm.logger.Debug("price pinning is disabled, skipping price update") diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 1c59cddfe..f96440835 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -526,7 +526,8 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, return nil, nil, nil, nil, err } - sqlStore, err := stores.NewSQLStore(storeCfg) + network, genesis := testNetwork() + sqlStore, err := stores.NewSQLStore(storeCfg, network) if err != nil { return nil, nil, nil, nil, err } @@ -554,7 +555,6 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } // create chain manager - network, genesis := testNetwork() store, state, err := chain.NewDBStore(bdb, network, genesis) if err != nil { return nil, 
nil, nil, nil, err diff --git a/stores/settings.go b/stores/settings.go new file mode 100644 index 000000000..4d13262c6 --- /dev/null +++ b/stores/settings.go @@ -0,0 +1,281 @@ +package stores + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "go.sia.tech/renterd/api" + sql "go.sia.tech/renterd/stores/sql" + "go.uber.org/zap" +) + +const ( + SettingGouging = "gouging" + SettingPinned = "pinned" + SettingS3 = "s3" + SettingUpload = "upload" +) + +func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { + err = s.fetchSetting(ctx, SettingPinned, &gs) + return +} + +func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { + return s.updateSetting(ctx, SettingGouging, gs) +} + +func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) { + err = s.fetchSetting(ctx, SettingPinned, &ps) + return +} + +func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { + return s.updateSetting(ctx, SettingPinned, ps) +} + +func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) { + err = s.fetchSetting(ctx, SettingUpload, &us) + return +} + +func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error { + return s.updateSetting(ctx, SettingUpload, us) +} + +func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, err error) { + err = s.fetchSetting(ctx, SettingS3, &ss) + return +} + +func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) error { + return s.updateSetting(ctx, SettingS3, ss) +} + +// MigrateV2Settings migrates the settings from the old format to the new, +// migrating the existing settings over to the new types and removing the old +// settings. If a setting is not present in the database it will be set to its +// default setting. 
If an existing setting is not valid, the default will be +// used and a warning will get logged. +func (s *SQLStore) MigrateV2Settings(ctx context.Context) error { + return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + // escape early if none of the old settings are present + var found bool + for _, key := range []string{ + "pricepinning", + "s3authentication", + "contractset", + "redundancy", + "uploadpacking", + } { + if _, err := tx.Setting(ctx, key); err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if err == nil { + found = true + break + } + } + if !found { + return nil + } + + s.logger.Info("migrating settings...") + + // migrate gouging settings + value, err := tx.Setting(ctx, "gouging") + if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if errors.Is(err, sql.ErrSettingNotFound) { + if err := tx.UpdateSetting(ctx, SettingGouging, s.defaultSetting(SettingGouging)); err != nil { + return err + } + } + + // migrate pinned settings + value, err = tx.Setting(ctx, "pricepinning") + if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if err == nil { + var ps api.PinnedSettings + if err := json.Unmarshal([]byte(value), &ps); err != nil { + s.logger.Warnw("failed to unmarshal pinned settings, using default", zap.Error(err)) + value = s.defaultSetting(SettingPinned) + } else if err := ps.Validate(); err != nil { + s.logger.Warnw("failed to migrate pinned settings, using default", zap.Error(err)) + value = s.defaultSetting(SettingPinned) + } + + // update setting and delete old value + if err := tx.UpdateSetting(ctx, SettingPinned, value); err != nil { + return err + } else if err := tx.DeleteSetting(ctx, "pricepinning"); err != nil { + return err + } + } + + // migrate s3 settings + value, err = tx.Setting(ctx, "s3authentication") + if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if err == nil { + var s3s api.S3Settings + if err := 
json.Unmarshal([]byte(value), &s3s.Authentication); err != nil { + s.logger.Warnw("failed to unmarshal S3 authentication settings, using default", zap.Error(err)) + s3s = api.DefaultS3Settings + } else if err := s3s.Validate(); err != nil { + s.logger.Warnw("failed to migrate S3 settings, using default", zap.Error(err)) + s3s = api.DefaultS3Settings + } + + // update setting and delete old value + update, _ := json.Marshal(s3s) + if err := tx.UpdateSetting(ctx, SettingS3, string(update)); err != nil { + return err + } else if err := tx.DeleteSetting(ctx, "s3authentication"); err != nil { + return err + } + } + + us := api.DefaultUploadSettings(s.network) + + // migrate contractset settings + value, err = tx.Setting(ctx, "contractset") + if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if err == nil { + var css struct { + Default string `json:"default"` + } + if err := json.Unmarshal([]byte(value), &css); err != nil { + s.logger.Warnw("failed to unmarshal contractset setting, using default", zap.Error(err)) + } else { + us.DefaultContractSet = css.Default + } + + // delete old value + if err := tx.DeleteSetting(ctx, "contractset"); err != nil { + return err + } + } + + // migrate redundancy settings + value, err = tx.Setting(ctx, "redundancy") + if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return err + } else if err == nil { + var rs api.RedundancySettings + if err := json.Unmarshal([]byte(value), &rs); err != nil { + s.logger.Warnw("failed to unmarshal redundancy settings, using default", zap.Error(err)) + } else if err := rs.Validate(); err != nil { + s.logger.Warnw("failed to migrate redundancy settings, using default", zap.Error(err)) + } else { + us.Redundancy = rs + } + + // delete old value + if err := tx.DeleteSetting(ctx, "redundancy"); err != nil { + return err + } + } + + // migrate uploadpacking settings + value, err = tx.Setting(ctx, "uploadpacking") + if err != nil && !errors.Is(err, 
sql.ErrSettingNotFound) { + return err + } else if err == nil { + var ups api.UploadPackingSettings + if err := json.Unmarshal([]byte(value), &ups); err != nil { + s.logger.Warnw("failed to unmarshal uploadpacking settings, using default", zap.Error(err)) + } else { + us.Packing = ups + } + + // delete old value + if err := tx.DeleteSetting(ctx, "uploadpacking"); err != nil { + return err + } + } + + // update upload settings + if update, err := json.Marshal(us); err != nil { + return fmt.Errorf("failed to marshal upload settings: %w", err) + } else if err := tx.UpdateSetting(ctx, SettingUpload, string(update)); err != nil { + return err + } + + s.logger.Info("successfully migrated settings") + return nil + }) +} + +func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{}) error { + s.settingsMu.Lock() + defer s.settingsMu.Unlock() + + // fetch setting value + value, ok := s.settings[key] + if !ok { + var err error + if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + value, err = tx.Setting(ctx, key) + return err + }); err != nil && !errors.Is(err, sql.ErrSettingNotFound) { + return fmt.Errorf("failed to fetch setting from db: %w", err) + } else if err != nil { + value = s.defaultSetting(key) + } + s.settings[key] = value + } + + // unmarshal setting + if err := json.Unmarshal([]byte(value), &out); err != nil { + s.logger.Warnf("failed to unmarshal %s setting '%s': %v, using default", key, value, err) + return json.Unmarshal([]byte(s.defaultSetting(key)), &out) + } + + return nil +} + +func (s *SQLStore) updateSetting(ctx context.Context, key string, value any) error { + s.settingsMu.Lock() + defer s.settingsMu.Unlock() + + // marshal the value + b, err := json.Marshal(value) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + + // update db first + err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateSetting(ctx, key, string(b)) + }) + if err != nil { + 
return err + } + + // update cache second + s.settings[key] = string(b) + return nil +} + +func (s *SQLStore) defaultSetting(key string) string { + switch key { + case SettingGouging: + b, _ := json.Marshal(api.DefaultGougingSettings) + return string(b) + case SettingPinned: + b, _ := json.Marshal(api.DefaultPinnedSettings) + return string(b) + case SettingS3: + b, _ := json.Marshal(api.DefaultS3Settings) + return string(b) + case SettingUpload: + b, _ := json.Marshal(api.DefaultUploadSettings(s.network)) + return string(b) + default: + panic("unknown setting") // developer error + } +} diff --git a/stores/settingsdb.go b/stores/settingsdb.go deleted file mode 100644 index e4eeb20c9..000000000 --- a/stores/settingsdb.go +++ /dev/null @@ -1,130 +0,0 @@ -package stores - -import ( - "context" - "encoding/json" - "fmt" - - "go.sia.tech/renterd/api" - sql "go.sia.tech/renterd/stores/sql" -) - -const ( - SettingGouging = "gouging" - SettingPinned = "pinned" - SettingS3 = "s3" - SettingUpload = "upload" -) - -func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { - err = s.fetchSetting(ctx, SettingGouging, &gs) - return -} - -func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { - data, err := json.Marshal(gs) - if err != nil { - return fmt.Errorf("couldn't marshal the given value, error: %v", err) - } - return s.updateSetting(ctx, SettingGouging, string(data)) -} - -func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) { - err = s.fetchSetting(ctx, SettingPinned, ps) - return -} - -func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { - data, err := json.Marshal(ps) - if err != nil { - return fmt.Errorf("couldn't marshal the given value, error: %v", err) - } - return s.updateSetting(ctx, SettingPinned, string(data)) -} - -func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) { - err = 
s.fetchSetting(ctx, SettingUpload, us) - return -} - -func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error { - data, err := json.Marshal(us) - if err != nil { - return fmt.Errorf("couldn't marshal the given value, error: %v", err) - } - return s.updateSetting(ctx, SettingUpload, string(data)) -} - -func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, err error) { - err = s.fetchSetting(ctx, SettingS3, ss) - return -} - -func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) error { - data, err := json.Marshal(ss) - if err != nil { - return fmt.Errorf("couldn't marshal the given value, error: %v", err) - } - return s.updateSetting(ctx, SettingS3, string(data)) -} - -func (s *SQLStore) DeleteSetting(ctx context.Context, key string) (err error) { - return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.DeleteSetting(ctx, key) - }) -} - -func (s *SQLStore) Setting(ctx context.Context, key string, out interface{}) (err error) { - var value string - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - value, err = tx.Setting(ctx, key) - return err - }) - if err != nil { - return fmt.Errorf("failed to fetch setting from db: %w", err) - } - - return json.Unmarshal([]byte(value), &out) -} - -func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{}) error { - s.settingsMu.Lock() - defer s.settingsMu.Unlock() - - value, ok := s.settings[key] - if !ok { - var err error - if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - value, err = tx.Setting(ctx, key) - return err - }); err != nil { - return fmt.Errorf("failed to fetch setting from db: %w", err) - } - s.settings[key] = value - } - - // unmarshal setting - if err := json.Unmarshal([]byte(value), &out); err != nil { - s.logger.Panicf("failed to unmarshal %s setting '%s': %v", key, value, err) - return err - } - - return nil -} - -func (s *SQLStore) updateSetting(ctx context.Context, 
key, value string) error { - // update db first - s.settingsMu.Lock() - defer s.settingsMu.Unlock() - - err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.UpdateSetting(ctx, key, value) - }) - if err != nil { - return err - } - - // update cache second - s.settings[key] = value - return nil -} diff --git a/stores/sql.go b/stores/sql.go index 50533768d..cd0c54e08 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "go.sia.tech/core/consensus" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/stores/sql" @@ -39,6 +40,7 @@ type ( logger *zap.SugaredLogger walletAddress types.Address + network *consensus.Network // ObjectDB related fields slabBufferMgr *SlabBufferManager @@ -64,7 +66,7 @@ type ( // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only // pass migrate=true for the first instance of SQLHostDB if you connect via the // same Dialector multiple times. -func NewSQLStore(cfg Config) (*SQLStore, error) { +func NewSQLStore(cfg Config, network *consensus.Network) (*SQLStore, error) { if err := os.MkdirAll(cfg.PartialSlabDir, 0700); err != nil { return nil, fmt.Errorf("failed to create partial slab dir '%s': %v", cfg.PartialSlabDir, err) } @@ -97,6 +99,7 @@ func NewSQLStore(cfg Config) (*SQLStore, error) { settings: make(map[string]string), walletAddress: cfg.WalletAddress, + network: network, slabPruneSigChan: make(chan struct{}, 1), lastPrunedAt: time.Now(), diff --git a/stores/sql/main.go b/stores/sql/main.go index 02f876214..948c78a67 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -28,7 +28,10 @@ import ( "lukechampine.com/frand" ) -var ErrNegativeOffset = errors.New("offset can not be negative") +var ( + ErrNegativeOffset = errors.New("offset can not be negative") + ErrSettingNotFound = errors.New("setting not found") +) // helper types type ( @@ -2212,7 +2215,7 @@ func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) { var 
value string err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value)) if errors.Is(err, dsql.ErrNoRows) { - return "", api.ErrSettingNotFound + return "", ErrSettingNotFound } else if err != nil { return "", fmt.Errorf("failed to fetch setting '%s': %w", key, err) } diff --git a/stores/sql_test.go b/stores/sql_test.go index 0846254cb..48fb7a6a9 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "go.sia.tech/core/consensus" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" @@ -179,7 +180,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { LongQueryDuration: 100 * time.Millisecond, LongTxDuration: 100 * time.Millisecond, RetryTransactionIntervals: []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 200 * time.Millisecond}, - }) + }, &consensus.Network{}) if err != nil { t.Fatal("failed to create SQLStore", err) } From 0e49a7c7dd9606770fae255ad1493c2c7ce75d7e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 3 Sep 2024 17:18:08 +0200 Subject: [PATCH 37/98] e2e: fix TestGouging --- internal/gouging/gouging.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/gouging/gouging.go b/internal/gouging/gouging.go index 5620498e2..fd9aa3ff6 100644 --- a/internal/gouging/gouging.go +++ b/internal/gouging/gouging.go @@ -188,7 +188,11 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t } // check LatestRevisionCost - expect sane value - maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(gs.MaxDownloadPrice.Mul64(2048)) + twoKiBMax, overflow := gs.MaxDownloadPrice.Mul64WithOverflow(2048) + if overflow { + twoKiBMax = types.MaxCurrency + } + maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(twoKiBMax) if overflow { maxRevisionCost = types.MaxCurrency } From bd4d1c86b243f418cbdc4341002c78197082339c Mon Sep 17 00:00:00 2001 From: PJ 
Date: Tue, 3 Sep 2024 17:18:35 +0200 Subject: [PATCH 38/98] wallet: remove before and since wallet txn options --- api/wallet.go | 12 ---------- internal/test/e2e/cluster_test.go | 38 ------------------------------- 2 files changed, 50 deletions(-) diff --git a/api/wallet.go b/api/wallet.go index ad8acb56d..23fca9623 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -78,18 +78,6 @@ type ( // WalletTransactionsOption is an option for the WalletTransactions method. type WalletTransactionsOption func(url.Values) -func WalletTransactionsWithBefore(before time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("before", before.Format(time.RFC3339)) - } -} - -func WalletTransactionsWithSince(since time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("since", since.Format(time.RFC3339)) - } -} - func WalletTransactionsWithLimit(limit int) WalletTransactionsOption { return func(q url.Values) { q.Set("limit", fmt.Sprint(limit)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index ec4bfabd4..09042410e 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1604,50 +1604,12 @@ func TestWalletTransactions(t *testing.T) { t.Fatal("transactions don't match", cmp.Diff(txns, allTxns[2:])) } - // Find the first index that has a different timestamp than the first. - var txnIdx int - for i := 1; i < len(allTxns); i++ { - if allTxns[i].Timestamp.Unix() != allTxns[0].Timestamp.Unix() { - txnIdx = i - break - } - } - medianTxnTimestamp := allTxns[txnIdx].Timestamp - // Limit the number of transactions to 5. txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithLimit(5)) tt.OK(err) if len(txns) != 5 { t.Fatalf("expected exactly 5 transactions, got %v", len(txns)) } - - // Fetch txns before and since median. 
- txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithBefore(medianTxnTimestamp)) - tt.OK(err) - if len(txns) == 0 { - for _, txn := range allTxns { - fmt.Println(txn.Timestamp.Unix()) - } - t.Fatal("expected at least 1 transaction before median timestamp", medianTxnTimestamp.Unix()) - } - for _, txn := range txns { - if txn.Timestamp.Unix() >= medianTxnTimestamp.Unix() { - t.Fatal("expected only transactions before median timestamp") - } - } - txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithSince(medianTxnTimestamp)) - tt.OK(err) - if len(txns) == 0 { - for _, txn := range allTxns { - fmt.Println(txn.Timestamp.Unix()) - } - t.Fatal("expected at least 1 transaction after median timestamp") - } - for _, txn := range txns { - if txn.Timestamp.Unix() < medianTxnTimestamp.Unix() { - t.Fatal("expected only transactions after median timestamp", medianTxnTimestamp.Unix()) - } - } } func TestUploadPacking(t *testing.T) { From dc3580b734d28b2483f7986797d92f88cb8f4012 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 3 Sep 2024 17:28:54 +0200 Subject: [PATCH 39/98] stores: add migrations --- internal/sql/migrations.go | 6 ++++++ .../mysql/migrations/main/migration_00016_gouging_units.sql | 1 + .../migrations/main/migration_00016_gouging_units.sql | 1 + 3 files changed, 8 insertions(+) create mode 100644 stores/sql/mysql/migrations/main/migration_00016_gouging_units.sql create mode 100644 stores/sql/sqlite/migrations/main/migration_00016_gouging_units.sql diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 377bf6fc5..aefee160d 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -205,6 +205,12 @@ var ( return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00015_reset_drift", log) }, }, + { + ID: "00016_gouging_units", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00016_gouging_units", log) + }, + }, } } 
MetricsMigrations = func(ctx context.Context, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { diff --git a/stores/sql/mysql/migrations/main/migration_00016_gouging_units.sql b/stores/sql/mysql/migrations/main/migration_00016_gouging_units.sql new file mode 100644 index 000000000..c151d90a3 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00016_gouging_units.sql @@ -0,0 +1 @@ +UPDATE ephemeral_accounts SET drift = "0", clean_shutdown = 0, requires_sync = 1; \ No newline at end of file diff --git a/stores/sql/sqlite/migrations/main/migration_00016_gouging_units.sql b/stores/sql/sqlite/migrations/main/migration_00016_gouging_units.sql new file mode 100644 index 000000000..c151d90a3 --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00016_gouging_units.sql @@ -0,0 +1 @@ +UPDATE ephemeral_accounts SET drift = "0", clean_shutdown = 0, requires_sync = 1; \ No newline at end of file From 36a73dbe27fd7beee7f873bd3695ec6210517a8f Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 3 Sep 2024 17:31:19 +0200 Subject: [PATCH 40/98] testing: fix setting key --- stores/settings.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/settings.go b/stores/settings.go index 4d13262c6..2c9fc9a6b 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -19,7 +19,7 @@ const ( ) func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { - err = s.fetchSetting(ctx, SettingPinned, &gs) + err = s.fetchSetting(ctx, SettingGouging, &gs) return } From 51a4669c5bf87a2b1aabf9f34e6c73a744f094e3 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 3 Sep 2024 19:45:49 +0200 Subject: [PATCH 41/98] bus: remove /search/hosts --- api/host.go | 7 +- autopilot/autopilot.go | 8 +- autopilot/client.go | 2 +- autopilot/contractor/contractor.go | 8 +- bus/bus.go | 4 +- bus/client/hosts.go | 28 +- bus/routes.go | 6 +- internal/test/e2e/blocklist_test.go | 6 +- internal/test/e2e/cluster_test.go | 4 +- 
internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 11 +- stores/hostdb_test.go | 147 ++------- stores/sql/database.go | 6 +- stores/sql/main.go | 452 ++++++++++++++-------------- stores/sql/mysql/main.go | 8 +- stores/sql/sqlite/main.go | 8 +- 16 files changed, 305 insertions(+), 404 deletions(-) diff --git a/api/host.go b/api/host.go index 36dd0862a..8ce9b6d76 100644 --- a/api/host.go +++ b/api/host.go @@ -57,9 +57,8 @@ type ( MinRecentScanFailures uint64 `json:"minRecentScanFailures"` } - // SearchHostsRequest is the request type for the /api/bus/search/hosts - // endpoint. - SearchHostsRequest struct { + // HostsRequest is the request type for the /api/bus/hosts endpoint. + HostsRequest struct { Offset int `json:"offset"` Limit int `json:"limit"` AutopilotID string `json:"autopilotID"` @@ -110,7 +109,7 @@ type ( Offset int } - SearchHostOptions struct { + HostOptions struct { AutopilotID string AddressContains string FilterMode string diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 58bf76bb1..6c24db8ad 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,9 +54,9 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error // metrics @@ -194,7 +194,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{}) + hosts, err := ap.bus.Hosts(ctx, api.HostOptions{}) if jc.Check("failed to get hosts", err) != nil { return } @@ 
-747,7 +747,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.SearchHostsRequest + var req api.HostsRequest if jc.Decode(&req) != nil { return } else if req.AutopilotID != "" && req.AutopilotID != ap.id { @@ -755,7 +755,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { return } - hosts, err := ap.bus.SearchHosts(jc.Request.Context(), api.SearchHostOptions{ + hosts, err := ap.bus.Hosts(jc.Request.Context(), api.HostOptions{ AutopilotID: ap.id, Offset: req.Offset, Limit: req.Limit, diff --git a/autopilot/client.go b/autopilot/client.go index 010c1f037..22ab93f36 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -41,7 +41,7 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err e // HostInfo returns information about all hosts. func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostsRequest{ + err = c.c.POST("/hosts", api.HostsRequest{ Offset: offset, Limit: limit, FilterMode: filterMode, diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 37223864b..639729c28 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -90,8 +90,8 @@ type Bus interface { FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) FormContract(ctx context.Context, renterAddress types.Address, renterFunds types.Currency, hostKey types.PublicKey, hostIP string, hostCollateral types.Currency, endHeight uint64) (api.ContractMetadata, error) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error - SearchHosts(ctx 
context.Context, opts api.SearchHostOptions) ([]api.Host, error) SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error } @@ -1129,7 +1129,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, for _, c := range contracts { usedHosts[c.HostKey] = struct{}{} } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) + allHosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return nil, fmt.Errorf("failed to fetch usable hosts: %w", err) } @@ -1228,7 +1228,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error { var usabilityBreakdown unusableHostsBreakdown // fetch all hosts that are not blocked - hosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) + hosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } @@ -1281,7 +1281,7 @@ func performPostMaintenanceTasks(ctx *mCtx, bus Bus, w Worker, alerter alerts.Al if err != nil { return fmt.Errorf("failed to fetch contracts: %w", err) } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{}) + allHosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } diff --git a/bus/bus.go b/bus/bus.go index 423329e94..4f25c5426 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -201,12 +201,12 @@ type ( Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) + Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, 
offset, limit int) ([]api.HostAddress, error) RecordHostScans(ctx context.Context, scans []api.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, check api.HostCheck) error @@ -442,6 +442,7 @@ func (b *Bus) Handler() http.Handler { "GET /contract/:id/roots": b.contractIDRootsHandlerGET, "GET /contract/:id/size": b.contractSizeHandlerGET, + "POST /hosts": b.hostsHandlerPOST, "GET /hosts/allowlist": b.hostsAllowlistHandlerGET, "PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT, "GET /hosts/blocklist": b.hostsBlocklistHandlerGET, @@ -479,7 +480,6 @@ func (b *Bus) Handler() http.Handler { "POST /slabbuffer/done": b.packedSlabsHandlerDonePOST, "POST /slabbuffer/fetch": b.packedSlabsHandlerFetchPOST, - "POST /search/hosts": b.searchHostsHandlerPOST, "GET /search/objects": b.searchObjectsHandlerGET, "DELETE /sectors/:hk/:root": b.sectorsHostRootHandlerDELETE, diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 391a9977e..a13ac5c57 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -16,6 +16,20 @@ func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h api.Host, return } +// Hosts returns all hosts that match certain search criteria. 
+func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.Host, err error) { + err = c.c.WithContext(ctx).POST("/hosts", api.HostsRequest{ + AutopilotID: opts.AutopilotID, + Offset: opts.Offset, + Limit: opts.Limit, + FilterMode: opts.FilterMode, + UsabilityMode: opts.UsabilityMode, + AddressContains: opts.AddressContains, + KeyIn: opts.KeyIn, + }, &hosts) + return +} + // HostAllowlist returns the allowlist. func (c *Client) HostAllowlist(ctx context.Context) (allowlist []types.PublicKey, err error) { err = c.c.WithContext(ctx).GET("/hosts/allowlist", &allowlist) @@ -68,20 +82,6 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) return } -// SearchHosts returns all hosts that match certain search criteria. -func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.Host, err error) { - err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ - AutopilotID: opts.AutopilotID, - Offset: opts.Offset, - Limit: opts.Limit, - FilterMode: opts.FilterMode, - UsabilityMode: opts.UsabilityMode, - AddressContains: opts.AddressContains, - KeyIn: opts.KeyIn, - }, &hosts) - return -} - // UpdateHostAllowlist updates the host allowlist, adding and removing the given entries. 
func (c *Client) UpdateHostAllowlist(ctx context.Context, add, remove []types.PublicKey, clear bool) (err error) { err = c.c.WithContext(ctx).PUT("/hosts/allowlist", api.UpdateAllowlistRequest{Add: add, Remove: remove, Clear: clear}) diff --git a/bus/routes.go b/bus/routes.go index 9733a0eb5..0e278c0a7 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -564,8 +564,8 @@ func (b *Bus) walletPendingHandler(jc jape.Context) { jc.Encode(relevant) } -func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { - var req api.SearchHostsRequest +func (b *Bus) hostsHandlerPOST(jc jape.Context) { + var req api.HostsRequest if jc.Decode(&req) != nil { return } @@ -606,7 +606,7 @@ func (b *Bus) searchHostsHandlerPOST(jc jape.Context) { req.Limit = -1 } - hosts, err := b.hs.SearchHosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) + hosts, err := b.hs.Hosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return } diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 39d2540a3..c82283726 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -121,7 +121,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.SearchHosts(context.Background(), api.SearchHostOptions{}) + hosts, err := b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -146,7 +146,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.SearchHosts(context.Background(), api.SearchHostOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) 
@@ -156,7 +156,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.SearchHosts(context.Background(), api.SearchHostOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index f048666fb..9064d1968 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -290,7 +290,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.SearchHosts(context.Background(), api.SearchHostOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) @@ -744,7 +744,7 @@ func TestUploadDownloadBasic(t *testing.T) { // check that stored data on hosts was updated tt.Retry(100, 100*time.Millisecond, func() error { - hosts, err := cluster.Bus.SearchHosts(context.Background(), api.SearchHostOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) for _, host := range hosts { if host.StoredData != rhpv2.SectorSize { diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 81db425a4..4ad48fc76 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -59,7 +59,7 @@ func TestHostPruning(t *testing.T) { tt.OKAll(a.Trigger(true)) // assert the host was not pruned - hostss, err := b.SearchHosts(context.Background(), api.SearchHostOptions{}) + hostss, err := b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -71,7 +71,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = 
b.SearchHosts(context.Background(), api.SearchHostOptions{}) + hostss, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hostss) != 0 { a.Trigger(true) // trigger autopilot diff --git a/stores/hostdb.go b/stores/hostdb.go index 5111682d1..22f9ab9f3 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -17,7 +17,7 @@ var ( // Host returns information about a host. func (s *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { - hosts, err := s.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hostKey}, 0, 1) + hosts, err := s.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hostKey}, 0, 1) if err != nil { return api.Host{}, err } else if len(hosts) == 0 { @@ -48,20 +48,15 @@ func (s *SQLStore) ResetLostSectors(ctx context.Context, hk types.PublicKey) err }) } -func (s *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { +func (s *SQLStore) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { var hosts []api.Host err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - hosts, err = tx.SearchHosts(ctx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) + hosts, err = tx.Hosts(ctx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) return }) return hosts, err } -// Hosts returns non-blocked hosts at given offset and limit. 
-func (s *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]api.Host, error) { - return s.SearchHosts(ctx, "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, offset, limit) -} - func (s *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index b4bba0dc1..c62d67adc 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -38,7 +38,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, 0, -1) + allHosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -63,7 +63,7 @@ func TestSQLHostDB(t *testing.T) { } // Same thing again but with hosts. - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -106,101 +106,8 @@ func TestSQLHostDB(t *testing.T) { } } -func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, settings rhpv2.HostSettings) error { - return s.RecordHostScans(context.Background(), []api.HostScan{ - { - HostKey: hk, - Settings: settings, - Success: err == nil, - Timestamp: t, - }, - }) -} - -// TestSQLHosts tests the Hosts method of the SQLHostDB type. 
-func TestSQLHosts(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { - t.Fatal("unexpected", len(hosts), err) - } - if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if host := hosts[0]; host.PublicKey != hk1 { - t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) - } - if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if host := hosts[0]; host.PublicKey != hk2 { - t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) - } - if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { - t.Fatal("unexpected", len(hosts), err) - } - if _, err := ss.Hosts(ctx, -1, -1); !errors.Is(err, sql.ErrNegativeOffset) { - t.Fatal("unexpected error", err) - } - - // Add a scan for each host. - n := time.Now() - if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - if err := ss.addTestScan(hk2, n.Add(-2*time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - if err := ss.addTestScan(hk3, n.Add(-3*time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - - // Fetch all hosts using the HostsForScanning method. 
- hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 3 { - t.Fatal("wrong number of addresses") - } - if hostAddresses[0].PublicKey != hk3 { - t.Fatal("wrong key") - } - if hostAddresses[1].PublicKey != hk2 { - t.Fatal("wrong key") - } - if hostAddresses[2].PublicKey != hk1 { - t.Fatal("wrong key") - } - - // Fetch one host by setting the cutoff exactly to hk2. - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 1 { - t.Fatal("wrong number of addresses") - } - - // Fetch no hosts. - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 0 { - t.Fatal("wrong number of addresses") - } -} - -// TestSearchHosts is a unit test for SearchHosts. -func TestSearchHosts(t *testing.T) { +// TestHosts is a unit test for the Hosts method of the SQLHostDB type. +func TestHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() @@ -216,7 +123,7 @@ func TestSearchHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // search all hosts - his, err := ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err := ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -224,19 +131,19 @@ func TestSearchHosts(t *testing.T) { } // assert offset & limit are taken into account - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) if err != nil { t.Fatal(err) } else if len(his) != 1 { t.Fatal("unexpected") } - his, err = 
ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 2) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 2) if err != nil { t.Fatal(err) } else if len(his) != 2 { t.Fatal("unexpected") } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 3, 1) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 3, 1) if err != nil { t.Fatal(err) } else if len(his) != 0 { @@ -244,16 +151,16 @@ func TestSearchHosts(t *testing.T) { } // assert address and key filters are taken into account - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1001", nil, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1001", nil, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 2 { + if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 2 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk1}, 0, -1); err != nil || len(hosts) != 0 { + if hosts, err := 
ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk1}, 0, -1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } @@ -262,7 +169,7 @@ func TestSearchHosts(t *testing.T) { if err != nil { t.Fatal(err) } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 2 { @@ -270,7 +177,7 @@ func TestSearchHosts(t *testing.T) { } else if his[0].PublicKey != (types.PublicKey{2}) || his[1].PublicKey != (types.PublicKey{3}) { t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -325,7 +232,7 @@ func TestSearchHosts(t *testing.T) { } // fetch all hosts - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -342,7 +249,7 @@ func TestSearchHosts(t *testing.T) { } // assert autopilot filter is taken into account - his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -364,7 +271,7 @@ func TestSearchHosts(t *testing.T) { if err != nil { t.Fatal(err) } - his, err = ss.SearchHosts(context.Background(), ap1, 
api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -376,7 +283,7 @@ func TestSearchHosts(t *testing.T) { t.Fatal("unexpected", c1, ok) } - his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -692,7 +599,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -770,23 +677,23 @@ func TestSQLHostAllowlist(t *testing.T) { t.Fatal(err) } - assertSearch := func(total, allowed, blocked int) error { + assertHosts := func(total, allowed, blocked int) error { t.Helper() - hosts, err := ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err := ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { return err } if len(hosts) != total { return fmt.Errorf("invalid number of hosts: %v", len(hosts)) } - hosts, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err = ss.Hosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { return err } if len(hosts) != allowed { return fmt.Errorf("invalid number of hosts: %v", len(hosts)) } - hosts, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err = 
ss.Hosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { return err } @@ -798,7 +705,7 @@ func TestSQLHostAllowlist(t *testing.T) { // Search for hosts using different modes. Should have 3 hosts in total, 2 // allowed ones and 2 blocked ones. - if err := assertSearch(3, 1, 2); err != nil { + if err := assertHosts(3, 1, 2); err != nil { t.Fatal(err) } @@ -818,7 +725,7 @@ func TestSQLHostAllowlist(t *testing.T) { // Search for hosts using different modes. Should have 2 hosts in total, 0 // allowed ones and 2 blocked ones. - if err := assertSearch(2, 0, 2); err != nil { + if err := assertHosts(2, 0, 2); err != nil { t.Fatal(err) } @@ -830,7 +737,7 @@ func TestSQLHostAllowlist(t *testing.T) { // Search for hosts using different modes. Should have 2 hosts in total, 2 // allowed ones and 0 blocked ones. - if err := assertSearch(2, 2, 0); err != nil { + if err := assertHosts(2, 2, 0); err != nil { t.Fatal(err) } @@ -860,7 +767,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } diff --git a/stores/sql/database.go b/stores/sql/database.go index cc2aab0df..67b1d7382 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -185,6 +185,9 @@ type ( // InsertObject inserts a new object into the database. InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error + // Hosts returns a list of hosts that match the provided filters + Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) + // HostsForScanning returns a list of hosts to scan which haven't been // scanned since at least maxLastScan. 
HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) @@ -310,9 +313,6 @@ type ( // existing ones and setting the clean shutdown flag. SaveAccounts(ctx context.Context, accounts []api.Account) error - // SearchHosts returns a list of hosts that match the provided filters - SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) - // SearchObjects returns a list of objects that contain the provided // substring. SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) diff --git a/stores/sql/main.go b/stores/sql/main.go index bb03bd86d..d3801b175 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -655,6 +655,232 @@ func HostBlocklist(ctx context.Context, tx sql.Tx) ([]string, error) { return blocklist, nil } +func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { + if offset < 0 { + return nil, ErrNegativeOffset + } + + var hasAllowlist, hasBlocklist bool + if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_allowlist_entries)").Scan(&hasAllowlist); err != nil { + return nil, fmt.Errorf("failed to check for allowlist: %w", err) + } else if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_blocklist_entries)").Scan(&hasBlocklist); err != nil { + return nil, fmt.Errorf("failed to check for blocklist: %w", err) + } + + // validate filterMode + switch filterMode { + case api.HostFilterModeAllowed: + case api.HostFilterModeBlocked: + case api.HostFilterModeAll: + default: + return nil, fmt.Errorf("invalid filter mode: %v", filterMode) + } + + var whereExprs []string + var args []any + + // fetch autopilot id + var autopilotID int64 + if autopilot != "" { + if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE 
identifier = ?", autopilot). + Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) { + return nil, api.ErrAutopilotNotFound + } else if err != nil { + return nil, fmt.Errorf("failed to fetch autopilot id: %w", err) + } + } + + // filter allowlist/blocklist + switch filterMode { + case api.HostFilterModeAllowed: + if hasAllowlist { + whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + if hasBlocklist { + whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + case api.HostFilterModeBlocked: + if hasAllowlist { + whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + if hasBlocklist { + whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + if !hasAllowlist && !hasBlocklist { + // if neither an allowlist nor a blocklist exist, all hosts are + // allowed which means we return none + return []api.Host{}, nil + } + } + + // filter address + if addressContains != "" { + whereExprs = append(whereExprs, "h.net_address LIKE ?") + args = append(args, "%"+addressContains+"%") + } + + // filter public key + if len(keyIn) > 0 { + pubKeys := make([]any, len(keyIn)) + for i, pk := range keyIn { + pubKeys[i] = PublicKey(pk) + } + placeholders := strings.Repeat("?, ", len(keyIn)-1) + "?" + whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) + args = append(args, pubKeys...) + } + + // filter usability + whereApExpr := "" + if autopilot != "" { + whereApExpr = "AND hc.db_autopilot_id = ?" 
+ } + switch usabilityMode { + case api.UsabilityFilterModeUsable: + whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) + args = append(args, autopilotID) + case api.UsabilityFilterModeUnusable: + whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) + args = append(args, autopilotID) + } + + // offset + limit + if limit == -1 { + limit = math.MaxInt64 + } + offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset) + + // fetch stored data for each host + rows, err := tx.Query(ctx, "SELECT host_id, SUM(size) FROM contracts GROUP BY host_id") + if err != nil { + return nil, fmt.Errorf("failed to fetch stored data: %w", err) + } + defer rows.Close() + + storedDataMap := make(map[int64]uint64) + for rows.Next() { + var hostID int64 + var storedData uint64 + if err := rows.Scan(&hostID, &storedData); err != nil { + return nil, fmt.Errorf("failed to scan stored data: %w", err) + } + storedDataMap[hostID] = storedData + } + + // query hosts + var blockedExprs []string + if hasAllowlist { + blockedExprs = append(blockedExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + if hasBlocklist { + blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM 
host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") + } + var blockedExpr string + if len(blockedExprs) > 0 { + blockedExpr = strings.Join(blockedExprs, " OR ") + } else { + blockedExpr = "FALSE" + } + var whereExpr string + if len(whereExprs) > 0 { + whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") + } + rows, err = tx.Query(ctx, fmt.Sprintf(` + SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry, + h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success, + h.uptime, h.downtime, h.successful_interactions, h.failed_interactions, COALESCE(h.lost_sectors, 0), + h.scanned, h.resolved_addresses, %s + FROM hosts h + %s + %s + `, blockedExpr, whereExpr, offsetLimitStr), args...) + if err != nil { + return nil, fmt.Errorf("failed to fetch hosts: %w", err) + } + defer rows.Close() + + var hosts []api.Host + for rows.Next() { + var h api.Host + var hostID int64 + var pte dsql.NullTime + var resolvedAddresses string + err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey), + &h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte, + (*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeNS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess, + &h.Interactions.SecondToLastScanSuccess, &h.Interactions.Uptime, &h.Interactions.Downtime, + &h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors, + &h.Scanned, &resolvedAddresses, &h.Blocked, + ) + if err != nil { + return nil, fmt.Errorf("failed to scan host: %w", err) + } + + if resolvedAddresses != "" { + h.ResolvedAddresses = strings.Split(resolvedAddresses, ",") + h.Subnets, err = utils.AddressesToSubnets(h.ResolvedAddresses) + if err != nil { + return nil, fmt.Errorf("failed to convert addresses to subnets: %w", err) + } + } + h.PriceTable.Expiry = pte.Time + h.StoredData = storedDataMap[hostID] + 
hosts = append(hosts, h) + } + + // query host checks + var apExpr string + if autopilot != "" { + apExpr = "WHERE ap.identifier = ?" + args = append(args, autopilot) + } + rows, err = tx.Query(ctx, fmt.Sprintf(` + SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip, + hc.usability_gouging, usability_not_accepting_contracts, hc.usability_not_announced, hc.usability_not_completing_scan, + hc.score_age, hc.score_collateral, hc.score_interactions, hc.score_storage_remaining, hc.score_uptime, + hc.score_version, hc.score_prices, hc.gouging_contract_err, hc.gouging_download_err, hc.gouging_gouging_err, + hc.gouging_prune_err, hc.gouging_upload_err + FROM ( + SELECT h.id, h.public_key + FROM hosts h + %s + %s + ) AS h + INNER JOIN host_checks hc ON hc.db_host_id = h.id + INNER JOIN autopilots ap ON hc.db_autopilot_id = ap.id + %s + `, whereExpr, offsetLimitStr, apExpr), args...) + if err != nil { + return nil, fmt.Errorf("failed to fetch host checks: %w", err) + } + defer rows.Close() + + hostChecks := make(map[types.PublicKey]map[string]api.HostCheck) + for rows.Next() { + var ap string + var pk PublicKey + var hc api.HostCheck + err := rows.Scan(&pk, &ap, &hc.Usability.Blocked, &hc.Usability.Offline, &hc.Usability.LowScore, &hc.Usability.RedundantIP, + &hc.Usability.Gouging, &hc.Usability.NotAcceptingContracts, &hc.Usability.NotAnnounced, &hc.Usability.NotCompletingScan, + &hc.Score.Age, &hc.Score.Collateral, &hc.Score.Interactions, &hc.Score.StorageRemaining, &hc.Score.Uptime, + &hc.Score.Version, &hc.Score.Prices, &hc.Gouging.ContractErr, &hc.Gouging.DownloadErr, &hc.Gouging.GougingErr, + &hc.Gouging.PruneErr, &hc.Gouging.UploadErr) + if err != nil { + return nil, fmt.Errorf("failed to scan host: %w", err) + } + if _, ok := hostChecks[types.PublicKey(pk)]; !ok { + hostChecks[types.PublicKey(pk)] = make(map[string]api.HostCheck) + } + hostChecks[types.PublicKey(pk)][ap] = hc + } + + // fill 
in hosts + for i := range hosts { + hosts[i].Checks = hostChecks[hosts[i].PublicKey] + } + return hosts, nil +} + func HostsForScanning(ctx context.Context, tx sql.Tx, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { if offset < 0 { return nil, ErrNegativeOffset @@ -1975,232 +2201,6 @@ func ResetLostSectors(ctx context.Context, tx sql.Tx, hk types.PublicKey) error return nil } -func SearchHosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - if offset < 0 { - return nil, ErrNegativeOffset - } - - var hasAllowlist, hasBlocklist bool - if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_allowlist_entries)").Scan(&hasAllowlist); err != nil { - return nil, fmt.Errorf("failed to check for allowlist: %w", err) - } else if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_blocklist_entries)").Scan(&hasBlocklist); err != nil { - return nil, fmt.Errorf("failed to check for blocklist: %w", err) - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - return nil, fmt.Errorf("invalid filter mode: %v", filterMode) - } - - var whereExprs []string - var args []any - - // fetch autopilot id - var autopilotID int64 - if autopilot != "" { - if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", autopilot). 
- Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) { - return nil, api.ErrAutopilotNotFound - } else if err != nil { - return nil, fmt.Errorf("failed to fetch autopilot id: %w", err) - } - } - - // filter allowlist/blocklist - switch filterMode { - case api.HostFilterModeAllowed: - if hasAllowlist { - whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - case api.HostFilterModeBlocked: - if hasAllowlist { - whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if !hasAllowlist && !hasBlocklist { - // if neither an allowlist nor a blocklist exist, all hosts are - // allowed which means we return none - return []api.Host{}, nil - } - } - - // filter address - if addressContains != "" { - whereExprs = append(whereExprs, "h.net_address LIKE ?") - args = append(args, "%"+addressContains+"%") - } - - // filter public key - if len(keyIn) > 0 { - pubKeys := make([]any, len(keyIn)) - for i, pk := range keyIn { - pubKeys[i] = PublicKey(pk) - } - placeholders := strings.Repeat("?, ", len(keyIn)-1) + "?" - whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) - args = append(args, pubKeys...) - } - - // filter usability - whereApExpr := "" - if autopilot != "" { - whereApExpr = "AND hc.db_autopilot_id = ?" 
- } - switch usabilityMode { - case api.UsabilityFilterModeUsable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) - args = append(args, autopilotID) - case api.UsabilityFilterModeUnusable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) - args = append(args, autopilotID) - } - - // offset + limit - if limit == -1 { - limit = math.MaxInt64 - } - offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset) - - // fetch stored data for each host - rows, err := tx.Query(ctx, "SELECT host_id, SUM(size) FROM contracts GROUP BY host_id") - if err != nil { - return nil, fmt.Errorf("failed to fetch stored data: %w", err) - } - defer rows.Close() - - storedDataMap := make(map[int64]uint64) - for rows.Next() { - var hostID int64 - var storedData uint64 - if err := rows.Scan(&hostID, &storedData); err != nil { - return nil, fmt.Errorf("failed to scan stored data: %w", err) - } - storedDataMap[hostID] = storedData - } - - // query hosts - var blockedExprs []string - if hasAllowlist { - blockedExprs = append(blockedExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM 
host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - var blockedExpr string - if len(blockedExprs) > 0 { - blockedExpr = strings.Join(blockedExprs, " OR ") - } else { - blockedExpr = "FALSE" - } - var whereExpr string - if len(whereExprs) > 0 { - whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") - } - rows, err = tx.Query(ctx, fmt.Sprintf(` - SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry, - h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success, - h.uptime, h.downtime, h.successful_interactions, h.failed_interactions, COALESCE(h.lost_sectors, 0), - h.scanned, h.resolved_addresses, %s - FROM hosts h - %s - %s - `, blockedExpr, whereExpr, offsetLimitStr), args...) - if err != nil { - return nil, fmt.Errorf("failed to fetch hosts: %w", err) - } - defer rows.Close() - - var hosts []api.Host - for rows.Next() { - var h api.Host - var hostID int64 - var pte dsql.NullTime - var resolvedAddresses string - err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey), - &h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte, - (*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeNS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess, - &h.Interactions.SecondToLastScanSuccess, &h.Interactions.Uptime, &h.Interactions.Downtime, - &h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors, - &h.Scanned, &resolvedAddresses, &h.Blocked, - ) - if err != nil { - return nil, fmt.Errorf("failed to scan host: %w", err) - } - - if resolvedAddresses != "" { - h.ResolvedAddresses = strings.Split(resolvedAddresses, ",") - h.Subnets, err = utils.AddressesToSubnets(h.ResolvedAddresses) - if err != nil { - return nil, fmt.Errorf("failed to convert addresses to subnets: %w", err) - } - } - h.PriceTable.Expiry = pte.Time - h.StoredData = storedDataMap[hostID] - 
hosts = append(hosts, h) - } - - // query host checks - var apExpr string - if autopilot != "" { - apExpr = "WHERE ap.identifier = ?" - args = append(args, autopilot) - } - rows, err = tx.Query(ctx, fmt.Sprintf(` - SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip, - hc.usability_gouging, usability_not_accepting_contracts, hc.usability_not_announced, hc.usability_not_completing_scan, - hc.score_age, hc.score_collateral, hc.score_interactions, hc.score_storage_remaining, hc.score_uptime, - hc.score_version, hc.score_prices, hc.gouging_contract_err, hc.gouging_download_err, hc.gouging_gouging_err, - hc.gouging_prune_err, hc.gouging_upload_err - FROM ( - SELECT h.id, h.public_key - FROM hosts h - %s - %s - ) AS h - INNER JOIN host_checks hc ON hc.db_host_id = h.id - INNER JOIN autopilots ap ON hc.db_autopilot_id = ap.id - %s - `, whereExpr, offsetLimitStr, apExpr), args...) - if err != nil { - return nil, fmt.Errorf("failed to fetch host checks: %w", err) - } - defer rows.Close() - - hostChecks := make(map[types.PublicKey]map[string]api.HostCheck) - for rows.Next() { - var ap string - var pk PublicKey - var hc api.HostCheck - err := rows.Scan(&pk, &ap, &hc.Usability.Blocked, &hc.Usability.Offline, &hc.Usability.LowScore, &hc.Usability.RedundantIP, - &hc.Usability.Gouging, &hc.Usability.NotAcceptingContracts, &hc.Usability.NotAnnounced, &hc.Usability.NotCompletingScan, - &hc.Score.Age, &hc.Score.Collateral, &hc.Score.Interactions, &hc.Score.StorageRemaining, &hc.Score.Uptime, - &hc.Score.Version, &hc.Score.Prices, &hc.Gouging.ContractErr, &hc.Gouging.DownloadErr, &hc.Gouging.GougingErr, - &hc.Gouging.PruneErr, &hc.Gouging.UploadErr) - if err != nil { - return nil, fmt.Errorf("failed to scan host: %w", err) - } - if _, ok := hostChecks[types.PublicKey(pk)]; !ok { - hostChecks[types.PublicKey(pk)] = make(map[string]api.HostCheck) - } - hostChecks[types.PublicKey(pk)][ap] = hc - } - - // fill 
in hosts - for i := range hosts { - hosts[i].Checks = hostChecks[hosts[i].PublicKey] - } - return hosts, nil -} - func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) { var value string err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value)) diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 08ff0010e..9f4ea97a5 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -403,6 +403,10 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { return ssql.HostBlocklist(ctx, tx) } +func (tx *MainDatabaseTx) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +} + func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) } @@ -756,10 +760,6 @@ func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md return md, nil } -func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) -} - func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index b72ec5e8c..6ee1f3c04 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -392,6 +392,10 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) 
([]string, error) { return ssql.HostBlocklist(ctx, tx) } +func (tx *MainDatabaseTx) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +} + func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) } @@ -757,10 +761,6 @@ func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md return md, nil } -func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) -} - func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) } From e6d837cf3bc9b3266090618fb2c682270fbdb15c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 13:40:51 +0200 Subject: [PATCH 42/98] bus: fix analyse --- autopilot/autopilot.go | 6 ++-- autopilot/contractor/contractor.go | 4 +-- autopilot/workerpool.go | 1 - bus/bus.go | 9 ----- bus/client/contracts.go | 6 ++++ bus/client/wallet.go | 5 +-- internal/test/e2e/cluster_test.go | 55 ++++++++++++++++++------------ worker/client/rhp.go | 7 ---- 8 files changed, 49 insertions(+), 44 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 41ae0ded9..2d07cd928 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -11,6 +11,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" 
"go.sia.tech/renterd/api" @@ -41,6 +42,7 @@ type Bus interface { // contracts AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error + BroadcastRevision(ctx context.Context, contractID types.FileContractID) error Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) (contracts []api.ContractMetadata, err error) FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) @@ -84,7 +86,7 @@ type Bus interface { // wallet Wallet(ctx context.Context) (api.WalletResponse, error) - WalletPending(ctx context.Context) (resp []types.Transaction, err error) + WalletPending(ctx context.Context) (resp []wallet.Event, err error) WalletRedistribute(ctx context.Context, outputs int, amount types.Currency) (ids []types.TransactionID, err error) } @@ -609,7 +611,7 @@ func (ap *Autopilot) performWalletMaintenance(ctx context.Context) error { } for _, txn := range pending { for _, mTxnID := range ap.maintenanceTxnIDs { - if mTxnID == txn.ID() { + if mTxnID == types.TransactionID(txn.ID) { l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID) return nil } diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index a886f7cc2..22374a1bb 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -83,6 +83,7 @@ const ( type Bus interface { AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error + BroadcastRevision(ctx context.Context, contractID types.FileContractID) error ConsensusState(ctx context.Context) (api.ConsensusState, error) Contract(ctx context.Context, id types.FileContractID) 
(api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) (contracts []api.ContractMetadata, err error) @@ -98,7 +99,6 @@ type Bus interface { type Worker interface { Contracts(ctx context.Context, hostTimeout time.Duration) (api.ContractsResponse, error) - RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) } @@ -433,7 +433,7 @@ func (c *Contractor) broadcastRevisions(ctx context.Context, w Worker, contracts // broadcast revision ctx, cancel := context.WithTimeout(ctx, timeoutBroadcastRevision) - err := w.RHPBroadcast(ctx, contract.ID) + err := c.bus.BroadcastRevision(ctx, contract.ID) cancel() if utils.IsErr(err, errors.New("transaction has a file contract with an outdated revision number")) { continue // don't log - revision was already broadcasted diff --git a/autopilot/workerpool.go b/autopilot/workerpool.go index 11bcfa09b..871f1babc 100644 --- a/autopilot/workerpool.go +++ b/autopilot/workerpool.go @@ -18,7 +18,6 @@ type Worker interface { ID(ctx context.Context) (string, error) MigrateSlab(ctx context.Context, s object.Slab, set string) (api.MigrateSlabResponse, error) - RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) } diff --git a/bus/bus.go b/bus/bus.go index 2c8a264e6..ce600424c 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -510,15 +510,6 @@ func (b *Bus) Handler() http.Handler { "GET /wallet/pending": b.walletPendingHandler, "POST /wallet/redistribute": b.walletRedistributeHandler, "POST 
/wallet/send": b.walletSendSiacoinsHandler, - // "GET /wallet": b.walletHandler, - // "POST /wallet/discard": b.walletDiscardHandler, - // "POST /wallet/fund": b.walletFundHandler, - // "GET /wallet/outputs": b.walletOutputsHandler, - // "GET /wallet/pending": b.walletPendingHandler, - // "POST /wallet/redistribute": b.walletRedistributeHandler, - // "POST /wallet/send": b.walletSendSiacoinsHandler, - // "POST /wallet/sign": b.walletSignHandler, - // "GET /wallet/transactions": b.walletTransactionsHandler, "GET /webhooks": b.webhookHandlerGet, "POST /webhooks": b.webhookHandlerPost, diff --git a/bus/client/contracts.go b/bus/client/contracts.go index 6929c8af9..b0b58de69 100644 --- a/bus/client/contracts.go +++ b/bus/client/contracts.go @@ -11,6 +11,12 @@ import ( "go.sia.tech/renterd/api" ) +// BroadcastRevision broadcasts the latest revision for a contract. +func (c *Client) BroadcastRevision(ctx context.Context, contractID types.FileContractID) (err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contract/%s/broadcast", contractID), nil, nil) + return +} + // AddContract adds the provided contract to the metadata store. func (c *Client) AddContract(ctx context.Context, contract rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (added api.ContractMetadata, err error) { err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contract/%s", contract.ID()), api.ContractAddRequest{ diff --git a/bus/client/wallet.go b/bus/client/wallet.go index f6d958dce..00c612515 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -7,6 +7,7 @@ import ( "net/url" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/api" ) @@ -29,7 +30,7 @@ func (c *Client) Wallet(ctx context.Context) (resp api.WalletResponse, err error // WalletPending returns the txpool transactions that are relevant to the // wallet. 
-func (c *Client) WalletPending(ctx context.Context) (resp []types.Transaction, err error) { +func (c *Client) WalletPending(ctx context.Context) (resp []wallet.Event, err error) { err = c.c.WithContext(ctx).GET("/wallet/pending", &resp) return } @@ -48,7 +49,7 @@ func (c *Client) WalletRedistribute(ctx context.Context, outputs int, amount typ } // WalletEvents returns all events relevant to the wallet. -func (c *Client) WalletEvents(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []api.Transaction, err error) { +func (c *Client) WalletEvents(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []wallet.Event, err error) { c.c.Custom("GET", "/wallet/events", nil, &resp) values := url.Values{} diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 3b494dc9a..d325b1476 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1089,7 +1089,6 @@ func TestContractApplyChainUpdates(t *testing.T) { defer cluster.Shutdown() // convenience variables - w := cluster.Worker b := cluster.Bus tt := cluster.tt @@ -1111,7 +1110,7 @@ func TestContractApplyChainUpdates(t *testing.T) { } // broadcast the revision for each contract - tt.OK(w.RHPBroadcast(context.Background(), contract.ID)) + tt.OK(b.BroadcastRevision(context.Background(), contract.ID)) cluster.MineBlocks(1) // check the revision height was updated. @@ -1789,18 +1788,18 @@ func TestWallet(t *testing.T) { tt := cluster.tt // Check wallet info is sane after startup. 
- wallet, err := b.Wallet(context.Background()) + wr, err := b.Wallet(context.Background()) tt.OK(err) - if wallet.Confirmed.IsZero() { + if wr.Confirmed.IsZero() { t.Fatal("wallet confirmed balance should not be zero") } - if !wallet.Spendable.Equals(wallet.Confirmed) { + if !wr.Spendable.Equals(wr.Confirmed) { t.Fatal("wallet spendable balance should match confirmed") } - if !wallet.Unconfirmed.IsZero() { + if !wr.Unconfirmed.IsZero() { t.Fatal("wallet unconfirmed balance should be zero") } - if wallet.Address == (types.Address{}) { + if wr.Address == (types.Address{}) { t.Fatal("wallet address should be set") } @@ -1815,9 +1814,14 @@ func TestWallet(t *testing.T) { var minerFee types.Currency for _, txn := range txns { - if txn.ID == txnID { - for _, fee := range txn.Raw.MinerFees { - minerFee = minerFee.Add(fee) + if types.TransactionID(txn.ID) == txnID { + switch txn := txn.Data.(type) { + case *wallet.EventV1Transaction: + for _, fee := range txn.Transaction.MinerFees { + minerFee = minerFee.Add(fee) + } + case *wallet.EventV2Transaction: + minerFee = minerFee.Add(txn.MinerFee) } } } @@ -1830,18 +1834,18 @@ func TestWallet(t *testing.T) { tt.Retry(600, 100*time.Millisecond, func() error { updated, err := b.Wallet(context.Background()) tt.OK(err) - if !updated.Confirmed.Equals(wallet.Confirmed) { - return fmt.Errorf("wallet confirmed balance should not have changed: %v %v", updated.Confirmed, wallet.Confirmed) + if !updated.Confirmed.Equals(wr.Confirmed) { + return fmt.Errorf("wr confirmed balance should not have changed: %v %v", updated.Confirmed, wr.Confirmed) } // The diffs of the spendable balance and unconfirmed balance should add up // to the amount of money sent as well as the miner fees used. 
- spendableDiff := wallet.Spendable.Sub(updated.Spendable) + spendableDiff := wr.Spendable.Sub(updated.Spendable) if updated.Unconfirmed.Cmp(spendableDiff) > 0 { t.Fatalf("unconfirmed balance can't be greater than the difference in spendable balance here: \nconfirmed %v (%v) - >%v (%v) \nunconfirmed %v (%v) -> %v (%v) \nspendable %v (%v) -> %v (%v) \nfee %v (%v)", - wallet.Confirmed, wallet.Confirmed.ExactString(), updated.Confirmed, updated.Confirmed.ExactString(), - wallet.Unconfirmed, wallet.Unconfirmed.ExactString(), updated.Unconfirmed, updated.Unconfirmed.ExactString(), - wallet.Spendable, wallet.Spendable.ExactString(), updated.Spendable, updated.Spendable.ExactString(), + wr.Confirmed, wr.Confirmed.ExactString(), updated.Confirmed, updated.Confirmed.ExactString(), + wr.Unconfirmed, wr.Unconfirmed.ExactString(), updated.Unconfirmed, updated.Unconfirmed.ExactString(), + wr.Spendable, wr.Spendable.ExactString(), updated.Spendable, updated.Spendable.ExactString(), minerFee, minerFee.ExactString()) } withdrawnAmt := spendableDiff.Sub(updated.Unconfirmed) @@ -2540,11 +2544,20 @@ func TestWalletRedistribute(t *testing.T) { utxos := make(map[types.SiacoinOutputID]struct{}) for _, txn := range txns { - for i := range txn.Raw.SiacoinOutputs { - utxos[txn.Raw.SiacoinOutputID(i)] = struct{}{} - } - for _, sci := range txn.Raw.SiacoinInputs { - delete(utxos, sci.ParentID) + if v1Txn, ok := txn.Data.(*wallet.EventV1Transaction); ok { + for i := range v1Txn.SpentSiacoinElements { + utxos[v1Txn.Transaction.SiacoinOutputID(i)] = struct{}{} + } + for _, sci := range v1Txn.Transaction.SiacoinInputs { + delete(utxos, sci.ParentID) + } + } else if v2Txn, ok := txn.Data.(*wallet.EventV1Transaction); ok { + for i := range v2Txn.SpentSiacoinElements { + utxos[v2Txn.Transaction.SiacoinOutputID(i)] = struct{}{} + } + for _, sci := range v2Txn.Transaction.SiacoinInputs { + delete(utxos, sci.ParentID) + } } } if cnt := len(utxos); cnt != 5 { diff --git a/worker/client/rhp.go 
b/worker/client/rhp.go index bb923b705..5aeeee8bf 100644 --- a/worker/client/rhp.go +++ b/worker/client/rhp.go @@ -2,19 +2,12 @@ package client import ( "context" - "fmt" "time" "go.sia.tech/core/types" "go.sia.tech/renterd/api" ) -// RHPBroadcast broadcasts the latest revision for a contract. -func (c *Client) RHPBroadcast(ctx context.Context, contractID types.FileContractID) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/rhp/contract/%s/broadcast", contractID), nil, nil) - return -} - // RHPPriceTable fetches a price table for a host. func (c *Client) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (pt api.HostPriceTable, err error) { req := api.RHPPriceTableRequest{ From 6b466248d0fa188d7fa12b25e80dc0ce4acf3231 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 15:03:51 +0200 Subject: [PATCH 43/98] e2e: fix TestWalletEvents --- bus/client/client.go | 26 -------------------------- bus/client/wallet.go | 15 +-------------- internal/test/e2e/cluster_test.go | 2 +- 3 files changed, 2 insertions(+), 41 deletions(-) diff --git a/bus/client/client.go b/bus/client/client.go index b082e5d9e..8ca1bf81e 100644 --- a/bus/client/client.go +++ b/bus/client/client.go @@ -1,11 +1,6 @@ package client import ( - "encoding/json" - "errors" - "io" - "net/http" - "go.sia.tech/jape" "go.sia.tech/renterd/api" ) @@ -28,24 +23,3 @@ func (c *Client) State() (state api.BusStateResponse, err error) { err = c.c.GET("/state", &state) return } - -func (c *Client) do(req *http.Request, resp interface{}) error { - req.Header.Set("Content-Type", "application/json") - if c.c.Password != "" { - req.SetBasicAuth("", c.c.Password) - } - r, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer io.Copy(io.Discard, r.Body) - defer r.Body.Close() - if !(200 <= r.StatusCode && r.StatusCode < 300) { - err, _ := io.ReadAll(r.Body) - return errors.New(string(err)) - } - if resp == nil { - return nil 
- } - return json.NewDecoder(r.Body).Decode(resp) -} diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 00c612515..5314ade52 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -2,8 +2,6 @@ package client import ( "context" - "fmt" - "net/http" "net/url" "go.sia.tech/core/types" @@ -50,21 +48,10 @@ func (c *Client) WalletRedistribute(ctx context.Context, outputs int, amount typ // WalletEvents returns all events relevant to the wallet. func (c *Client) WalletEvents(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []wallet.Event, err error) { - c.c.Custom("GET", "/wallet/events", nil, &resp) - values := url.Values{} for _, opt := range opts { opt(values) } - u, err := url.Parse(fmt.Sprintf("%v/wallet/transactions", c.c.BaseURL)) - if err != nil { - panic(err) - } - u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) - if err != nil { - panic(err) - } - err = c.do(req, &resp) + err = c.c.WithContext(ctx).GET("/wallet/events?"+values.Encode(), &resp) return } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 772a374c6..436ce480b 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1568,7 +1568,7 @@ func TestUnconfirmedContractArchival(t *testing.T) { }) } -func TestWalletTransactions(t *testing.T) { +func TestWalletEvents(t *testing.T) { if testing.Short() { t.SkipNow() } From 4347a87f273d85b4631c22577fc652929cc99474 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 15:28:02 +0200 Subject: [PATCH 44/98] e2e: fix TestWallet --- internal/test/e2e/cluster_test.go | 34 +++++++++++++++---------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 436ce480b..338655549 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1803,30 +1803,30 @@ func TestWallet(t 
*testing.T) { t.Fatal("wallet address should be set") } - // Send 1 SC to an address outside our wallet. We manually do this to be in - // control of the miner fees. + // Send 1 SC to an address outside our wallet. sendAmt := types.HastingsPerSiacoin - txnID, err := b.SendSiacoins(context.Background(), types.VoidAddress, sendAmt, false) + _, err = b.SendSiacoins(context.Background(), types.Address{1, 2, 3}, sendAmt, false) tt.OK(err) txns, err := b.WalletEvents(context.Background()) tt.OK(err) - var minerFee types.Currency - for _, txn := range txns { - if types.TransactionID(txn.ID) == txnID { - switch txn := txn.Data.(type) { - case *wallet.EventV1Transaction: - for _, fee := range txn.Transaction.MinerFees { - minerFee = minerFee.Add(fee) - } - case *wallet.EventV2Transaction: - minerFee = minerFee.Add(txn.MinerFee) - } - } + txns, err = b.WalletPending(context.Background()) + tt.OK(err) + if len(txns) != 1 { + t.Fatalf("expected 1 txn got %v", len(txns)) } - if minerFee.IsZero() { - t.Fatal("miner fee should not be zero") + + var minerFee types.Currency + switch txn := txns[0].Data.(type) { + case wallet.EventV1Transaction: + for _, fee := range txn.Transaction.MinerFees { + minerFee = minerFee.Add(fee) + } + case wallet.EventV2Transaction: + minerFee = txn.MinerFee + default: + t.Fatalf("unexpected event %T", txn) } // The wallet should still have the same confirmed balance, a lower From 5d35cb056c9ad1c0ed0ad3f8f6a910af251ef48c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 16:19:39 +0200 Subject: [PATCH 45/98] e2e: fix TestWalletRedistribute --- bus/routes.go | 16 +++++++--- internal/test/e2e/cluster_test.go | 49 +++++++++++++++++-------------- 2 files changed, 39 insertions(+), 26 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index 90c0de3e2..fdff08a78 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -377,14 +377,22 @@ func (b *Bus) walletRedistributeHandler(jc jape.Context) { return } - available, err := 
b.w.SpendableOutputs() + spendableOutputs, err := b.w.SpendableOutputs() if jc.Check("couldn't fetch spendable outputs", err) != nil { return - } else if len(available) >= wfr.Outputs { - b.logger.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), wfr.Outputs) + } + var available int + for _, so := range spendableOutputs { + if so.SiacoinOutput.Value.Cmp(wfr.Amount) >= 0 { + available++ + } + } + if available >= wfr.Outputs { + b.logger.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", available, wfr.Outputs) + jc.Encode([]types.TransactionID{}) return } - wantedOutputs := wfr.Outputs - len(available) + wantedOutputs := wfr.Outputs - available var ids []types.TransactionID if state := b.cm.TipState(); state.Index.Height < state.Network.HardforkV2.AllowHeight { diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 338655549..a781efd38 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -2529,46 +2529,51 @@ func TestWalletRedistribute(t *testing.T) { }) defer cluster.Shutdown() - // redistribute into 5 outputs - _, err := cluster.Bus.WalletRedistribute(context.Background(), 5, types.Siacoins(10)) + // redistribute into 2 outputs of 500KS each + numOutputs := 2 + outputAmt := types.Siacoins(500e3) + txnSet, err := cluster.Bus.WalletRedistribute(context.Background(), numOutputs, outputAmt) if err != nil { t.Fatal(err) + } else if len(txnSet) == 0 { + t.Fatal("nothing happened") } cluster.MineBlocks(1) // assert we have 5 outputs with 10 SC txns, err := cluster.Bus.WalletEvents(context.Background()) - if err != nil { - t.Fatal(err) - } + cluster.tt.OK(err) - utxos := make(map[types.SiacoinOutputID]struct{}) + nOutputs := 0 for _, txn := range txns { - if v1Txn, ok := txn.Data.(*wallet.EventV1Transaction); ok { - for i := range v1Txn.SpentSiacoinElements { - utxos[v1Txn.Transaction.SiacoinOutputID(i)] = struct{}{} - } - for _, sci 
:= range v1Txn.Transaction.SiacoinInputs { - delete(utxos, sci.ParentID) - } - } else if v2Txn, ok := txn.Data.(*wallet.EventV1Transaction); ok { - for i := range v2Txn.SpentSiacoinElements { - utxos[v2Txn.Transaction.SiacoinOutputID(i)] = struct{}{} + switch txn := txn.Data.(type) { + case wallet.EventV1Transaction: + for _, sco := range txn.Transaction.SiacoinOutputs { + if sco.Value.Equals(types.Siacoins(500e3)) { + nOutputs++ + } } - for _, sci := range v2Txn.Transaction.SiacoinInputs { - delete(utxos, sci.ParentID) + case wallet.EventV2Transaction: + for _, sco := range txn.SiacoinOutputs { + if sco.Value.Equals(types.Siacoins(500e3)) { + nOutputs++ + } } + case wallet.EventPayout: + default: + t.Fatalf("unexpected transaction type %T", txn) } } - if cnt := len(utxos); cnt != 5 { + if cnt := nOutputs; cnt != numOutputs { t.Fatalf("expected 5 outputs with 10 SC, got %v", cnt) } // assert redistributing into 3 outputs succeeds, used to fail because we // were broadcasting an empty transaction set - _, err = cluster.Bus.WalletRedistribute(context.Background(), 3, types.Siacoins(10)) - if err != nil { - t.Fatal(err) + txnSet, err = cluster.Bus.WalletRedistribute(context.Background(), nOutputs, outputAmt) + cluster.tt.OK(err) + if len(txnSet) != 0 { + t.Fatal("txnSet should be empty") } } From b6d738f6d3da56b407f638d88dc813ff9e24b957 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 16:56:15 +0200 Subject: [PATCH 46/98] bus: address comments --- api/object.go | 15 ++++----------- bus/bus.go | 18 +++++++++--------- bus/client/objects.go | 24 ++++++++++++------------ bus/routes.go | 8 ++++---- stores/sql/main.go | 7 ++++--- worker/client/client.go | 12 ++++++------ 6 files changed, 39 insertions(+), 45 deletions(-) diff --git a/api/object.go b/api/object.go index f3d345c03..899b181e1 100644 --- a/api/object.go +++ b/api/object.go @@ -80,13 +80,6 @@ type ( // well ObjectUserMetadata map[string]string - // ObjectsResponse is the response type for the 
/bus/objects endpoint. - ObjectsResponse struct { - HasMore bool `json:"hasMore"` - Entries []ObjectMetadata `json:"entries,omitempty"` - Object *Object `json:"object,omitempty"` - } - // GetObjectResponse is the response type for the GET /worker/object endpoint. GetObjectResponse struct { Content io.ReadCloser `json:"content"` @@ -187,10 +180,10 @@ type ( // CopyObjectsRequest is the request type for the /bus/objects/copy endpoint. CopyObjectsRequest struct { SourceBucket string `json:"sourceBucket"` - SourcePath string `json:"sourcePath"` + SourceKey string `json:"sourcePath"` DestinationBucket string `json:"destinationBucket"` - DestinationPath string `json:"destinationPath"` + DestinationKey string `json:"destinationPath"` MimeType string `json:"mimeType"` Metadata ObjectUserMetadata `json:"metadata"` @@ -353,6 +346,6 @@ func FormatETag(eTag string) string { return fmt.Sprintf("%q", eTag) } -func ObjectPathEscape(path string) string { - return url.PathEscape(strings.TrimPrefix(path, "/")) +func ObjectKeyEscape(key string) string { + return url.PathEscape(strings.TrimPrefix(key, "/")) } diff --git a/bus/bus.go b/bus/bus.go index 7e5a0ee0a..8feca7219 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -232,22 +232,22 @@ type ( ListBuckets(_ context.Context) ([]api.Bucket, error) UpdateBucketPolicy(ctx context.Context, bucketName string, policy api.BucketPolicy) error - CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) + CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) ListObjects(ctx context.Context, bucketName, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) - Object(ctx context.Context, bucketName, path string) (api.Object, error) - ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) + 
Object(ctx context.Context, bucketName, key string) (api.Object, error) + ObjectMetadata(ctx context.Context, bucketName, key string) (api.Object, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) - RemoveObject(ctx context.Context, bucketName, path string) error + RemoveObject(ctx context.Context, bucketName, key string) error RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error RenameObjects(ctx context.Context, bucketName, from, to string, force bool) error - UpdateObject(ctx context.Context, bucketName, path, contractSet, ETag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error + UpdateObject(ctx context.Context, bucketName, key, contractSet, ETag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error - AbortMultipartUpload(ctx context.Context, bucketName, path string, uploadID string) (err error) - AddMultipartPart(ctx context.Context, bucketName, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) - CompleteMultipartUpload(ctx context.Context, bucketName, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) - CreateMultipartUpload(ctx context.Context, bucketName, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) + AbortMultipartUpload(ctx context.Context, bucketName, key string, uploadID string) (err error) + AddMultipartPart(ctx context.Context, bucketName, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) + CompleteMultipartUpload(ctx context.Context, bucketName, key, uploadID string, parts 
[]api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) + CreateMultipartUpload(ctx context.Context, bucketName, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, _ error) MultipartUploads(ctx context.Context, bucketName, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucketName, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) diff --git a/bus/client/objects.go b/bus/client/objects.go index 6a8482160..1a1f7fc0b 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -11,7 +11,7 @@ import ( // AddObject stores the provided object under the given path. func (c *Client) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) { - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/objects/%s", path), api.AddObjectRequest{ Bucket: bucket, ContractSet: contractSet, @@ -25,37 +25,37 @@ func (c *Client) AddObject(ctx context.Context, bucket, path, contractSet string // CopyObject copies the object from the source bucket and path to the // destination bucket and path. 
-func (c *Client) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) { +func (c *Client) CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) { err = c.c.WithContext(ctx).POST("/objects/copy", api.CopyObjectsRequest{ SourceBucket: srcBucket, DestinationBucket: dstBucket, - SourcePath: srcPath, - DestinationPath: dstPath, + SourceKey: srcKey, + DestinationKey: dstKey, MimeType: opts.MimeType, Metadata: opts.Metadata, }, &om) return } -// DeleteObject either deletes the object at the given path or if batch=true -// deletes all objects that start with the given path. -func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) { +// DeleteObject either deletes the object at the given key or if batch=true +// deletes all objects that start with the given key. +func (c *Client) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - path = api.ObjectPathEscape(path) - err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), path)) + key = api.ObjectKeyEscape(key) + err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), key)) return } -// Objects returns the object at given path. +// Objects returns the object at given key. func (c *Client) Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (res api.Object, err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - key = api.ObjectPathEscape(key) + key = api.ObjectKeyEscape(key) key += "?" 
+ values.Encode() err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", key), &res) @@ -68,7 +68,7 @@ func (c *Client) Objects(ctx context.Context, bucket string, prefix string, opts values.Set("bucket", bucket) opts.Apply(values) - prefix = api.ObjectPathEscape(prefix) + prefix = api.ObjectKeyEscape(prefix) prefix += "?" + values.Encode() err = c.c.WithContext(ctx).GET(fmt.Sprintf("/listobjects/%s", prefix), &resp) diff --git a/bus/routes.go b/bus/routes.go index 8bcbf12c4..dbfb71d68 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1113,7 +1113,7 @@ func (b *Bus) contractsAllHandlerDELETE(jc jape.Context) { } func (b *Bus) objectHandlerGET(jc jape.Context) { - path := jc.PathParam("key") + key := jc.PathParam("key") bucket := api.DefaultBucketName if jc.DecodeForm("bucket", &bucket) != nil { return @@ -1127,9 +1127,9 @@ func (b *Bus) objectHandlerGET(jc jape.Context) { var err error if onlymetadata { - o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path) + o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, key) } else { - o, err = b.ms.Object(jc.Request.Context(), bucket, path) + o, err = b.ms.Object(jc.Request.Context(), bucket, key) } if errors.Is(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -1191,7 +1191,7 @@ func (b *Bus) objectsCopyHandlerPOST(jc jape.Context) { if jc.Decode(&orr) != nil { return } - om, err := b.ms.CopyObject(jc.Request.Context(), orr.SourceBucket, orr.DestinationBucket, orr.SourcePath, orr.DestinationPath, orr.MimeType, orr.Metadata) + om, err := b.ms.CopyObject(jc.Request.Context(), orr.SourceBucket, orr.DestinationBucket, orr.SourceKey, orr.DestinationKey, orr.MimeType, orr.Metadata) if jc.Check("couldn't copy object", err) != nil { return } diff --git a/stores/sql/main.go b/stores/sql/main.go index 04111f29a..82d1da8f0 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1014,11 +1014,12 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) { } func 
ListObjects(ctx context.Context, tx Tx, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { - if delim == "" { + switch delim { + case "": resp, err = listObjectsNoDelim(ctx, tx, bucket, prefix, substring, sortBy, sortDir, marker, limit) - } else if delim == "/" { + case "/": resp, err = listObjectsSlashDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) - } else { + default: err = fmt.Errorf("unsupported delimiter: '%s'", delim) } return diff --git a/worker/client/client.go b/worker/client/client.go index 6ea6172cd..6f950dfd8 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -62,7 +62,7 @@ func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api values.Set("bucket", bucket) opts.Apply(values) - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), path)) return } @@ -73,7 +73,7 @@ func (c *Client) DownloadObject(ctx context.Context, w io.Writer, bucket, path s return errors.New("the given path is a directory, use ObjectEntries instead") } - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) body, _, err := c.object(ctx, bucket, path, opts) if err != nil { return err @@ -96,7 +96,7 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) opts.Apply(values) - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) path += "?" 
+ values.Encode() // TODO: support HEAD in jape client @@ -134,7 +134,7 @@ func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.Do return nil, errors.New("the given path is a directory, use ObjectEntries instead") } - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) body, header, err := c.object(ctx, bucket, path, opts) if err != nil { return nil, err @@ -185,7 +185,7 @@ func (c *Client) State() (state api.WorkerStateResponse, err error) { // UploadMultipartUploadPart uploads part of the data for a multipart upload. func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) { - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) c.c.Custom("PUT", fmt.Sprintf("/multipart/%s", path), []byte{}, nil) values := make(url.Values) @@ -224,7 +224,7 @@ func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc // UploadObject uploads the data in r, creating an object at the given path. 
func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { - path = api.ObjectPathEscape(path) + path = api.ObjectKeyEscape(path) c.c.Custom("PUT", fmt.Sprintf("/objects/%s", path), []byte{}, nil) values := make(url.Values) From 1debed9b74fe4f8389b5b6c651b695554f4bbd78 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 4 Sep 2024 17:23:06 +0200 Subject: [PATCH 47/98] api: call object name's 'key' instead of 'path' or 'name' --- api/multipart.go | 30 ++--- api/object.go | 4 +- api/slab.go | 10 +- autopilot/migrator.go | 24 ++-- bus/client/multipart-upload.go | 30 +++-- bus/routes.go | 16 +-- internal/test/e2e/cluster_test.go | 104 ++++++++--------- internal/test/e2e/migrations_test.go | 4 +- object/object.go | 2 +- stores/metadata.go | 24 ++-- stores/metadata_test.go | 162 +++++++++++++-------------- stores/multipart.go | 18 +-- stores/slabbuffer.go | 6 +- stores/sql/database.go | 2 +- stores/sql/main.go | 12 +- stores/sql/mysql/main.go | 10 +- stores/sql/sqlite/main.go | 10 +- stores/sql_test.go | 6 +- worker/alerts.go | 4 +- worker/bench_test.go | 2 +- worker/client/client.go | 56 ++++----- worker/mocks_test.go | 30 ++--- worker/s3/backend.go | 32 +++--- worker/s3/s3.go | 20 ++-- worker/upload.go | 12 +- worker/upload_params.go | 6 +- worker/upload_test.go | 12 +- worker/worker.go | 48 ++++---- 28 files changed, 347 insertions(+), 349 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index ecd19789f..c5b88513c 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -31,11 +31,11 @@ var ( type ( MultipartUpload struct { - Bucket string `json:"bucket"` - Key object.EncryptionKey `json:"key"` - Path string `json:"path"` - UploadID string `json:"uploadID"` - CreatedAt TimeRFC3339 `json:"createdAt"` + Bucket string `json:"bucket"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` + Key string `json:"key"` + UploadID string `json:"uploadID"` + 
CreatedAt TimeRFC3339 `json:"createdAt"` } MultipartListPartItem struct { @@ -65,14 +65,14 @@ type ( type ( MultipartAbortRequest struct { Bucket string `json:"bucket"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` } MultipartAddPartRequest struct { Bucket string `json:"bucket"` ETag string `json:"eTag"` - Path string `json:"path"` + Key string `json:"key"` ContractSet string `json:"contractSet"` UploadID string `json:"uploadID"` PartNumber int `json:"partNumber"` @@ -86,17 +86,17 @@ type ( MultipartCompleteRequest struct { Bucket string `json:"bucket"` Metadata ObjectUserMetadata `json:"metadata"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` Parts []MultipartCompletedPart `json:"parts"` } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key *object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` + Bucket string `json:"bucket"` + EncryptionKey *object.EncryptionKey `json:"encryptionKey"` + Key string `json:"key"` + MimeType string `json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` // TODO: The next major version change should invert this to create a // key by default @@ -109,7 +109,7 @@ type ( MultipartListPartsRequest struct { Bucket string `json:"bucket"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` PartNumberMarker int `json:"partNumberMarker"` Limit int64 `json:"limit"` @@ -124,7 +124,7 @@ type ( MultipartListUploadsRequest struct { Bucket string `json:"bucket"` Prefix string `json:"prefix"` - PathMarker string `json:"pathMarker"` + KeyMarker string `json:"keyMarker"` UploadIDMarker string `json:"uploadIDMarker"` Limit int `json:"limit"` } diff --git a/api/object.go b/api/object.go index 899b181e1..fe71ddb1b 100644 --- a/api/object.go +++ b/api/object.go @@ -66,7 +66,7 @@ type ( ETag string `json:"eTag,omitempty"` 
Health float64 `json:"health"` ModTime TimeRFC3339 `json:"modTime"` - Name string `json:"name"` + Key string `json:"key"` Size int64 `json:"size"` MimeType string `json:"mimeType,omitempty"` } @@ -146,7 +146,7 @@ func (o ObjectMetadata) ContentType() string { return o.MimeType } - if ext := filepath.Ext(o.Name); ext != "" { + if ext := filepath.Ext(o.Key); ext != "" { return mime.TypeByExtension(ext) } diff --git a/api/slab.go b/api/slab.go index 65d19788d..f93ebe532 100644 --- a/api/slab.go +++ b/api/slab.go @@ -7,9 +7,9 @@ import ( type ( PackedSlab struct { - BufferID uint `json:"bufferID"` - Data []byte `json:"data"` - Key object.EncryptionKey `json:"key"` + BufferID uint `json:"bufferID"` + Data []byte `json:"data"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` } SlabBuffer struct { @@ -22,8 +22,8 @@ type ( } UnhealthySlab struct { - Key object.EncryptionKey `json:"key"` - Health float64 `json:"health"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` + Health float64 `json:"health"` } UploadedPackedSlab struct { diff --git a/autopilot/migrator.go b/autopilot/migrator.go index fd935cabb..251c5e893 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -50,7 +50,7 @@ type ( ) func (j *job) execute(ctx context.Context, w Worker) (_ api.MigrateSlabResponse, err error) { - slab, err := j.b.Slab(ctx, j.Key) + slab, err := j.b.Slab(ctx, j.EncryptionKey) if err != nil { return api.MigrateSlabResponse{}, fmt.Errorf("failed to fetch slab; %w", err) } @@ -161,7 +161,7 @@ func (m *migrator) performMigrations(p *workerPool) { res, err := j.execute(ctx, w) m.statsSlabMigrationSpeedMS.Track(float64(time.Since(start).Milliseconds())) if err != nil { - m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err) + m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, 
j.EncryptionKey, j.Health, res.SurchargeApplied, err) if utils.IsErr(err, api.ErrConsensusNotSynced) { // interrupt migrations if consensus is not synced select { @@ -172,7 +172,7 @@ func (m *migrator) performMigrations(p *workerPool) { } else if !utils.IsErr(err, api.ErrSlabNotFound) { // fetch all object IDs for the slab we failed to migrate var objectIds map[string][]string - if res, err := m.objectIDsForSlabKey(ctx, j.Key); err != nil { + if res, err := m.objectIDsForSlabKey(ctx, j.EncryptionKey); err != nil { m.logger.Errorf("failed to fetch object ids for slab key; %w", err) } else { objectIds = res @@ -180,20 +180,20 @@ func (m *migrator) performMigrations(p *workerPool) { // register the alert if res.SurchargeApplied { - m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, objectIds, err)) + m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.EncryptionKey, j.Health, objectIds, err)) } else { - m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, objectIds, err)) + m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.EncryptionKey, j.Health, objectIds, err)) } } } else { - m.logger.Infof("%v: migration %d/%d succeeded, key: %v, health: %v, overpaid: %v, shards migrated: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, res.NumShardsMigrated) - m.ap.DismissAlert(ctx, alerts.IDForSlab(alertMigrationID, j.Key)) + m.logger.Infof("%v: migration %d/%d succeeded, key: %v, health: %v, overpaid: %v, shards migrated: %v", id, j.slabIdx+1, j.batchSize, j.EncryptionKey, j.Health, res.SurchargeApplied, res.NumShardsMigrated) + m.ap.DismissAlert(ctx, alerts.IDForSlab(alertMigrationID, j.EncryptionKey)) if res.SurchargeApplied { // this alert confirms the user his gouging // settings are working, it will be dismissed // automatically the next time this slab is // successfully migrated - m.ap.RegisterAlert(ctx, newCriticalMigrationSucceededAlert(j.Key)) + m.ap.RegisterAlert(ctx, 
newCriticalMigrationSucceededAlert(j.EncryptionKey)) } } } @@ -238,13 +238,13 @@ func (m *migrator) performMigrations(p *workerPool) { // starvation. migrateNewMap := make(map[object.EncryptionKey]*api.UnhealthySlab) for i, slab := range toMigrateNew { - migrateNewMap[slab.Key] = &toMigrateNew[i] + migrateNewMap[slab.EncryptionKey] = &toMigrateNew[i] } removed := 0 for i := 0; i < len(toMigrate)-removed; { slab := toMigrate[i] - if _, exists := migrateNewMap[slab.Key]; exists { - delete(migrateNewMap, slab.Key) // delete from map to leave only new slabs + if _, exists := migrateNewMap[slab.EncryptionKey]; exists { + delete(migrateNewMap, slab.EncryptionKey) // delete from map to leave only new slabs i++ } else { toMigrate[i] = toMigrate[len(toMigrate)-1-removed] @@ -337,7 +337,7 @@ func (m *migrator) objectIDsForSlabKey(ctx context.Context, key object.Encryptio idsPerBucket[bucket.Name] = make([]string, len(objects)) for i, object := range objects { - idsPerBucket[bucket.Name][i] = object.Name + idsPerBucket[bucket.Name][i] = object.Key } } diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go index 6fd06204c..1c6ce0756 100644 --- a/bus/client/multipart-upload.go +++ b/bus/client/multipart-upload.go @@ -9,21 +9,21 @@ import ( ) // AbortMultipartUpload aborts a multipart upload. -func (c *Client) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) { +func (c *Client) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) (err error) { err = c.c.WithContext(ctx).POST("/multipart/abort", api.MultipartAbortRequest{ Bucket: bucket, - Path: path, + Key: key, UploadID: uploadID, }, nil) return } // AddMultipartPart adds a part to a multipart upload. 
-func (c *Client) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { +func (c *Client) AddMultipartPart(ctx context.Context, bucket, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { err = c.c.WithContext(ctx).PUT("/multipart/part", api.MultipartAddPartRequest{ Bucket: bucket, ETag: eTag, - Path: path, + Key: key, ContractSet: contractSet, UploadID: uploadID, PartNumber: partNumber, @@ -33,10 +33,10 @@ func (c *Client) AddMultipartPart(ctx context.Context, bucket, path, contractSet } // CompleteMultipartUpload completes a multipart upload. -func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (resp api.MultipartCompleteResponse, err error) { +func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (resp api.MultipartCompleteResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/complete", api.MultipartCompleteRequest{ Bucket: bucket, - Path: path, + Key: key, Metadata: opts.Metadata, UploadID: uploadID, Parts: parts, @@ -45,14 +45,12 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo } // CreateMultipartUpload creates a new multipart upload. 
-func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { +func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, key string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{ - Bucket: bucket, - GenerateKey: opts.GenerateKey, - Path: path, - Key: opts.Key, - MimeType: opts.MimeType, - Metadata: opts.Metadata, + Bucket: bucket, + Key: key, + MimeType: opts.MimeType, + Metadata: opts.Metadata, }, &resp) return } @@ -68,7 +66,7 @@ func (c *Client) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker err = c.c.WithContext(ctx).POST("/multipart/listuploads", api.MultipartListUploadsRequest{ Bucket: bucket, Prefix: prefix, - PathMarker: keyMarker, + KeyMarker: keyMarker, UploadIDMarker: uploadIDMarker, Limit: maxUploads, }, &resp) @@ -76,10 +74,10 @@ func (c *Client) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker } // MultipartUploadParts returns information about all parts of a multipart upload. 
-func (c *Client) MultipartUploadParts(ctx context.Context, bucket, path string, uploadID string, partNumberMarker int, limit int64) (resp api.MultipartListPartsResponse, err error) { +func (c *Client) MultipartUploadParts(ctx context.Context, bucket, key string, uploadID string, partNumberMarker int, limit int64) (resp api.MultipartListPartsResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/listparts", api.MultipartListPartsRequest{ Bucket: bucket, - Path: path, + Key: key, UploadID: uploadID, PartNumberMarker: partNumberMarker, Limit: limit, diff --git a/bus/routes.go b/bus/routes.go index dbfb71d68..e093afbc6 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -2116,13 +2116,13 @@ func (b *Bus) multipartHandlerCreatePOST(jc jape.Context) { var key object.EncryptionKey if req.GenerateKey { key = object.GenerateEncryptionKey() - } else if req.Key == nil { + } else if req.EncryptionKey == nil { key = object.NoOpKey } else { - key = *req.Key + key = *req.EncryptionKey } - resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata) + resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, key, req.MimeType, req.Metadata) if jc.Check("failed to create multipart upload", err) != nil { return } @@ -2134,7 +2134,7 @@ func (b *Bus) multipartHandlerAbortPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - err := b.ms.AbortMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID) + err := b.ms.AbortMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, req.UploadID) if jc.Check("failed to abort multipart upload", err) != nil { return } @@ -2145,7 +2145,7 @@ func (b *Bus) multipartHandlerCompletePOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.Parts, api.CompleteMultipartOptions{ + resp, err := 
b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, req.UploadID, req.Parts, api.CompleteMultipartOptions{ Metadata: req.Metadata, }) if jc.Check("failed to complete multipart upload", err) != nil { @@ -2174,7 +2174,7 @@ func (b *Bus) multipartHandlerUploadPartPUT(jc jape.Context) { jc.Error(errors.New("upload_id must be non-empty"), http.StatusBadRequest) return } - err := b.ms.AddMultipartPart(jc.Request.Context(), req.Bucket, req.Path, req.ContractSet, req.ETag, req.UploadID, req.PartNumber, req.Slices) + err := b.ms.AddMultipartPart(jc.Request.Context(), req.Bucket, req.Key, req.ContractSet, req.ETag, req.UploadID, req.PartNumber, req.Slices) if jc.Check("failed to upload part", err) != nil { return } @@ -2193,7 +2193,7 @@ func (b *Bus) multipartHandlerListUploadsPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.MultipartUploads(jc.Request.Context(), req.Bucket, req.Prefix, req.PathMarker, req.UploadIDMarker, req.Limit) + resp, err := b.ms.MultipartUploads(jc.Request.Context(), req.Bucket, req.Prefix, req.KeyMarker, req.UploadIDMarker, req.Limit) if jc.Check("failed to list multipart uploads", err) != nil { return } @@ -2205,7 +2205,7 @@ func (b *Bus) multipartHandlerListPartsPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.MultipartUploadParts(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.PartNumberMarker, int64(req.Limit)) + resp, err := b.ms.MultipartUploadParts(jc.Request.Context(), req.Bucket, req.Key, req.UploadID, req.PartNumberMarker, int64(req.Limit)) if jc.Check("failed to list multipart upload parts", err) != nil { return } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 49fb43cbc..7661b14ac 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -45,13 +45,13 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { assertMetadata := func(entries []api.ObjectMetadata) { for i := 
range entries { // assert mod time - if !strings.HasSuffix(entries[i].Name, "/") && !entries[i].ModTime.Std().After(start.UTC()) { + if !strings.HasSuffix(entries[i].Key, "/") && !entries[i].ModTime.Std().After(start.UTC()) { t.Fatal("mod time should be set") } entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file + isDir := strings.HasSuffix(entries[i].Key, "/") && entries[i].Key != "//double/" // double is a file if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { t.Fatal("unexpected mime type", entries[i].MimeType) } @@ -77,7 +77,7 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { // upload the following paths uploads := []struct { - path string + key string size int }{ {"/foo/bar", 1}, @@ -90,11 +90,11 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { for _, upload := range uploads { if upload.size == 0 { - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } else { data := make([]byte, upload.size) frand.Read(data) - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } } @@ -104,16 +104,16 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { sortDir string want []api.ObjectMetadata }{ - {"/", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", 
api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/foo/b", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.ObjectSortDirAsc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", 
Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.ObjectSortDirDesc, []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/FOO/bar", Size: 6, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.ObjectSortDirAsc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.ObjectSortDirDesc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/foo/b", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, {"o/baz/quu", "", "", []api.ObjectMetadata{}}, - {"/foo", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, + {"/foo", "", "", 
[]api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.ObjectSortDirAsc, []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.ObjectSortDirDesc, []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}}}, } for _, test := range tests { // use the bus client @@ -151,8 +151,8 @@ func TestListObjectsWithNoDelimiter(t *testing.T) { got := res.Objects if len(got) != 1 { t.Fatalf("expected 1 object, got %v", len(got)) - } else if got[0].Name != test.want[offset].Name { - t.Fatalf("expected %v, got %v, offset %v, marker %v, sortBy %v, sortDir %v", test.want[offset].Name, got[0].Name, offset, marker, test.sortBy, test.sortDir) + } else if got[0].Key != test.want[offset].Key { + t.Fatalf("expected %v, got %v, offset %v, marker %v, sortBy %v, sortDir %v", test.want[offset].Key, got[0].Key, offset, marker, test.sortBy, test.sortDir) } marker = res.NextMarker } @@ -391,13 +391,13 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { assertMetadata := func(entries []api.ObjectMetadata) { for i := range entries { // assert mod time - if !strings.HasSuffix(entries[i].Name, "/") && !entries[i].ModTime.Std().After(start.UTC()) { + if !strings.HasSuffix(entries[i].Key, "/") && !entries[i].ModTime.Std().After(start.UTC()) { t.Fatal("mod time should be set") } entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file + isDir := strings.HasSuffix(entries[i].Key, "/") && entries[i].Key != "//double/" // double is a 
file if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { t.Fatal("unexpected mime type", entries[i].MimeType) } @@ -423,7 +423,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { // upload the following paths uploads := []struct { - path string + key string size int }{ {"/foo/bar", 1}, @@ -439,11 +439,11 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { for _, upload := range uploads { if upload.size == 0 { - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } else { data := make([]byte, upload.size) frand.Read(data) - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } } @@ -454,28 +454,28 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { sortDir string want []api.ObjectMetadata }{ - {"/", "", "", "", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"//", "", "", "", []api.ObjectMetadata{{Name: "///", Size: 8, Health: 1}, {Name: "//double/", Size: 7, Health: 1}}}, - {"///", "", "", "", []api.ObjectMetadata{{Name: "///triple", Size: 8, Health: 1}}}, - {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: 1}}}, - {"/FOO/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 9, Health: 1}}}, - {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, 
Health: 1}}}, - {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/fileÅ›/", "", "", "", []api.ObjectMetadata{{Name: "/fileÅ›/Å›pecial", Size: 6, Health: 1}}}, - - {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}}}, + {"/", "", "", "", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"//", "", "", "", []api.ObjectMetadata{{Key: "///", Size: 8, Health: 1}, {Key: "//double/", Size: 7, Health: 1}}}, + {"///", "", "", "", []api.ObjectMetadata{{Key: "///triple", Size: 8, Health: 1}}}, + {"/foo/", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/", Size: 7, Health: 1}}}, + {"/FOO/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 9, Health: 1}}}, + {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/gab/", "", "", "", []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/fileÅ›/", "", "", "", []api.ObjectMetadata{{Key: "/fileÅ›/Å›pecial", Size: 6, Health: 1}}}, + + {"/", "f", "", "", []api.ObjectMetadata{{Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}}}, {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, - {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}}}, + {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}}}, {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, - {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", 
"name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "//", Size: 15, Health: 1}}}, + {"/", "", "name", "ASC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "name", "DESC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "//", Size: 15, Health: 1}}}, - {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "health", "ASC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "health", "DESC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "//", Size: 15, Health: 1}}}, - {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, 
{Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "size", "ASC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "//", Size: 15, Health: 1}}}, + {"/", "", "size", "DESC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, } for _, test := range tests { // use the bus client @@ -524,28 +524,28 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { Delimiter: "/", SortBy: test.sortBy, SortDir: test.sortDir, - Marker: test.want[offset].Name, + Marker: test.want[offset].Key, Limit: 1, }) if err != nil { - t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %vmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, err) + t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %vmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Key, err) } assertMetadata(res.Objects) if len(res.Objects) != 1 || res.Objects[0] != test.want[offset+1] { - t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.want[offset].Name, res.Objects, test.want[offset+1]) + t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.want[offset].Key, res.Objects, test.want[offset+1]) } moreRemaining = len(test.want)-offset-2 > 0 if res.HasMore != moreRemaining { - t.Errorf("invalid value for hasMore (%t) at marker (%s) test (%+v)", res.HasMore, test.want[offset].Name, test) + t.Errorf("invalid value for hasMore (%t) at marker (%s) test (%+v)", res.HasMore, test.want[offset].Key, test) } } } // delete all uploads for _, upload := range uploads { - 
tt.OK(w.DeleteObject(context.Background(), api.DefaultBucketName, upload.path, api.DeleteObjectOptions{})) + tt.OK(w.DeleteObject(context.Background(), api.DefaultBucketName, upload.key, api.DeleteObjectOptions{})) } // assert root dir is empty @@ -1649,14 +1649,14 @@ func TestUploadPacking(t *testing.T) { frand.Read(data3) // declare helpers - download := func(path string, data []byte, offset, length int64) { + download := func(key string, data []byte, offset, length int64) { t.Helper() var buffer bytes.Buffer if err := w.DownloadObject( context.Background(), &buffer, api.DefaultBucketName, - path, + key, api.DownloadObjectOptions{Range: &api.DownloadRange{Offset: offset, Length: length}}, ); err != nil { t.Fatal(err) @@ -1682,7 +1682,7 @@ func TestUploadPacking(t *testing.T) { } var found bool for _, entry := range resp.Objects { - if entry.Name == "/"+name { + if entry.Key == "/"+name { if entry.Size != int64(len(data)) { t.Fatal("unexpected size after upload", entry.Size, len(data)) } @@ -1779,12 +1779,12 @@ func TestUploadPacking(t *testing.T) { t.Fatal("expected 2 objects", len(objs)) } sort.Slice(objs, func(i, j int) bool { - return objs[i].Name < objs[j].Name // make result deterministic + return objs[i].Key < objs[j].Key // make result deterministic }) - if objs[0].Name != "/file1" { - t.Fatal("expected file1", objs[0].Name) - } else if objs[1].Name != "/file2" { - t.Fatal("expected file2", objs[1].Name) + if objs[0].Key != "/file1" { + t.Fatal("expected file1", objs[0].Key) + } else if objs[1].Key != "/file2" { + t.Fatal("expected file2", objs[1].Key) } } @@ -2135,7 +2135,7 @@ func TestMultipartUploads(t *testing.T) { tt.OK(err) if len(lmu.Uploads) != 1 { t.Fatal("expected 1 upload got", len(lmu.Uploads)) - } else if upload := lmu.Uploads[0]; upload.UploadID != mpr.UploadID || upload.Path != objPath { + } else if upload := lmu.Uploads[0]; upload.UploadID != mpr.UploadID || upload.Key != objPath { t.Fatal("unexpected upload:", upload) } diff --git 
a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index b049da908..ab03a3339 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -41,8 +41,8 @@ func TestMigrations(t *testing.T) { tt := cluster.tt // create a helper to fetch used hosts - usedHosts := func(path string) map[types.PublicKey]struct{} { - res, _ := b.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + usedHosts := func(key string) map[types.PublicKey]struct{} { + res, _ := b.Object(context.Background(), api.DefaultBucketName, key, api.GetObjectOptions{}) if res.Object == nil { t.Fatal("object not found") } diff --git a/object/object.go b/object/object.go index 95517619f..1fa4a98ec 100644 --- a/object/object.go +++ b/object/object.go @@ -117,7 +117,7 @@ func GenerateEncryptionKey() EncryptionKey { // tagged omitempty to make sure responses where no object is returned remain // clean. type Object struct { - Key EncryptionKey `json:"key,omitempty"` + Key EncryptionKey `json:"encryptionKey,omitempty"` Slabs SlabSlices `json:"slabs,omitempty"` } diff --git a/stores/metadata.go b/stores/metadata.go index 44b2406d2..7262d8eb6 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -310,9 +310,9 @@ func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileCo return } -func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err error) { +func (s *SQLStore) Object(ctx context.Context, bucket, key string) (obj api.Object, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - obj, err = tx.Object(ctx, bucket, path) + obj, err = tx.Object(ctx, bucket, key) return err }) return @@ -462,7 +462,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo return } -func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error 
{ +func (s *SQLStore) UpdateObject(ctx context.Context, bucket, key, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { // Sanity check input. for _, s := range o.Slabs { for i, shard := range s.Shards { @@ -487,19 +487,19 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it var err error - prune, err = tx.DeleteObject(ctx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, key) if err != nil { return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } // create the dir - dirID, err := tx.MakeDirsForPath(ctx, path) + dirID, err := tx.MakeDirsForPath(ctx, key) if err != nil { - return fmt.Errorf("failed to create directories for path '%s': %w", path, err) + return fmt.Errorf("failed to create directories for key '%s': %w", key, err) } // Insert a new object. - err = tx.InsertObject(ctx, bucket, path, contractSet, dirID, o, mimeType, eTag, metadata) + err = tx.InsertObject(ctx, bucket, key, contractSet, dirID, o, mimeType, eTag, metadata) if err != nil { return fmt.Errorf("failed to insert object: %w", err) } @@ -514,16 +514,16 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return nil } -func (s *SQLStore) RemoveObject(ctx context.Context, bucket, path string) error { +func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { var prune bool err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - prune, err = tx.DeleteObject(ctx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, key) return }) if err != nil { return fmt.Errorf("RemoveObject: failed to delete object: %w", err) } else if !prune { - return fmt.Errorf("%w: key: %s", api.ErrObjectNotFound, path) + return fmt.Errorf("%w: key: %s", api.ErrObjectNotFound, key) } s.triggerSlabPruning() return nil @@ -630,9 +630,9 @@ func (s 
*SQLStore) UnhealthySlabs(ctx context.Context, healthCutoff float64, set } // ObjectMetadata returns an object's metadata -func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (obj api.Object, err error) { +func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, key string) (obj api.Object, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - obj, err = tx.ObjectMetadata(ctx, bucket, path) + obj, err = tx.ObjectMetadata(ctx, bucket, key) return err }) return diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 15e595729..65d604077 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -42,10 +42,10 @@ func (s *testSQLStore) InsertSlab(slab object.Slab) { } } -func (s *SQLStore) RemoveObjectBlocking(ctx context.Context, bucket, path string) error { +func (s *SQLStore) RemoveObjectBlocking(ctx context.Context, bucket, key string) error { ts := time.Now() time.Sleep(time.Millisecond) - if err := s.RemoveObject(ctx, bucket, path); err != nil { + if err := s.RemoveObject(ctx, bucket, key); err != nil { return err } return s.waitForPruneLoop(ts) @@ -1051,7 +1051,7 @@ func TestSQLMetadataStore(t *testing.T) { ETag: testETag, Health: 1, ModTime: api.TimeRFC3339{}, - Name: objID, + Key: objID, Size: obj1.TotalSize(), MimeType: testMimeType, }, @@ -1438,7 +1438,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { defer ss.Close() objects := []struct { - path string + key string size int64 }{ {"/foo/bar", 1}, @@ -1458,7 +1458,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - _, err := ss.addTestObject(o.path, obj) + _, err := ss.addTestObject(o.key, obj) if err != nil { t.Fatal(err) } @@ -1470,13 +1470,13 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { t.Helper() for i := range entries { // assert mod time - if !strings.HasSuffix(entries[i].Name, "/") && 
entries[i].ModTime.IsZero() { + if !strings.HasSuffix(entries[i].Key, "/") && entries[i].ModTime.IsZero() { t.Fatal("mod time should be set") } entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") + isDir := strings.HasSuffix(entries[i].Key, "/") if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType != testMimeType) { t.Fatal("unexpected mime type", entries[i].MimeType) } @@ -1510,26 +1510,26 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { sortDir string want []api.ObjectMetadata }{ - {"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}}, - {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/fileÅ›/", "", "", "", []api.ObjectMetadata{{Name: "/fileÅ›/Å›pecial", Size: 6, Health: 1}}}, - - {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - {"/", "F", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}}, + {"/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/foo/", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/", Size: 7, Health: .5}}}, + {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/gab/", "", "", "", 
[]api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/fileÅ›/", "", "", "", []api.ObjectMetadata{{Key: "/fileÅ›/Å›pecial", Size: 6, Health: 1}}}, + + {"/", "f", "", "", []api.ObjectMetadata{{Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, + {"/", "F", "", "", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}}}, {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, - {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, + {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: .75}}}, {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, - {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, + {"/", "", "name", "ASC", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "name", "DESC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 7, Health: 1}}}, - {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "health", "ASC", []api.ObjectMetadata{{Key: "/foo/", Size: 10, Health: .5}, {Key: "/FOO/", Size: 7, 
Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "health", "DESC", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, - {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "size", "DESC", []api.ObjectMetadata{{Key: "/foo/", Size: 10, Health: .5}, {Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "size", "ASC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 7, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) @@ -1567,7 +1567,7 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { continue } - resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, test.want[offset].Name, 1) + resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, test.want[offset].Key, 1) if err != nil { t.Fatal(err) } @@ -1575,12 +1575,12 @@ func TestListObjectsWithDelimiterSlash(t *testing.T) { assertMetadata(got) if len(got) != 1 || got[0] != test.want[offset+1] { - t.Fatalf("\noffset: %v\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", offset+1, test.path, test.prefix, test.want[offset].Name, got, test.want[offset+1]) + t.Fatalf("\noffset: 
%v\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", offset+1, test.path, test.prefix, test.want[offset].Key, got, test.want[offset+1]) } moreRemaining = len(test.want)-offset-2 > 0 if resp.HasMore != moreRemaining { - t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", resp.HasMore, test.want[offset].Name, test) + t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", resp.HasMore, test.want[offset].Key, test) } } } @@ -1591,7 +1591,7 @@ func TestListObjectsExplicitDir(t *testing.T) { defer ss.Close() objects := []struct { - path string + key string size int64 }{ {"/dir/", 0}, // empty dir - created first @@ -1604,7 +1604,7 @@ func TestListObjectsExplicitDir(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - _, err := ss.addTestObject(o.path, obj) + _, err := ss.addTestObject(o.key, obj) if err != nil { t.Fatal(err) } @@ -1628,10 +1628,10 @@ func TestListObjectsExplicitDir(t *testing.T) { want []api.ObjectMetadata }{ {"/", "", "", "", []api.ObjectMetadata{ - {Name: "/dir/", Size: 1, Health: 0.5}, - {ETag: "d34db33f", Name: "/dir2/", Size: 2, Health: 1, MimeType: testMimeType}, // has MimeType and ETag since it's a file + {Key: "/dir/", Size: 1, Health: 0.5}, + {ETag: "d34db33f", Key: "/dir2/", Size: 2, Health: 1, MimeType: testMimeType}, // has MimeType and ETag since it's a file }}, - {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Name: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, + {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Key: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, } for _, test := range tests { got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) @@ -1653,7 +1653,7 @@ func TestListObjectsSubstring(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { - path 
string + key string size int64 }{ {"/foo/bar", 1}, @@ -1668,14 +1668,14 @@ func TestListObjectsSubstring(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - if _, err := ss.addTestObject(o.path, obj); err != nil { + if _, err := ss.addTestObject(o.key, obj); err != nil { t.Fatal(err) } } metadataEquals := func(got api.ObjectMetadata, want api.ObjectMetadata) bool { t.Helper() - return got.Name == want.Name && + return got.Key == want.Key && got.Size == want.Size && got.Health == want.Health } @@ -1693,16 +1693,16 @@ func TestListObjectsSubstring(t *testing.T) { } tests := []struct { - path string + key string want []api.ObjectMetadata }{ - {"/", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/foo/b", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"o/baz/quu", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"uu", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/foo/b", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"o/baz/quu", 
[]api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"uu", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, } for _, test := range tests { - resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.path, "", "", "", "", -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.key, "", "", "", "", -1) if err != nil { t.Fatal(err) } @@ -1710,12 +1710,12 @@ func TestListObjectsSubstring(t *testing.T) { assertEqual(got, test.want) var marker string for offset := 0; offset < len(test.want); offset++ { - if resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.path, "", "", "", marker, 1); err != nil { + if resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.key, "", "", "", marker, 1); err != nil { t.Fatal(err) } else if got := resp.Objects; len(got) != 1 { - t.Errorf("\nkey: %v unexpected number of objects, %d != 1", test.path, len(got)) + t.Errorf("\nkey: %v unexpected number of objects, %d != 1", test.key, len(got)) } else if !metadataEquals(got[0], test.want[offset]) { - t.Errorf("\nkey: %v\ngot: %v\nwant: %v", test.path, got, test.want[offset]) + t.Errorf("\nkey: %v\ngot: %v\nwant: %v", test.key, got, test.want[offset]) } else { marker = resp.NextMarker } @@ -1850,10 +1850,10 @@ func TestUnhealthySlabs(t *testing.T) { } expected := []api.UnhealthySlab{ - {Key: obj.Slabs[2].Key, Health: 0}, - {Key: obj.Slabs[4].Key, Health: 0}, - {Key: obj.Slabs[1].Key, Health: 0.5}, - {Key: obj.Slabs[3].Key, Health: 0.5}, + {EncryptionKey: obj.Slabs[2].Key, Health: 0}, + {EncryptionKey: obj.Slabs[4].Key, Health: 0}, + {EncryptionKey: obj.Slabs[1].Key, Health: 0.5}, + {EncryptionKey: obj.Slabs[3].Key, Health: 0.5}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -1871,8 +1871,8 @@ func TestUnhealthySlabs(t *testing.T) { } 
expected = []api.UnhealthySlab{ - {Key: obj.Slabs[2].Key, Health: 0}, - {Key: obj.Slabs[4].Key, Health: 0}, + {EncryptionKey: obj.Slabs[2].Key, Health: 0}, + {EncryptionKey: obj.Slabs[4].Key, Health: 0}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order", slabs, expected) @@ -2096,7 +2096,7 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { } expected := []api.UnhealthySlab{ - {Key: obj.Slabs[1].Slab.Key, Health: -1}, + {EncryptionKey: obj.Slabs[1].Slab.Key, Health: -1}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -2565,8 +2565,8 @@ func TestRenameObjects(t *testing.T) { // Assert paths are correct. for _, obj := range resp.Objects { - if _, exists := objectsAfterMap[obj.Name]; !exists { - t.Fatal("unexpected path", obj.Name) + if _, exists := objectsAfterMap[obj.Key]; !exists { + t.Fatal("unexpected path", obj.Key) } } @@ -2955,7 +2955,7 @@ func TestPartialSlab(t *testing.T) { assertBuffer(buffer1Name, rhpv2.SectorSize, true, true) assertBuffer(buffer2Name, 1, false, false) - buffer = fetchBuffer(packedSlabs[0].Key) + buffer = fetchBuffer(packedSlabs[0].EncryptionKey) if buffer.ID != packedSlabs[0].BufferID { t.Fatalf("wrong buffer id, %v != %v", buffer.ID, packedSlabs[0].BufferID) } @@ -2974,7 +2974,7 @@ func TestPartialSlab(t *testing.T) { t.Fatal(err) } - buffer = fetchBuffer(packedSlabs[0].Key) + buffer = fetchBuffer(packedSlabs[0].EncryptionKey) if buffer != (bufferedSlab{}) { t.Fatal("shouldn't be able to find buffer", err) } @@ -3229,8 +3229,8 @@ func TestObjectsBySlabKey(t *testing.T) { t.Fatal(err) } for i, name := range []string{"obj1", "obj2", "obj3"} { - if objs[i].Name != name { - t.Fatal("unexpected object name", objs[i].Name, name) + if objs[i].Key != name { + t.Fatal("unexpected object name", objs[i].Key, name) } if objs[i].Size != int64(i)+1 { t.Fatal("unexpected object size", objs[i].Size, i+1) @@ -3368,14 +3368,14 @@ func 
TestBucketObjects(t *testing.T) { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) - } else if entries[0].Name != "/foo/baz" { - t.Fatal("unexpected name", entries[0].Name) + } else if entries[0].Key != "/foo/baz" { + t.Fatal("unexpected name", entries[0].Key) } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) - } else if entries[0].Name != "/foo/bar" { - t.Fatal("unexpected name", entries[0].Name) + } else if entries[0].Key != "/foo/bar" { + t.Fatal("unexpected name", entries[0].Key) } // Rename foo/bar in bucket 2 using the batch rename. @@ -3385,14 +3385,14 @@ func TestBucketObjects(t *testing.T) { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) - } else if entries[0].Name != "/foo/baz" { - t.Fatal("unexpected name", entries[0].Name) + } else if entries[0].Key != "/foo/baz" { + t.Fatal("unexpected name", entries[0].Key) } else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 2 entries", len(entries)) - } else if entries[0].Name != "/foo/bam" { - t.Fatal("unexpected name", entries[0].Name) + } else if entries[0].Key != "/foo/bam" { + t.Fatal("unexpected name", entries[0].Key) } // Delete foo/baz in bucket 1 but first try bucket 2 since that should fail. 
@@ -3476,8 +3476,8 @@ func TestCopyObject(t *testing.T) { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 2 { t.Fatal("expected 2 entries", len(entries)) - } else if entries[0].Name != "/bar" || entries[1].Name != "/foo" { - t.Fatal("unexpected names", entries[0].Name, entries[1].Name) + } else if entries[0].Key != "/bar" || entries[1].Key != "/foo" { + t.Fatal("unexpected names", entries[0].Key, entries[1].Key) } else if om.ModTime.IsZero() { t.Fatal("expected mod time to be set") } @@ -3489,8 +3489,8 @@ func TestCopyObject(t *testing.T) { t.Fatal(err) } else if entries := resp.Objects; len(entries) != 1 { t.Fatal("expected 1 entry", len(entries)) - } else if entries[0].Name != "/bar" { - t.Fatal("unexpected names", entries[0].Name, entries[1].Name) + } else if entries[0].Key != "/bar" { + t.Fatal("unexpected names", entries[0].Key, entries[1].Key) } else if om.ModTime.IsZero() { t.Fatal("expected mod time to be set") } @@ -3575,7 +3575,7 @@ func TestListObjectsNoDelimiter(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { - path string + key string size int64 }{ {"/foo/bar", 1}, @@ -3589,7 +3589,7 @@ func TestListObjectsNoDelimiter(t *testing.T) { // assert mod time & clear it afterwards so we can compare assertModTime := func(entries []api.ObjectMetadata) { for i := range entries { - if !strings.HasSuffix(entries[i].Name, "/") && entries[i].ModTime.IsZero() { + if !strings.HasSuffix(entries[i].Key, "/") && entries[i].ModTime.IsZero() { t.Fatal("mod time should be set") } entries[i].ModTime = api.TimeRFC3339{} @@ -3601,7 +3601,7 @@ func TestListObjectsNoDelimiter(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - if _, err := ss.addTestObject(o.path, obj); err != nil { + if _, err := ss.addTestObject(o.key, obj); err != nil { t.Fatal(err) } } @@ -3626,16 +3626,16 @@ func TestListObjectsNoDelimiter(t *testing.T) { marker 
string want []api.ObjectMetadata }{ - {"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", "ASC", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", "DESC", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, - {"/", "health", "ASC", "", []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "health", "DESC", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - {"/foo/b", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", 
Size: 4, Health: .5}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", "ASC", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", "DESC", "", []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/FOO/bar", Size: 6, Health: 1}}}, + {"/", "health", "ASC", "", []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "health", "DESC", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/foo/b", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}}, {"o/baz/quu", "", "", "", []api.ObjectMetadata{}}, - {"/foo", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - {"/foo", "size", "ASC", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - {"/foo", "size", "DESC", "", 
[]api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, + {"/foo", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/foo", "size", "ASC", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/foo", "size", "DESC", "", []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}}}, } // set common fields for i := range tests { @@ -3671,8 +3671,8 @@ func TestListObjectsNoDelimiter(t *testing.T) { got := res.Objects if len(got) != 1 { t.Fatalf("expected 1 object, got %v", len(got)) - } else if got[0].Name != test.want[offset].Name { - t.Fatalf("expected %v, got %v, offset %v, marker %v", test.want[offset].Name, got[0].Name, offset, marker) + } else if got[0].Key != test.want[offset].Key { + t.Fatalf("expected %v, got %v, offset %v, marker %v", test.want[offset].Key, got[0].Key, offset, marker) } marker = res.NextMarker } diff --git a/stores/multipart.go b/stores/multipart.go index ec987619f..532942303 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -10,10 +10,10 @@ import ( sql "go.sia.tech/renterd/stores/sql" ) -func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) { +func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, 
error) { var uploadID string err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - uploadID, err = tx.InsertMultipartUpload(ctx, bucket, path, ec, mimeType, metadata) + uploadID, err = tx.InsertMultipartUpload(ctx, bucket, key, ec, mimeType, metadata) return }) if err != nil { @@ -24,9 +24,9 @@ func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path strin }, err } -func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { +func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.AddMultipartPart(ctx, bucket, path, contractSet, eTag, uploadID, partNumber, slices) + return tx.AddMultipartPart(ctx, bucket, key, contractSet, eTag, uploadID, partNumber, slices) }) } @@ -54,9 +54,9 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri return resp, err } -func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { +func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error { err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.AbortMultipartUpload(ctx, bucket, path, uploadID) + return tx.AbortMultipartUpload(ctx, bucket, key, uploadID) }) if err != nil { return err @@ -65,7 +65,7 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string return nil } -func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path string, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) { +func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, key string, uploadID string, parts 
[]api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) { // Sanity check input parts. if !sort.SliceIsSorted(parts, func(i, j int) bool { return parts[i].PartNumber < parts[j].PartNumber @@ -82,13 +82,13 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str var prune bool err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { // Delete potentially existing object. - prune, err = tx.DeleteObject(ctx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, key) if err != nil { return fmt.Errorf("failed to delete object: %w", err) } // Complete upload - eTag, err = tx.CompleteMultipartUpload(ctx, bucket, path, uploadID, parts, opts) + eTag, err = tx.CompleteMultipartUpload(ctx, bucket, key, uploadID, parts, opts) if err != nil { return fmt.Errorf("failed to complete multipart upload: %w", err) } diff --git a/stores/slabbuffer.go b/stores/slabbuffer.go index 5e8a542b8..7e61480e1 100644 --- a/stores/slabbuffer.go +++ b/stores/slabbuffer.go @@ -334,9 +334,9 @@ func (mgr *SlabBufferManager) SlabsForUpload(ctx context.Context, lockingDuratio return nil, err } slabs = append(slabs, api.PackedSlab{ - BufferID: buffer.dbID, - Data: data, - Key: buffer.slabKey, + BufferID: buffer.dbID, + Data: data, + EncryptionKey: buffer.slabKey, }) if len(slabs) == limit { break diff --git a/stores/sql/database.go b/stores/sql/database.go index c127a4fb9..1705e5b62 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -169,7 +169,7 @@ type ( // InsertMultipartUpload creates a new multipart upload and returns a // unique upload ID. 
- InsertMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) + InsertMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) // InvalidateSlabHealthByFCID invalidates the health of all slabs that // are associated with any of the provided contracts. diff --git a/stores/sql/main.go b/stores/sql/main.go index 82d1da8f0..41bacfd8e 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -392,7 +392,7 @@ func CopyObject(ctx context.Context, tx sql.Tx, srcBucket, dstBucket, srcKey, ds // helper to fetch metadata fetchMetadata := func(objID int64) (om api.ObjectMetadata, err error) { err = tx.QueryRow(ctx, "SELECT etag, health, created_at, object_id, size, mime_type FROM objects WHERE id = ?", objID). - Scan(&om.ETag, &om.Health, (*time.Time)(&om.ModTime), &om.Name, &om.Size, &om.MimeType) + Scan(&om.ETag, &om.Health, (*time.Time)(&om.ModTime), &om.Key, &om.Size, &om.MimeType) if err != nil { return api.ObjectMetadata{}, fmt.Errorf("failed to fetch new object: %w", err) } @@ -1134,7 +1134,7 @@ func MultipartUploads(ctx context.Context, tx sql.Tx, bucket, prefix, keyMarker, if limitUsed && len(uploads) > int(limit) { hasMore = true uploads = uploads[:len(uploads)-1] - nextPathMarker = uploads[len(uploads)-1].Path + nextPathMarker = uploads[len(uploads)-1].Key nextUploadIDMarker = uploads[len(uploads)-1].UploadID } @@ -2124,7 +2124,7 @@ func UnhealthySlabs(ctx context.Context, tx sql.Tx, healthCutoff float64, set st var slabs []api.UnhealthySlab for rows.Next() { var slab api.UnhealthySlab - if err := rows.Scan((*EncryptionKey)(&slab.Key), &slab.Health); err != nil { + if err := rows.Scan((*EncryptionKey)(&slab.EncryptionKey), &slab.Health); err != nil { return nil, fmt.Errorf("failed to scan unhealthy slab: %w", err) } slabs = append(slabs, slab) @@ -2285,7 +2285,7 @@ func 
scanBucket(s Scanner) (api.Bucket, error) { } func scanMultipartUpload(s Scanner) (resp api.MultipartUpload, _ error) { - err := s.Scan(&resp.Bucket, (*EncryptionKey)(&resp.Key), &resp.Path, &resp.UploadID, &resp.CreatedAt) + err := s.Scan(&resp.Bucket, (*EncryptionKey)(&resp.EncryptionKey), &resp.Key, &resp.UploadID, &resp.CreatedAt) if errors.Is(err, dsql.ErrNoRows) { return api.MultipartUpload{}, api.ErrMultipartUploadNotFound } else if err != nil { @@ -2732,7 +2732,7 @@ func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, substring, s objects = objects[:len(objects)-1] if len(objects) > 0 { hasMore = true - nextMarker = objects[len(objects)-1].Name + nextMarker = objects[len(objects)-1].Key } } @@ -2899,7 +2899,7 @@ func listObjectsSlashDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, s objects = objects[:len(objects)-1] if len(objects) > 0 { hasMore = true - nextMarker = objects[len(objects)-1].Name + nextMarker = objects[len(objects)-1].Key } } diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 09b420345..f995b804b 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -91,8 +91,8 @@ func (b *MainDatabase) wrapTxn(tx sql.Tx) *MainDatabaseTx { return &MainDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))} } -func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) +func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error { + return ssql.AbortMultipartUpload(ctx, tx, bucket, key, uploadID) } func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Account, error) { @@ -530,8 +530,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) 
(api.Object, error) { - return ssql.ObjectMetadata(ctx, tx, bucket, path) +func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) { + return ssql.ObjectMetadata(ctx, tx, bucket, key) } func (tx *MainDatabaseTx) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey object.EncryptionKey) (metadata []api.ObjectMetadata, err error) { @@ -744,7 +744,7 @@ func (tx MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accoun } func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md api.ObjectMetadata, err error) { - dst := []any{&md.Name, &md.Size, &md.Health, &md.MimeType, &md.ModTime, &md.ETag} + dst := []any{&md.Key, &md.Size, &md.Health, &md.MimeType, &md.ModTime, &md.ETag} dst = append(dst, others...) if err := s.Scan(dst...); err != nil { return api.ObjectMetadata{}, fmt.Errorf("failed to scan object metadata: %w", err) diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 6596848d3..2e1652103 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -94,8 +94,8 @@ func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Acc return ssql.Accounts(ctx, tx, owner) } -func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) +func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error { + return ssql.AbortMultipartUpload(ctx, tx, bucket, key, uploadID) } func (tx *MainDatabaseTx) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error { @@ -527,8 +527,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, 
error) { - return ssql.ObjectMetadata(ctx, tx, bucket, path) +func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) { + return ssql.ObjectMetadata(ctx, tx, bucket, key) } func (tx *MainDatabaseTx) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey object.EncryptionKey) (metadata []api.ObjectMetadata, err error) { @@ -743,7 +743,7 @@ func (tx *MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accou func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md api.ObjectMetadata, err error) { var createdAt string - dst := []any{&md.Name, &md.Size, &md.Health, &md.MimeType, &createdAt, &md.ETag} + dst := []any{&md.Key, &md.Size, &md.Health, &md.MimeType, &createdAt, &md.ETag} dst = append(dst, others...) if err := s.Scan(dst...); err != nil { return api.ObjectMetadata{}, fmt.Errorf("failed to scan object metadata: %w", err) diff --git a/stores/sql_test.go b/stores/sql_test.go index 0846254cb..c85800094 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -282,10 +282,10 @@ func (s *testSQLStore) Retry(tries int, durationBetweenAttempts time.Duration, f } } -func (s *testSQLStore) addTestObject(path string, o object.Object) (api.Object, error) { - if err := s.UpdateObjectBlocking(context.Background(), api.DefaultBucketName, path, testContractSet, testETag, testMimeType, testMetadata, o); err != nil { +func (s *testSQLStore) addTestObject(key string, o object.Object) (api.Object, error) { + if err := s.UpdateObjectBlocking(context.Background(), api.DefaultBucketName, key, testContractSet, testETag, testMimeType, testMetadata, o); err != nil { return api.Object{}, err - } else if obj, err := s.Object(context.Background(), api.DefaultBucketName, path); err != nil { + } else if obj, err := s.Object(context.Background(), api.DefaultBucketName, key); err != nil { return api.Object{}, err } else { return obj, nil diff --git a/worker/alerts.go b/worker/alerts.go index 
664698fb6..02598c770 100644 --- a/worker/alerts.go +++ b/worker/alerts.go @@ -13,14 +13,14 @@ func randomAlertID() types.Hash256 { return frand.Entropy256() } -func newDownloadFailedAlert(bucket, path string, offset, length, contracts int64, err error) alerts.Alert { +func newDownloadFailedAlert(bucket, key string, offset, length, contracts int64, err error) alerts.Alert { return alerts.Alert{ ID: randomAlertID(), Severity: alerts.SeverityError, Message: "Download failed", Data: map[string]any{ "bucket": bucket, - "path": path, + "key": key, "offset": offset, "length": length, "contracts": contracts, diff --git a/worker/bench_test.go b/worker/bench_test.go index 60a238dfc..3cde1792a 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -37,7 +37,7 @@ func BenchmarkDownloaderSingleObject(b *testing.B) { if err != nil { b.Fatal(err) } - o, err := w.os.Object(context.Background(), testBucket, up.path, api.GetObjectOptions{}) + o, err := w.os.Object(context.Background(), testBucket, up.key, api.GetObjectOptions{}) if err != nil { b.Fatal(err) } diff --git a/worker/client/client.go b/worker/client/client.go index 6f950dfd8..95a997277 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -57,24 +57,24 @@ func (c *Client) Contracts(ctx context.Context, hostTimeout time.Duration) (resp } // DeleteObject deletes the object at the given path. -func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) { +func (c *Client) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) { values := url.Values{} values.Set("bucket", bucket) opts.Apply(values) - path = api.ObjectKeyEscape(path) - err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), path)) + key = api.ObjectKeyEscape(key) + err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), key)) return } -// DownloadObject downloads the object at the given path. 
-func (c *Client) DownloadObject(ctx context.Context, w io.Writer, bucket, path string, opts api.DownloadObjectOptions) (err error) { - if strings.HasSuffix(path, "/") { - return errors.New("the given path is a directory, use ObjectEntries instead") +// DownloadObject downloads the object at the given key. +func (c *Client) DownloadObject(ctx context.Context, w io.Writer, bucket, key string, opts api.DownloadObjectOptions) (err error) { + if strings.HasSuffix(key, "/") { + return errors.New("the given key is a directory, use ObjectEntries instead") } - path = api.ObjectKeyEscape(path) - body, _, err := c.object(ctx, bucket, path, opts) + key = api.ObjectKeyEscape(key) + body, _, err := c.object(ctx, bucket, key, opts) if err != nil { return err } @@ -89,18 +89,18 @@ func (c *Client) DownloadStats() (resp api.DownloadStatsResponse, err error) { return } -// HeadObject returns the metadata of the object at the given path. -func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { - c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", path), nil, nil) +// HeadObject returns the metadata of the object at the given key. +func (c *Client) HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { + c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", key), nil, nil) values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) opts.Apply(values) - path = api.ObjectKeyEscape(path) - path += "?" + values.Encode() + key = api.ObjectKeyEscape(key) + key += "?" 
+ values.Encode() // TODO: support HEAD in jape client - req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) + req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, key), http.NoBody) if err != nil { panic(err) } @@ -128,14 +128,14 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H return &head, nil } -// GetObject returns the object at given path alongside its metadata. -func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { - if strings.HasSuffix(path, "/") { +// GetObject returns the object at given key alongside its metadata. +func (c *Client) GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { + if strings.HasSuffix(key, "/") { return nil, errors.New("the given path is a directory, use ObjectEntries instead") } - path = api.ObjectKeyEscape(path) - body, header, err := c.object(ctx, bucket, path, opts) + key = api.ObjectKeyEscape(key) + body, header, err := c.object(ctx, bucket, key, opts) if err != nil { return nil, err } @@ -223,14 +223,14 @@ func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc } // UploadObject uploads the data in r, creating an object at the given path. 
-func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { - path = api.ObjectKeyEscape(path) - c.c.Custom("PUT", fmt.Sprintf("/objects/%s", path), []byte{}, nil) +func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { + key = api.ObjectKeyEscape(key) + c.c.Custom("PUT", fmt.Sprintf("/objects/%s", key), []byte{}, nil) values := make(url.Values) values.Set("bucket", bucket) opts.ApplyValues(values) - u, err := url.Parse(fmt.Sprintf("%v/objects/%v", c.c.BaseURL, path)) + u, err := url.Parse(fmt.Sprintf("%v/objects/%v", c.c.BaseURL, key)) if err != nil { panic(err) } @@ -271,13 +271,13 @@ func (c *Client) NotifyEvent(ctx context.Context, e webhooks.Event) (err error) return } -func (c *Client) object(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { +func (c *Client) object(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) - path += "?" + values.Encode() + key += "?" 
+ values.Encode() - c.c.Custom("GET", fmt.Sprintf("/objects/%s", path), nil, (*[]api.ObjectMetadata)(nil)) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) + c.c.Custom("GET", fmt.Sprintf("/objects/%s", key), nil, (*[]api.ObjectMetadata)(nil)) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, key), http.NoBody) if err != nil { panic(err) } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 440ca9403..4e14cf7da 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -386,7 +386,7 @@ func (os *objectStoreMock) DeleteHostSector(ctx context.Context, hk types.Public return nil } -func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error { +func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) error { return nil } @@ -433,7 +433,7 @@ func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minS return []object.SlabSlice{ss}, os.totalSlabBufferSize() > os.slabBufferMaxSizeSoft, nil } -func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.Object, error) { +func (os *objectStoreMock) Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (api.Object, error) { os.mu.Lock() defer os.mu.Unlock() @@ -443,20 +443,20 @@ func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts } // check if the object exists - if _, exists := os.objects[bucket][path]; !exists { + if _, exists := os.objects[bucket][key]; !exists { return api.Object{}, api.ErrObjectNotFound } // clone to ensure the store isn't unwillingly modified var o object.Object - if b, err := json.Marshal(os.objects[bucket][path]); err != nil { + if b, err := json.Marshal(os.objects[bucket][key]); err != nil { panic(err) } else if err := json.Unmarshal(b, &o); err != 
nil { panic(err) } return api.Object{ - ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, + ObjectMetadata: api.ObjectMetadata{Key: key, Size: o.TotalSize()}, Object: &o, }, nil } @@ -480,7 +480,7 @@ func (os *objectStoreMock) Slab(ctx context.Context, key object.EncryptionKey) ( os.mu.Lock() defer os.mu.Unlock() - os.forEachObject(func(bucket, path string, o object.Object) { + os.forEachObject(func(bucket, objKey string, o object.Object) { for _, s := range o.Slabs { if s.Slab.Key.String() == key.String() { slab = s.Slab @@ -496,13 +496,13 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra os.mu.Lock() defer os.mu.Unlock() - os.forEachObject(func(bucket, path string, o object.Object) { + os.forEachObject(func(bucket, objKey string, o object.Object) { for i, slab := range o.Slabs { if slab.Key.String() != s.Key.String() { continue } // update slab - shards := os.objects[bucket][path].Slabs[i].Slab.Shards + shards := os.objects[bucket][objKey].Slabs[i].Slab.Shards for sI := range shards { // overwrite latest host shards[sI].LatestHost = s.Shards[sI].LatestHost @@ -523,7 +523,7 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra } } } - os.objects[bucket][path].Slabs[i].Slab.Shards = shards + os.objects[bucket][objKey].Slabs[i].Slab.Shards = shards return } }) @@ -544,9 +544,9 @@ func (os *objectStoreMock) PackedSlabsForUpload(ctx context.Context, lockingDura if ps.parameterKey == parameterKey && time.Now().After(ps.lockedUntil) { ps.lockedUntil = time.Now().Add(lockingDuration) pss = append(pss, api.PackedSlab{ - BufferID: ps.bufferID, - Data: ps.data, - Key: ps.slabKey, + BufferID: ps.bufferID, + Data: ps.data, + EncryptionKey: ps.slabKey, }) if len(pss) == limit { break @@ -566,9 +566,9 @@ func (os *objectStoreMock) MarkPackedSlabsUploaded(ctx context.Context, slabs [] } slabKeyToSlab := make(map[string]*object.Slab) - os.forEachObject(func(bucket, path string, o object.Object) { + 
os.forEachObject(func(bucket, objKey string, o object.Object) { for i, slab := range o.Slabs { - slabKeyToSlab[slab.Slab.Key.String()] = &os.objects[bucket][path].Slabs[i].Slab + slabKeyToSlab[slab.Slab.Key.String()] = &os.objects[bucket][objKey].Slabs[i].Slab } }) @@ -604,7 +604,7 @@ func (os *objectStoreMock) setSlabBufferMaxSizeSoft(n int) { os.slabBufferMaxSizeSoft = n } -func (os *objectStoreMock) forEachObject(fn func(bucket, path string, o object.Object)) { +func (os *objectStoreMock) forEachObject(fn func(bucket, key string, o object.Object)) { for bucket, objects := range os.objects { for path, object := range objects { fn(bucket, path, object) diff --git a/worker/s3/backend.go b/worker/s3/backend.go index e1963666a..2fefec1c8 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -115,7 +115,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 // Loop over the entries and add them to the response. for _, object := range objects { - key := strings.TrimPrefix(object.Name, "/") + key := strings.TrimPrefix(object.Key, "/") if prefix.HasDelimiter && strings.HasSuffix(key, prefix.Delimiter) { response.AddPrefix(key) continue @@ -199,7 +199,7 @@ func (s *s3) DeleteBucket(ctx context.Context, name string) error { // If the backend is a VersionedBackend, GetObject retrieves the latest version. // TODO: Range requests starting from the end are not supported yet. Backend // needs to be updated for that. 
-func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { +func (s *s3) GetObject(ctx context.Context, bucketName, key string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { if rangeRequest != nil && rangeRequest.FromEnd { return nil, gofakes3.ErrorMessage(gofakes3.ErrNotImplemented, "range request from end not supported") } @@ -213,11 +213,11 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range opts.Range = &api.DownloadRange{Offset: rangeRequest.Start, Length: length} } - res, err := s.w.GetObject(ctx, bucketName, objectName, opts) + res, err := s.w.GetObject(ctx, bucketName, key, opts) if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) } else if utils.IsErr(err, api.ErrObjectNotFound) { - return nil, gofakes3.KeyNotFound(objectName) + return nil, gofakes3.KeyNotFound(key) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } @@ -248,7 +248,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range return &gofakes3.Object{ Hash: etag, - Name: gofakes3.URLEncode(objectName), + Name: gofakes3.URLEncode(key), Metadata: res.Metadata, Size: res.Size, Contents: res.Content, @@ -265,10 +265,10 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // // HeadObject should return a NotFound() error if the object does not // exist. 
-func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{}) +func (s *s3) HeadObject(ctx context.Context, bucketName, key string) (*gofakes3.Object, error) { + res, err := s.w.HeadObject(ctx, bucketName, key, api.HeadObjectOptions{}) if utils.IsErr(err, api.ErrObjectNotFound) { - return nil, gofakes3.KeyNotFound(objectName) + return nil, gofakes3.KeyNotFound(key) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } @@ -291,7 +291,7 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go return &gofakes3.Object{ Hash: hash, - Name: gofakes3.URLEncode(objectName), + Name: gofakes3.URLEncode(key), Metadata: metadata, Size: res.Size, Contents: io.NopCloser(bytes.NewReader(nil)), @@ -313,8 +313,8 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // Removes the null version (if there is one) of an object and inserts a // delete marker, which becomes the latest version of the object. If there // isn't a null version, Amazon S3 does not remove any objects. 
-func (s *s3) DeleteObject(ctx context.Context, bucketName, objectName string) (gofakes3.ObjectDeleteResult, error) { - err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) +func (s *s3) DeleteObject(ctx context.Context, bucketName, key string) (gofakes3.ObjectDeleteResult, error) { + err := s.b.DeleteObject(ctx, bucketName, key, api.DeleteObjectOptions{}) if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.ObjectDeleteResult{}, gofakes3.BucketNotFound(bucketName) } else if utils.IsErr(err, api.ErrObjectNotFound) { @@ -354,17 +354,17 @@ func (s *s3) PutObject(ctx context.Context, bucketName, key string, meta map[str func (s *s3) DeleteMulti(ctx context.Context, bucketName string, objects ...string) (gofakes3.MultiDeleteResult, error) { var res gofakes3.MultiDeleteResult - for _, objectName := range objects { - err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) + for _, key := range objects { + err := s.b.DeleteObject(ctx, bucketName, key, api.DeleteObjectOptions{}) if err != nil && !utils.IsErr(err, api.ErrObjectNotFound) { res.Error = append(res.Error, gofakes3.ErrorResult{ - Key: objectName, + Key: key, Code: gofakes3.ErrInternal, Message: err.Error(), }) } else { res.Deleted = append(res.Deleted, gofakes3.ObjectID{ - Key: objectName, + Key: key, VersionID: "", // not supported }) } @@ -432,7 +432,7 @@ func (s *s3) ListMultipartUploads(ctx context.Context, bucket string, marker *go var uploads []gofakes3.ListMultipartUploadItem for _, upload := range resp.Uploads { uploads = append(uploads, gofakes3.ListMultipartUploadItem{ - Key: upload.Path[1:], + Key: upload.Key[1:], UploadID: gofakes3.UploadID(upload.UploadID), Initiated: gofakes3.NewContentTime(upload.CreatedAt.Std()), }) diff --git a/worker/s3/s3.go b/worker/s3/s3.go index 8c334fa16..347e3c5cc 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -30,14 +30,14 @@ type Bus interface { DeleteBucket(ctx context.Context, bucketName string) error 
ListBuckets(ctx context.Context) (buckets []api.Bucket, err error) - AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) - CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) - DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) + AddObject(ctx context.Context, bucket, key, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) + CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) + DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) Objects(ctx context.Context, bucket, prefix string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) - AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) - CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) - CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error) + AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) (err error) + CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) + CreateMultipartUpload(ctx context.Context, bucket, key string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucket, object string, uploadID string, 
marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) @@ -47,10 +47,10 @@ type Bus interface { } type Worker interface { - GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) - HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) - UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) - UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) + GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) + HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) + UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) + UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, key, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) } func (l *gofakes3Logger) Print(level gofakes3.LogLevel, v ...interface{}) { diff --git a/worker/upload.go b/worker/upload.go index 6ced77f9a..1e1dca9a1 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -154,16 +154,16 @@ func (w *Worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime w.uploadManager = newUploadManager(w.shutdownCtx, w, w.bus, w.bus, w.bus, maxMemory, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } -func (w *Worker) upload(ctx context.Context, bucket, path string, rs api.RedundancySettings, r io.Reader, contracts []api.ContractMetadata, opts ...UploadOption) (_ string, err error) { +func (w *Worker) upload(ctx context.Context, bucket, key string, rs 
api.RedundancySettings, r io.Reader, contracts []api.ContractMetadata, opts ...UploadOption) (_ string, err error) { // apply the options - up := defaultParameters(bucket, path, rs) + up := defaultParameters(bucket, key, rs) for _, opt := range opts { opt(&up) } // if not given, try decide on a mime type using the file extension if !up.multipart && up.mimeType == "" { - up.mimeType = mime.TypeByExtension(filepath.Ext(up.path)) + up.mimeType = mime.TypeByExtension(filepath.Ext(up.key)) // if mime type is still not known, wrap the reader with a mime reader if up.mimeType == "" { @@ -540,13 +540,13 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a if up.multipart { // persist the part - err = mgr.os.AddMultipartPart(ctx, up.bucket, up.path, up.contractSet, eTag, up.uploadID, up.partNumber, o.Slabs) + err = mgr.os.AddMultipartPart(ctx, up.bucket, up.key, up.contractSet, eTag, up.uploadID, up.partNumber, o.Slabs) if err != nil { return bufferSizeLimitReached, "", fmt.Errorf("couldn't add multi part: %w", err) } } else { // persist the object - err = mgr.os.AddObject(ctx, up.bucket, up.path, up.contractSet, o, api.AddObjectOptions{MimeType: up.mimeType, ETag: eTag, Metadata: up.metadata}) + err = mgr.os.AddObject(ctx, up.bucket, up.key, up.contractSet, o, api.AddObjectOptions{MimeType: up.mimeType, ETag: eTag, Metadata: up.metadata}) if err != nil { return bufferSizeLimitReached, "", fmt.Errorf("couldn't add object: %w", err) } @@ -561,7 +561,7 @@ func (mgr *uploadManager) UploadPackedSlab(ctx context.Context, rs api.Redundanc defer cancel() // build the shards - shards := encryptPartialSlab(ps.Data, ps.Key, uint8(rs.MinShards), uint8(rs.TotalShards)) + shards := encryptPartialSlab(ps.Data, ps.EncryptionKey, uint8(rs.MinShards), uint8(rs.TotalShards)) // create the upload upload, err := mgr.newUpload(len(shards), contracts, bh, lockPriority) diff --git a/worker/upload_params.go b/worker/upload_params.go index 109488bb9..b20ec6485 100644 
--- a/worker/upload_params.go +++ b/worker/upload_params.go @@ -7,7 +7,7 @@ import ( type uploadParameters struct { bucket string - path string + key string multipart bool uploadID string @@ -25,10 +25,10 @@ type uploadParameters struct { metadata api.ObjectUserMetadata } -func defaultParameters(bucket, path string, rs api.RedundancySettings) uploadParameters { +func defaultParameters(bucket, key string, rs api.RedundancySettings) uploadParameters { return uploadParameters{ bucket: bucket, - path: path, + key: key, ec: object.GenerateEncryptionKey(), // random key encryptionOffset: 0, // from the beginning diff --git a/worker/upload_test.go b/worker/upload_test.go index c67044101..d36c67a6e 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -221,8 +221,8 @@ func TestUploadPackedSlab(t *testing.T) { var c int uploadBytes := func(n int) { t.Helper() - params.path = fmt.Sprintf("%s_%d", t.Name(), c) - _, err := w.upload(context.Background(), params.bucket, params.path, testRedundancySettings, bytes.NewReader(frand.Bytes(n)), w.Contracts(), opts...) + params.key = fmt.Sprintf("%s_%d", t.Name(), c) + _, err := w.upload(context.Background(), params.bucket, params.key, testRedundancySettings, bytes.NewReader(frand.Bytes(n)), w.Contracts(), opts...) if err != nil { t.Fatal(err) } @@ -599,7 +599,7 @@ func TestUploadRegression(t *testing.T) { // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err := w.upload(ctx, params.bucket, params.path, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) + _, err := w.upload(ctx, params.bucket, params.key, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) 
if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } @@ -608,7 +608,7 @@ func TestUploadRegression(t *testing.T) { unblock() // upload data - _, err = w.upload(context.Background(), params.bucket, params.path, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) + _, err = w.upload(context.Background(), params.bucket, params.key, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) if err != nil { t.Fatal(err) } @@ -660,10 +660,10 @@ func TestUploadSingleSectorSlowHosts(t *testing.T) { } } -func testParameters(path string) uploadParameters { +func testParameters(key string) uploadParameters { return uploadParameters{ bucket: testBucket, - path: path, + key: key, ec: object.GenerateEncryptionKey(), // random key encryptionOffset: 0, // from the beginning diff --git a/worker/worker.go b/worker/worker.go index 2ce7f78a8..2ec8c835e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -113,8 +113,8 @@ type ( Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) // NOTE: used for upload - AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error - AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) + AddObject(ctx context.Context, bucket, key, contractSet string, o object.Object, opts api.AddObjectOptions) error + AddMultipartPart(ctx context.Context, bucket, key, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error) AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error FinishUpload(ctx context.Context, uID api.UploadID) error @@ -124,8 +124,8 @@ type ( // NOTE: used by worker Bucket(_ 
context.Context, bucket string) (api.Bucket, error) - Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.Object, error) - DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error + Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (api.Object, error) + DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) error MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) } @@ -664,7 +664,7 @@ func (w *Worker) objectsHandlerHEAD(jc jape.Context) { return } - // parse path + // parse key path := jc.PathParam("key") var off int @@ -745,8 +745,8 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { return } - path := jc.PathParam("key") - if path == "" { + key := jc.PathParam("key") + if key == "" { jc.Error(errors.New("no path provided"), http.StatusBadRequest) return } @@ -763,7 +763,7 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { return } - gor, err := w.GetObject(ctx, bucket, path, api.DownloadObjectOptions{ + gor, err := w.GetObject(ctx, bucket, key, api.DownloadObjectOptions{ Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { @@ -778,7 +778,7 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { defer gor.Content.Close() // serve the content - serveContent(jc.ResponseWriter, jc.Request, path, gor.Content, gor.HeadObjectResponse) + serveContent(jc.ResponseWriter, jc.Request, key, gor.Content, gor.HeadObjectResponse) } func (w *Worker) objectsHandlerPUT(jc jape.Context) { @@ -857,7 +857,7 @@ func (w *Worker) multipartUploadHandlerPUT(jc jape.Context) { ctx := jc.Request.Context() // grab the path - path := jc.PathParam("path") + path := jc.PathParam("key") // decode the contract set from the query string var contractset string @@ -1146,7 
+1146,7 @@ func (w *Worker) Handler() http.Handler { "PUT /objects/*key": w.objectsHandlerPUT, "DELETE /objects/*key": w.objectsHandlerDELETE, - "PUT /multipart/*path": w.multipartUploadHandlerPUT, + "PUT /multipart/*key": w.multipartUploadHandlerPUT, "GET /state": w.stateHandlerGET, }) @@ -1292,9 +1292,9 @@ func isErrHostUnreachable(err error) bool { utils.IsErr(err, errors.New("cannot assign requested address")) } -func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.Object, error) { +func (w *Worker) headObject(ctx context.Context, bucket, key string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.Object, error) { // fetch object - res, err := w.bus.Object(ctx, bucket, path, api.GetObjectOptions{ + res, err := w.bus.Object(ctx, bucket, key, api.GetObjectOptions{ OnlyMetadata: onlyMetadata, }) if err != nil { @@ -1355,9 +1355,9 @@ func (w *Worker) FundAccount(ctx context.Context, fcid types.FileContractID, hk }) } -func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { +func (w *Worker) GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { // head object - hor, res, err := w.headObject(ctx, bucket, path, false, api.HeadObjectOptions{ + hor, res, err := w.headObject(ctx, bucket, key, false, api.HeadObjectOptions{ Range: opts.Range, }) if err != nil { @@ -1400,7 +1400,7 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) && !errors.Is(err, io.ErrClosedPipe) { - w.registerAlert(newDownloadFailedAlert(bucket, path, offset, length, int64(len(contracts)), err)) + w.registerAlert(newDownloadFailedAlert(bucket, key, offset, length, int64(len(contracts)), err)) } return fmt.Errorf("failed to download 
object: %w", err) } @@ -1420,8 +1420,8 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do }, nil } -func (w *Worker) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { - res, _, err := w.headObject(ctx, bucket, path, true, opts) +func (w *Worker) HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { + res, _, err := w.headObject(ctx, bucket, key, true, opts) return res, err } @@ -1444,7 +1444,7 @@ func (w *Worker) SyncAccount(ctx context.Context, fcid types.FileContractID, hk return nil } -func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { +func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { // prepare upload params up, err := w.prepareUploadParams(ctx, bucket, opts.ContractSet, opts.MinShards, opts.TotalShards) if err != nil { @@ -1461,7 +1461,7 @@ func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } // upload - eTag, err := w.upload(ctx, bucket, path, up.RedundancySettings, r, contracts, + eTag, err := w.upload(ctx, bucket, key, up.RedundancySettings, r, contracts, WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithMimeType(opts.MimeType), @@ -1469,9 +1469,9 @@ func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str WithObjectUserMetadata(opts.Metadata), ) if err != nil { - w.logger.With(zap.Error(err)).With("path", path).With("bucket", bucket).Error("failed to upload object") + w.logger.With(zap.Error(err)).With("key", key).With("bucket", bucket).Error("failed to upload object") if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { - 
w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, opts.MimeType, up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) + w.registerAlert(newUploadFailedAlert(bucket, key, up.ContractSet, opts.MimeType, up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) } return nil, fmt.Errorf("couldn't upload object: %w", err) } @@ -1501,13 +1501,13 @@ func (w *Worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithPacking(up.UploadPacking), - WithCustomKey(upload.Key), + WithCustomKey(upload.EncryptionKey), WithPartNumber(partNumber), WithUploadID(uploadID), } // make sure only one of the following is set - if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && opts.EncryptionOffset == nil { + if encryptionEnabled := !upload.EncryptionKey.IsNoopKey(); encryptionEnabled && opts.EncryptionOffset == nil { return nil, fmt.Errorf("%w: if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set", api.ErrInvalidMultipartEncryptionSettings) } else if opts.EncryptionOffset != nil && *opts.EncryptionOffset < 0 { return nil, fmt.Errorf("%w: encryption offset must be positive", api.ErrInvalidMultipartEncryptionSettings) From 7c23b18a9907823370aeea56185f91789967fdc4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 5 Sep 2024 09:38:44 +0200 Subject: [PATCH 48/98] readme: update --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index c20749935..c9ac7cfa0 100644 --- a/README.md +++ b/README.md @@ -54,10 +54,6 @@ overview of all settings configurable through the CLI. | `Log.Database.Level` | Logger level for database queries (info\|warn\|error). 
Defaults to 'warn' | `warn` | `--log.database.level` | `RENTERD_LOG_DATABASE_LEVEL`, `RENTERD_LOG_LEVEL` | `log.database.level` | | `Log.Database.IgnoreRecordNotFoundError` | Enable ignoring 'not found' errors resulting from database queries. Defaults to 'true' | `true` | `--log.database.ignoreRecordNotFoundError` | `RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR` | `log.database.ignoreRecordNotFoundError` | | `Log.Database.SlowThreshold` | Threshold for slow queries in logger. Defaults to 100ms | `100ms` | `--log.database.slowThreshold` | `RENTERD_LOG_DATABASE_SLOW_THRESHOLD` | `log.database.slowThreshold` | -| `Log.Database.Level (DEPRECATED)` | Logger level | `warn` | `--db.logger.logLevel` | `RENTERD_DB_LOGGER_LOG_LEVEL` | `log.database.level` | -| `Log.Database.IgnoreRecordNotFoundError (DEPRECATED)` | Ignores 'not found' errors in logger | `true` | `--db.logger.ignoreNotFoundError`| `RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR` | `log.ignoreRecordNotFoundError` | -| `Log.Database.SlowThreshold (DEPRECATED)` | Threshold for slow queries in logger | `100ms` | `--db.logger.slowThreshold` | `RENTERD_DB_LOGGER_SLOW_THRESHOLD` | `log.slowThreshold` | -| `Log.Path (DEPRECATED)` | Path to directory for logs | - | `--log-path` | `RENTERD_LOG_PATH` | `log.path` | | `Database.MySQL.URI` | Database URI for the bus | - | `--db.uri` | `RENTERD_DB_URI` | `database.mysql.uri` | | `Database.MySQL.User` | Database username for the bus | `renterd` | `--db.user` | `RENTERD_DB_USER` | `database.mysql.user` | | `Database.MySQL.Password` | Database password for the bus | - | - | `RENTERD_DB_PASSWORD` | `database.mysql.password` | @@ -70,7 +66,6 @@ overview of all settings configurable through the CLI. 
| `Bus.GatewayAddr` | Address for Sia peer connections | `:9981` | `--bus.gatewayAddr` | `RENTERD_BUS_GATEWAY_ADDR` | `bus.gatewayAddr` | | `Bus.RemoteAddr` | Remote address for the bus | - | - | `RENTERD_BUS_REMOTE_ADDR` | `bus.remoteAddr` | | `Bus.RemotePassword` | Remote password for the bus | - | - | `RENTERD_BUS_API_PASSWORD` | `bus.remotePassword` | -| `Bus.PersistInterval` | Interval for persisting consensus updates | `1m` | `--bus.persistInterval` | - | `bus.persistInterval` | | `Bus.UsedUTXOExpiry` | Expiry for used UTXOs in transactions | `24h` | `--bus.usedUTXOExpiry` | - | `bus.usedUtxoExpiry` | | `Bus.SlabBufferCompletionThreshold` | Threshold for slab buffer upload | `4096` | `--bus.slabBufferCompletionThreshold` | `RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD` | `bus.slabBufferCompletionThreshold` | | `Worker.AllowPrivateIPs` | Allows hosts with private IPs | - | `--worker.allowPrivateIPs` | - | `worker.allowPrivateIPs` | @@ -102,7 +97,6 @@ overview of all settings configurable through the CLI. 
| `S3.Enabled` | Enables/disables S3 API | `true` | `--s3.enabled` | `RENTERD_S3_ENABLED` | `s3.enabled` | | `S3.HostBucketBases` | Enables bucket rewriting in the router for the provided bases | - | `--s3.hostBucketBases` | `RENTERD_S3_HOST_BUCKET_BASES` | `s3.hostBucketBases` | | `S3.HostBucketEnabled` | Enables bucket rewriting in the router | - | `--s3.hostBucketEnabled` | `RENTERD_S3_HOST_BUCKET_ENABLED` | `s3.hostBucketEnabled` | -| `S3.KeypairsV4 (DEPRECATED)` | V4 keypairs for S3 | - | - | - | `s3.keypairsV4` | ### Single-Node Setup From a09f927b99c60ace5ed2390e1e3b384ed3d13108 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 5 Sep 2024 11:54:57 +0200 Subject: [PATCH 49/98] autopilot: remove /hosts route --- autopilot/autopilot.go | 43 -------- bus/bus.go | 2 +- bus/routes.go | 10 +- stores/hostdb.go | 14 ++- stores/hostdb_test.go | 210 ++++++++++++++++++++++++++++++++++---- stores/sql/database.go | 2 +- stores/sql/main.go | 40 ++++---- stores/sql/mysql/main.go | 4 +- stores/sql/sqlite/main.go | 4 +- 9 files changed, 235 insertions(+), 94 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 52d24c696..966c7b93b 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -161,7 +161,6 @@ func (ap *Autopilot) Handler() http.Handler { "GET /config": ap.configHandlerGET, "PUT /config": ap.configHandlerPUT, "POST /config": ap.configHandlerPOST, - "POST /hosts": ap.hostsHandlerPOST, "GET /host/:hostKey": ap.hostHandlerGET, "GET /state": ap.stateHandlerGET, "POST /trigger": ap.triggerHandlerPOST, @@ -736,48 +735,6 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { jc.Encode(api.HostResponse{Host: hi}) } -func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.HostsRequest - if jc.Decode(&req) != nil { - return - } else if req.AutopilotID != "" && req.AutopilotID != ap.id { - jc.Error(errors.New("invalid autopilot id"), http.StatusBadRequest) - return - } - - hosts, err := 
ap.bus.Hosts(jc.Request.Context(), api.HostOptions{ - AutopilotID: ap.id, - Offset: req.Offset, - Limit: req.Limit, - FilterMode: req.FilterMode, - UsabilityMode: req.UsabilityMode, - AddressContains: req.AddressContains, - KeyIn: req.KeyIn, - }) - if jc.Check("failed to get host info", err) != nil { - return - } - resps := make([]api.HostResponse, len(hosts)) - for i, host := range hosts { - if check, ok := host.Checks[ap.id]; ok { - resps[i] = api.HostResponse{ - Host: host, - Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), - }, - } - } else { - resps[i] = api.HostResponse{Host: host} - } - } - jc.Encode(resps) -} - func (ap *Autopilot) stateHandlerGET(jc jape.Context) { ap.mu.Lock() pruning, pLastStart := ap.pruning, ap.pruningLastStart // TODO: move to a 'pruner' type diff --git a/bus/bus.go b/bus/bus.go index c4b916a43..d8f156cc9 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -197,7 +197,7 @@ type ( Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) - Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) RecordHostScans(ctx context.Context, scans []api.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error diff --git a/bus/routes.go b/bus/routes.go index 3d9c32da3..50cfb23e0 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -632,7 +632,15 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { req.Limit = -1 } - 
hosts, err := b.hs.Hosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) + hosts, err := b.hs.Hosts(jc.Request.Context(), api.HostOptions{ + AutopilotID: req.AutopilotID, + FilterMode: req.FilterMode, + UsabilityMode: req.UsabilityMode, + AddressContains: req.AddressContains, + KeyIn: req.KeyIn, + Offset: req.Offset, + Limit: req.Limit, + }) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return } diff --git a/stores/hostdb.go b/stores/hostdb.go index 22f9ab9f3..1df6a30d6 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -17,7 +17,15 @@ var ( // Host returns information about a host. func (s *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { - hosts, err := s.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hostKey}, 0, 1) + hosts, err := s.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + AddressContains: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + KeyIn: []types.PublicKey{hostKey}, + Offset: 0, + Limit: 1, + }) if err != nil { return api.Host{}, err } else if len(hosts) == 0 { @@ -48,10 +56,10 @@ func (s *SQLStore) ResetLostSectors(ctx context.Context, hk types.PublicKey) err }) } -func (s *SQLStore) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { +func (s *SQLStore) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { var hosts []api.Host err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - hosts, err = tx.Hosts(ctx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) + hosts, err = tx.Hosts(ctx, opts) return }) return hosts, err diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index c62d67adc..fdcd68327 100644 --- 
a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -38,7 +38,15 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + allHosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -63,7 +71,15 @@ func TestSQLHostDB(t *testing.T) { } // Same thing again but with hosts. - hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -123,7 +139,15 @@ func TestHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // search all hosts - his, err := ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err := ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -131,19 +155,43 @@ func TestHosts(t *testing.T) { } // assert offset & limit are taken into account - his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: 1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { t.Fatal("unexpected") } - his, err = 
ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 2) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 1, + Limit: 2, + }) if err != nil { t.Fatal(err) } else if len(his) != 2 { t.Fatal("unexpected") } - his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 3, 1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 3, + Limit: 1, + }) if err != nil { t.Fatal(err) } else if len(his) != 0 { @@ -151,16 +199,48 @@ func TestHosts(t *testing.T) { } // assert address and key filters are taken into account - if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1001", nil, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1001", + KeyIn: nil, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 2 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: []types.PublicKey{hk2, hk3}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 2 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || 
len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1002", + KeyIn: []types.PublicKey{hk2, hk3}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk1}, 0, -1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1002", + KeyIn: []types.PublicKey{hk1}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } @@ -169,7 +249,15 @@ func TestHosts(t *testing.T) { if err != nil { t.Fatal(err) } - his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 2 { @@ -177,7 +265,15 @@ func TestHosts(t *testing.T) { } else if his[0].PublicKey != (types.PublicKey{2}) || his[1].PublicKey != (types.PublicKey{3}) { t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) } - his, err = ss.Hosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeBlocked, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -232,7 +328,15 @@ func TestHosts(t *testing.T) { } // fetch 
all hosts - his, err = ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -249,7 +353,15 @@ func TestHosts(t *testing.T) { } // assert autopilot filter is taken into account - his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: ap1, + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -271,7 +383,15 @@ func TestHosts(t *testing.T) { if err != nil { t.Fatal(err) } - his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: ap1, + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeUsable, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -283,7 +403,15 @@ func TestHosts(t *testing.T) { t.Fatal("unexpected", c1, ok) } - his, err = ss.Hosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: ap1, + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeUnusable, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -599,7 +727,15 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { 
t.Helper() - hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -679,21 +815,45 @@ func TestSQLHostAllowlist(t *testing.T) { assertHosts := func(total, allowed, blocked int) error { t.Helper() - hosts, err := ss.Hosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err := ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { return err } if len(hosts) != total { return fmt.Errorf("invalid number of hosts: %v", len(hosts)) } - hosts, err = ss.Hosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { return err } if len(hosts) != allowed { return fmt.Errorf("invalid number of hosts: %v", len(hosts)) } - hosts, err = ss.Hosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeBlocked, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { return err } @@ -767,7 +927,15 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, "", api.HostFilterModeAllowed, 
api.UsabilityFilterModeAll, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } diff --git a/stores/sql/database.go b/stores/sql/database.go index 0dc72b32f..22e767388 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -186,7 +186,7 @@ type ( InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error // Hosts returns a list of hosts that match the provided filters - Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) // HostsForScanning returns a list of hosts to scan which haven't been // scanned since at least maxLastScan. 
diff --git a/stores/sql/main.go b/stores/sql/main.go index 6d4a82a75..60d4bf3d7 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -662,8 +662,8 @@ func HostBlocklist(ctx context.Context, tx sql.Tx) ([]string, error) { return blocklist, nil } -func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - if offset < 0 { +func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, error) { + if opts.Offset < 0 { return nil, ErrNegativeOffset } @@ -675,12 +675,12 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, } // validate filterMode - switch filterMode { + switch opts.FilterMode { case api.HostFilterModeAllowed: case api.HostFilterModeBlocked: case api.HostFilterModeAll: default: - return nil, fmt.Errorf("invalid filter mode: %v", filterMode) + return nil, fmt.Errorf("invalid filter mode: %v", opts.FilterMode) } var whereExprs []string @@ -688,8 +688,8 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, // fetch autopilot id var autopilotID int64 - if autopilot != "" { - if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", autopilot). + if opts.AutopilotID != "" { + if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", opts.AutopilotID). 
Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) { return nil, api.ErrAutopilotNotFound } else if err != nil { @@ -698,7 +698,7 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, } // filter allowlist/blocklist - switch filterMode { + switch opts.FilterMode { case api.HostFilterModeAllowed: if hasAllowlist { whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") @@ -721,28 +721,28 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, } // filter address - if addressContains != "" { + if opts.AddressContains != "" { whereExprs = append(whereExprs, "h.net_address LIKE ?") - args = append(args, "%"+addressContains+"%") + args = append(args, "%"+opts.AddressContains+"%") } // filter public key - if len(keyIn) > 0 { - pubKeys := make([]any, len(keyIn)) - for i, pk := range keyIn { + if len(opts.KeyIn) > 0 { + pubKeys := make([]any, len(opts.KeyIn)) + for i, pk := range opts.KeyIn { pubKeys[i] = PublicKey(pk) } - placeholders := strings.Repeat("?, ", len(keyIn)-1) + "?" + placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?" whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) args = append(args, pubKeys...) } // filter usability whereApExpr := "" - if autopilot != "" { + if opts.AutopilotID != "" { whereApExpr = "AND hc.db_autopilot_id = ?" 
} - switch usabilityMode { + switch opts.UsabilityMode { case api.UsabilityFilterModeUsable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) args = append(args, autopilotID) @@ -752,10 +752,10 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, } // offset + limit - if limit == -1 { - limit = math.MaxInt64 + if opts.Limit == -1 { + opts.Limit = math.MaxInt64 } - offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset) + offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", opts.Limit, opts.Offset) // fetch stored data for each host rows, err := tx.Query(ctx, "SELECT host_id, SUM(size) FROM contracts GROUP BY host_id") @@ -837,9 +837,9 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, // query host checks var apExpr string - if autopilot != "" { + if opts.AutopilotID != "" { apExpr = "WHERE ap.identifier = ?" 
- args = append(args, autopilot) + args = append(args, opts.AutopilotID) } rows, err = tx.Query(ctx, fmt.Sprintf(` SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip, diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index c6eb0bf7d..df9ce0b04 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -407,8 +407,8 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { return ssql.HostBlocklist(ctx, tx) } -func (tx *MainDatabaseTx) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.Hosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +func (tx *MainDatabaseTx) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, opts) } func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 5472d5de7..9ef185e2b 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -396,8 +396,8 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { return ssql.HostBlocklist(ctx, tx) } -func (tx *MainDatabaseTx) Hosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.Hosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +func (tx *MainDatabaseTx) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, opts) } func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { From 
434e11f37341933db1e2b0f1b11663f653427d38 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 5 Sep 2024 13:12:58 +0200 Subject: [PATCH 50/98] autopilot: fix jape check --- autopilot/autopilot.go | 41 +++------------------ autopilot/client.go | 21 ----------- internal/test/e2e/cluster_test.go | 61 +++++++++++++++---------------- 3 files changed, 35 insertions(+), 88 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 966c7b93b..f6db87b99 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -158,12 +158,11 @@ func (ap *Autopilot) Config(ctx context.Context) (api.Autopilot, error) { // Handler returns an HTTP handler that serves the autopilot api. func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ - "GET /config": ap.configHandlerGET, - "PUT /config": ap.configHandlerPUT, - "POST /config": ap.configHandlerPOST, - "GET /host/:hostKey": ap.hostHandlerGET, - "GET /state": ap.stateHandlerGET, - "POST /trigger": ap.triggerHandlerPOST, + "GET /config": ap.configHandlerGET, + "PUT /config": ap.configHandlerPUT, + "POST /config": ap.configHandlerPOST, + "GET /state": ap.stateHandlerGET, + "POST /trigger": ap.triggerHandlerPOST, }) } @@ -705,36 +704,6 @@ func (ap *Autopilot) triggerHandlerPOST(jc jape.Context) { }) } -func (ap *Autopilot) hostHandlerGET(jc jape.Context) { - var hk types.PublicKey - if jc.DecodeParam("hostKey", &hk) != nil { - return - } - - hi, err := ap.bus.Host(jc.Request.Context(), hk) - if jc.Check("failed to get host info", err) != nil { - return - } - - check, ok := hi.Checks[ap.id] - if ok { - jc.Encode(api.HostResponse{ - Host: hi, - Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), - }, - }) - return - } - - jc.Encode(api.HostResponse{Host: hi}) -} - func (ap *Autopilot) 
stateHandlerGET(jc jape.Context) { ap.mu.Lock() pruning, pLastStart := ap.pruning, ap.pruningLastStart // TODO: move to a 'pruner' type diff --git a/autopilot/client.go b/autopilot/client.go index 22ab93f36..05592662b 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -2,9 +2,7 @@ package autopilot import ( "context" - "fmt" - "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/api" ) @@ -33,25 +31,6 @@ func (c *Client) UpdateConfig(cfg api.AutopilotConfig) error { return c.c.PUT("/config", cfg) } -// HostInfo returns information about the host with given host key. -func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err error) { - err = c.c.GET(fmt.Sprintf("/host/%s", hostKey), &resp) - return -} - -// HostInfo returns information about all hosts. -func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { - err = c.c.POST("/hosts", api.HostsRequest{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - UsabilityMode: usabilityMode, - AddressContains: addressContains, - KeyIn: keyIn, - }, &resp) - return -} - // State returns the current state of the autopilot. 
func (c *Client) State() (state api.AutopilotStateResponse, err error) { err = c.c.GET("/state", &state) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index d6c5ed0bc..00450116a 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -295,64 +295,63 @@ func TestNewTestCluster(t *testing.T) { hosts, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) for _, host := range hosts { - hi, err := cluster.Autopilot.HostInfo(host.PublicKey) + hi, err := cluster.Bus.Host(context.Background(), host.PublicKey) if err != nil { t.Fatal(err) - } - if hi.Checks.ScoreBreakdown.Score() == 0 { - js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") + } else if checks := hi.Checks[testApCfg().ID]; checks == (api.HostCheck{}) { + t.Fatal("host check not found") + } else if checks.Score.Score() == 0 { + js, _ := json.MarshalIndent(checks.Score, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) - } - if hi.Checks.Score == 0 { - t.Fatal("score shouldn't be 0") - } - if !hi.Checks.Usable { + } else if !checks.Usability.IsUsable() { t.Fatal("host should be usable") - } - if len(hi.Checks.UnusableReasons) != 0 { + } else if len(checks.Usability.UnusableReasons()) != 0 { t.Fatal("usable hosts don't have any reasons set") - } - if reflect.DeepEqual(hi.Host, api.Host{}) { + } else if reflect.DeepEqual(hi, api.Host{}) { t.Fatal("host wasn't set") - } - if hi.Host.Settings.Release == "" { + } else if hi.Settings.Release == "" { t.Fatal("release should be set") } } - hostInfos, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + hostInfos, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + }) tt.OK(err) allHosts := make(map[types.PublicKey]struct{}) for _, hi := range 
hostInfos { - if hi.Checks.ScoreBreakdown.Score() == 0 { - js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") + if checks := hi.Checks[testApCfg().ID]; checks == (api.HostCheck{}) { + t.Fatal("host check not found") + } else if checks.Score.Score() == 0 { + js, _ := json.MarshalIndent(checks.Score, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) - } - if hi.Checks.Score == 0 { - t.Fatal("score shouldn't be 0") - } - if !hi.Checks.Usable { + } else if !checks.Usability.IsUsable() { t.Fatal("host should be usable") - } - if len(hi.Checks.UnusableReasons) != 0 { + } else if len(checks.Usability.UnusableReasons()) != 0 { t.Fatal("usable hosts don't have any reasons set") - } - if reflect.DeepEqual(hi.Host, api.Host{}) { + } else if reflect.DeepEqual(hi, api.Host{}) { t.Fatal("host wasn't set") } - allHosts[hi.Host.PublicKey] = struct{}{} + allHosts[hi.PublicKey] = struct{}{} } - hostInfosUnusable, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) + hostInfosUnusable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.UsabilityFilterModeAll, + UsabilityMode: api.UsabilityFilterModeUnusable, + }) tt.OK(err) if len(hostInfosUnusable) != 0 { t.Fatal("there should be no unusable hosts", len(hostInfosUnusable)) } - hostInfosUsable, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + hostInfosUsable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.UsabilityFilterModeAll, + UsabilityMode: api.UsabilityFilterModeUsable, + }) tt.OK(err) for _, hI := range hostInfosUsable { - delete(allHosts, hI.Host.PublicKey) + delete(allHosts, hI.PublicKey) } if len(hostInfosUsable) != len(hostInfos) || len(allHosts) != 0 { t.Fatalf("result for 'usable' should match the result for 'all', \n\nall: %+v \n\nusable: 
%+v", hostInfos, hostInfosUsable) From 3934a8fb3e01503d69120f5fc6470ca427b89b5d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 5 Sep 2024 13:38:08 +0200 Subject: [PATCH 51/98] stores: debug logging --- stores/sql/main.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/stores/sql/main.go b/stores/sql/main.go index 60d4bf3d7..31fad2bf7 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -723,6 +723,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er // filter address if opts.AddressContains != "" { whereExprs = append(whereExprs, "h.net_address LIKE ?") + fmt.Println("append1", opts.AddressContains) args = append(args, "%"+opts.AddressContains+"%") } @@ -734,6 +735,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?" whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) + fmt.Println("append2", pubKeys) args = append(args, pubKeys...) 
} @@ -745,10 +747,12 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er switch opts.UsabilityMode { case api.UsabilityFilterModeUsable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) + fmt.Println("append3", autopilotID) args = append(args, autopilotID) case api.UsabilityFilterModeUnusable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) args = append(args, autopilotID) + fmt.Println("append4", autopilotID) } // offset + limit @@ -802,7 +806,9 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er %s `, blockedExpr, whereExpr, offsetLimitStr), args...) 
if err != nil { - return nil, fmt.Errorf("failed to fetch hosts: %w", err) + inOpts, _ := json.MarshalIndent(opts, " ", " ") + fmt.Println(string(inOpts)) + return nil, fmt.Errorf("failed to fetch hosts: %w %v %v", err, string(inOpts), args) } defer rows.Close() From 0ab35a801655ed2c1f637de0de34a0334fd5f26f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 5 Sep 2024 14:27:18 +0200 Subject: [PATCH 52/98] stores: remove logging --- bus/routes.go | 5 +++++ internal/test/e2e/cluster_test.go | 2 ++ stores/hostdb_test.go | 2 +- stores/sql/main.go | 32 +++++++++++++++---------------- 4 files changed, 24 insertions(+), 17 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index 50cfb23e0..a49742724 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -608,6 +608,11 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { return } + if req.AutopilotID == "" && req.UsabilityMode != api.UsabilityFilterModeAll { + jc.Error(errors.New("need to specify autopilot id when usability mode isn't 'all'"), http.StatusBadRequest) + return + } + // validate the filter mode switch req.FilterMode { case api.HostFilterModeAllowed: diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 00450116a..9c3383d83 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -337,6 +337,7 @@ func TestNewTestCluster(t *testing.T) { } hostInfosUnusable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + AutopilotID: testApCfg().ID, FilterMode: api.UsabilityFilterModeAll, UsabilityMode: api.UsabilityFilterModeUnusable, }) @@ -346,6 +347,7 @@ func TestNewTestCluster(t *testing.T) { } hostInfosUsable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + AutopilotID: testApCfg().ID, FilterMode: api.UsabilityFilterModeAll, UsabilityMode: api.UsabilityFilterModeUsable, }) diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index fdcd68327..82e73191e 100644 --- a/stores/hostdb_test.go +++ 
b/stores/hostdb_test.go @@ -370,7 +370,7 @@ func TestHosts(t *testing.T) { // assert h1 and h2 have the expected checks if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { - t.Fatal("unexpected", c1, ok) + t.Fatal("unexpected", c1, ok, his[0]) } else if c2, ok := his[1].Checks[ap1]; !ok || c2 != h2c1 { t.Fatal("unexpected", c2, ok) } else if _, ok := his[1].Checks[ap2]; ok { diff --git a/stores/sql/main.go b/stores/sql/main.go index 31fad2bf7..95d933fa9 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -28,7 +28,10 @@ import ( "lukechampine.com/frand" ) -var ErrNegativeOffset = errors.New("offset can not be negative") +var ( + ErrNegativeOffset = errors.New("offset can not be negative") + ErrMissingAutopilotID = errors.New("missing autopilot id") +) // helper types type ( @@ -665,6 +668,8 @@ func HostBlocklist(ctx context.Context, tx sql.Tx) ([]string, error) { func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, error) { if opts.Offset < 0 { return nil, ErrNegativeOffset + } else if opts.AutopilotID == "" && opts.UsabilityMode != "" && opts.UsabilityMode != api.UsabilityFilterModeAll { + return nil, fmt.Errorf("%w: have to specify autopilot id when filter mode isn't 'all'", ErrMissingAutopilotID) } var hasAllowlist, hasBlocklist bool @@ -723,7 +728,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er // filter address if opts.AddressContains != "" { whereExprs = append(whereExprs, "h.net_address LIKE ?") - fmt.Println("append1", opts.AddressContains) args = append(args, "%"+opts.AddressContains+"%") } @@ -735,7 +739,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?" whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) - fmt.Println("append2", pubKeys) args = append(args, pubKeys...) 
} @@ -743,16 +746,15 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er whereApExpr := "" if opts.AutopilotID != "" { whereApExpr = "AND hc.db_autopilot_id = ?" - } - switch opts.UsabilityMode { - case api.UsabilityFilterModeUsable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) - fmt.Println("append3", autopilotID) - args = append(args, autopilotID) - case api.UsabilityFilterModeUnusable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) - args = append(args, autopilotID) - fmt.Println("append4", autopilotID) + + switch opts.UsabilityMode { + case api.UsabilityFilterModeUsable: + whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) + args = append(args, autopilotID) + case api.UsabilityFilterModeUnusable: + whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 
INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) + args = append(args, autopilotID) + } } // offset + limit @@ -806,9 +808,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er %s `, blockedExpr, whereExpr, offsetLimitStr), args...) if err != nil { - inOpts, _ := json.MarshalIndent(opts, " ", " ") - fmt.Println(string(inOpts)) - return nil, fmt.Errorf("failed to fetch hosts: %w %v %v", err, string(inOpts), args) + return nil, fmt.Errorf("failed to fetch hosts: %w", err) } defer rows.Close() From 2cab31c631e8121021710b810f2e2b9dbaf99296 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 5 Sep 2024 17:21:29 +0200 Subject: [PATCH 53/98] all: rename pin setting --- api/events.go | 2 +- api/setting.go | 10 +++++----- bus/bus.go | 31 +++++++------------------------ bus/client/settings.go | 6 +++--- bus/routes.go | 15 ++++++++------- internal/bus/pinmanager.go | 10 +++++----- internal/bus/pinmanager_test.go | 26 +++++++++++++------------- internal/test/e2e/cluster.go | 2 +- stores/settings.go | 32 ++++++++++++++++---------------- 9 files changed, 59 insertions(+), 75 deletions(-) diff --git a/api/events.go b/api/events.go index bd6dcd776..43620eda7 100644 --- a/api/events.go +++ b/api/events.go @@ -64,7 +64,7 @@ type ( EventSettingUpdate struct { GougingSettings *GougingSettings `json:"gougingSettings,omitempty"` - PinnedSettings *PinnedSettings `json:"pinnedSettings,omitempty"` + PinnedSettings *PinningSettings `json:"pinnedSettings,omitempty"` S3Settings *S3Settings `json:"s3Settings,omitempty"` UploadSettings *UploadSettings `json:"uploadSettings,omitempty"` Timestamp time.Time `json:"timestamp"` diff --git a/api/setting.go b/api/setting.go 
index a9443d6be..668acb243 100644 --- a/api/setting.go +++ b/api/setting.go @@ -40,7 +40,7 @@ var ( // DefaultPinnedSettings define the default pin settings the bus is // configured with on startup. - DefaultPinnedSettings = PinnedSettings{ + DefaultPinnedSettings = PinningSettings{ Enabled: false, Currency: "usd", ForexEndpointURL: "https://api.siascan.com/exchange-rate/siacoin", @@ -120,10 +120,10 @@ type ( MigrationSurchargeMultiplier uint64 `json:"migrationSurchargeMultiplier"` } - // PinnedSettings holds the configuration for pinning certain settings to a + // PinningSettings holds the configuration for pinning certain settings to a // specific currency (e.g., USD). It uses a Forex API to fetch the current // exchange rate, allowing users to set prices in USD instead of SC. - PinnedSettings struct { + PinningSettings struct { // Enabled can be used to either enable or temporarily disable price // pinning. If enabled, both the currency and the Forex endpoint URL // must be valid. @@ -137,7 +137,7 @@ type ( ForexEndpointURL string `json:"forexEndpointURL"` // Threshold is a percentage between 0 and 1 that determines when the - // pinned settings are updated based on the exchange rate at the time. + // pinning settings are updated based on the exchange rate at the time. Threshold float64 `json:"threshold"` // Autopilots contains the pinned settings for every autopilot. @@ -202,7 +202,7 @@ func (p Pin) IsPinned() bool { } // Validate returns an error if the price pin settings are not considered valid. 
-func (pps PinnedSettings) Validate() error { +func (pps PinningSettings) Validate() error { if pps.ForexEndpointURL == "" { return fmt.Errorf("price pin settings must have a forex endpoint URL") } diff --git a/bus/bus.go b/bus/bus.go index e26842664..a5e575d44 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -34,7 +34,6 @@ import ( "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" - "golang.org/x/crypto/blake2b" ) const ( @@ -292,8 +291,8 @@ type ( GougingSettings(ctx context.Context) (api.GougingSettings, error) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error - PinnedSettings(ctx context.Context) (api.PinnedSettings, error) - UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error + PinningSettings(ctx context.Context) (api.PinningSettings, error) + UpdatePinningSettings(ctx context.Context, ps api.PinningSettings) error UploadSettings(ctx context.Context) (api.UploadSettings, error) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error @@ -578,24 +577,6 @@ func (b *Bus) addRenewedContract(ctx context.Context, renewedFrom types.FileCont return r, nil } -func (b *Bus) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { - seed := blake2b.Sum256(append(b.deriveSubKey("renterkey"), hostKey[:]...)) - pk := types.NewPrivateKeyFromSeed(seed[:]) - for i := range seed { - seed[i] = 0 - } - return pk -} - -func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { - seed := blake2b.Sum256(append(b.masterKey[:], []byte(purpose)...)) - pk := types.NewPrivateKeyFromSeed(seed[:]) - for i := range seed { - seed[i] = 0 - } - return pk -} - func (b *Bus) broadcastContract(ctx context.Context, fcid types.FileContractID) (txnID types.TransactionID, _ error) { // acquire contract lock indefinitely and defer the release lockID, err := b.contractLocker.Acquire(ctx, lockingPriorityRenew, fcid, time.Duration(math.MaxInt64)) @@ -615,7 +596,7 @@ func (b *Bus) broadcastContract(ctx 
context.Context, fcid types.FileContractID) } // derive the renter key - renterKey := b.deriveRenterKey(c.HostKey) + renterKey := b.masterKey.DeriveContractKey(c.HostKey) // fetch revision rev, err := b.rhp2.SignedRevision(ctx, c.HostIP, c.HostKey, renterKey, fcid, time.Minute) @@ -659,7 +640,7 @@ func (b *Bus) broadcastContract(ctx context.Context, fcid types.FileContractID) func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings, renterAddress types.Address, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostIP string, endHeight uint64) (rhpv2.ContractRevision, error) { // derive the renter key - renterKey := b.deriveRenterKey(hostKey) + renterKey := b.masterKey.DeriveContractKey(hostKey) // prepare the transaction cs := b.cm.TipState() @@ -753,6 +734,9 @@ func (b *Bus) prepareRenew(cs consensus.State, revision types.FileContractRevisi } func (b *Bus) renewContract(ctx context.Context, cs consensus.State, gp api.GougingParams, c api.ContractMetadata, hs rhpv2.HostSettings, renterFunds, minNewCollateral, maxFundAmount types.Currency, endHeight, expectedNewStorage uint64) (rhpv2.ContractRevision, types.Currency, types.Currency, error) { + // derive the renter key + renterKey := b.masterKey.DeriveContractKey(c.HostKey) + // acquire contract lock indefinitely and defer the release lockID, err := b.contractLocker.Acquire(ctx, lockingPriorityRenew, c.ID, time.Duration(math.MaxInt64)) if err != nil { @@ -772,7 +756,6 @@ func (b *Bus) renewContract(ctx context.Context, cs consensus.State, gp api.Goug // renew contract gc := gouging.NewChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, nil, nil) - renterKey := b.deriveRenterKey(c.HostKey) prepareRenew := b.prepareRenew(cs, rev, hs.Address, b.w.Address(), renterFunds, minNewCollateral, maxFundAmount, endHeight, expectedNewStorage) newRevision, txnSet, contractPrice, fundAmount, err := b.rhp3.Renew(ctx, gc, rev, renterKey, c.HostKey, c.SiamuxAddr, prepareRenew, 
b.w.SignTransaction) if err != nil { diff --git a/bus/client/settings.go b/bus/client/settings.go index 46d3e8708..25f9ae4bb 100644 --- a/bus/client/settings.go +++ b/bus/client/settings.go @@ -18,13 +18,13 @@ func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettin } // PricePinningSettings returns the contract set settings. -func (c *Client) PricePinningSettings(ctx context.Context) (ps api.PinnedSettings, err error) { +func (c *Client) PricePinningSettings(ctx context.Context) (ps api.PinningSettings, err error) { err = c.c.WithContext(ctx).GET("/settings/pinned", &ps) return } -// UpdatePinnedSettings updates the given setting. -func (c *Client) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { +// UpdatePinningSettings updates the given setting. +func (c *Client) UpdatePinningSettings(ctx context.Context, ps api.PinningSettings) error { return c.c.WithContext(ctx).PUT("/settings/pinned", ps) } diff --git a/bus/routes.go b/bus/routes.go index 6334c24c1..188664e85 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -912,7 +912,8 @@ func (b *Bus) contractPruneHandlerPOST(jc jape.Context) { } // prune the contract - rev, spending, pruned, remaining, err := b.rhp2.PruneContract(pruneCtx, b.deriveRenterKey(c.HostKey), gc, c.HostIP, c.HostKey, fcid, c.RevisionNumber, func(fcid types.FileContractID, roots []types.Hash256) ([]uint64, error) { + rk := b.masterKey.DeriveContractKey(c.HostKey) + rev, spending, pruned, remaining, err := b.rhp2.PruneContract(pruneCtx, rk, gc, c.HostIP, c.HostKey, fcid, c.RevisionNumber, func(fcid types.FileContractID, roots []types.Hash256) ([]uint64, error) { indices, err := b.ms.PrunableContractRoots(ctx, fcid, roots) if err != nil { return nil, err @@ -1487,8 +1488,8 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { - pps, err := b.ss.PinnedSettings(jc.Request.Context()) - if jc.Check("failed to get pinned settings", err) 
== nil { + pps, err := b.ss.PinningSettings(jc.Request.Context()) + if jc.Check("failed to get pinning settings", err) == nil { // populate the Autopilots map with the current autopilots aps, err := b.as.Autopilots(jc.Request.Context()) if jc.Check("failed to fetch autopilots", err) != nil { @@ -1507,19 +1508,19 @@ func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { } func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { - var ps api.PinnedSettings + var ps api.PinningSettings if jc.Decode(&ps) != nil { return } else if err := ps.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update pinned settings, error: %v", err), http.StatusBadRequest) + jc.Error(fmt.Errorf("couldn't update pinning settings, error: %v", err), http.StatusBadRequest) return } else if ps.Enabled { if _, err := ibus.NewForexClient(ps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), ps.Currency); err != nil { - jc.Error(fmt.Errorf("couldn't update pinned settings, forex API unreachable,error: %v", err), http.StatusBadRequest) + jc.Error(fmt.Errorf("couldn't update pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) return } } - if jc.Check("could not update pinned settings", b.ss.UpdatePinnedSettings(jc.Request.Context(), ps)) == nil { + if jc.Check("could not update pinning settings", b.ss.UpdatePinningSettings(jc.Request.Context(), ps)) == nil { b.broadcastAction(webhooks.Event{ Module: api.ModuleSetting, Event: api.EventUpdate, diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 8929b9064..a02f65dc3 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -24,8 +24,8 @@ type ( GougingSettings(ctx context.Context) (api.GougingSettings, error) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error - PinnedSettings(ctx context.Context) (api.PinnedSettings, error) - UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error + PinningSettings(ctx context.Context) 
(api.PinningSettings, error) + UpdatePinningSettings(ctx context.Context, ps api.PinningSettings) error } ) @@ -309,10 +309,10 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { pm.logger.Debugw("updating prices", zap.Bool("forced", forced)) - // fetch pinned settings - settings, err := pm.s.PinnedSettings(ctx) + // fetch pinning settings + settings, err := pm.s.PinningSettings(ctx) if err != nil { - return fmt.Errorf("failed to fetch pinned settings: %w", err) + return fmt.Errorf("failed to fetch pinning settings: %w", err) } else if !settings.Enabled { pm.logger.Debug("price pinning is disabled, skipping price update") return nil diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 8716197ee..85272610c 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -112,7 +112,7 @@ func (api *mockForexAPI) setUnreachable(unreachable bool) { type mockPinStore struct { mu sync.Mutex gs api.GougingSettings - ps api.PinnedSettings + ps api.PinningSettings autopilots map[string]api.Autopilot } @@ -149,18 +149,18 @@ func (ms *mockPinStore) UpdateGougingSettings(ctx context.Context, gs api.Gougin return nil } -func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, error) { +func (ms *mockPinStore) PinningSettings(ctx context.Context) (api.PinningSettings, error) { ms.mu.Lock() defer ms.mu.Unlock() return ms.ps, nil } -func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { +func (ms *mockPinStore) UpdatePinningSettings(ctx context.Context, ps api.PinningSettings) error { b, err := json.Marshal(ps) if err != nil { return err } - var cloned api.PinnedSettings + var cloned api.PinningSettings if err := json.Unmarshal(b, &cloned); err != nil { return err } @@ -221,7 +221,7 @@ func TestPinManager(t *testing.T) { pps.Currency = "usd" pps.Threshold = 
0.5 pps.ForexEndpointURL = forex.s.URL - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) // assert price manager is running now if cnt := len(rates()); cnt < 1 { @@ -236,7 +236,7 @@ func TestPinManager(t *testing.T) { pps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) // assert gouging settings are unchanged if gss, _ := ms.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { @@ -245,14 +245,14 @@ func TestPinManager(t *testing.T) { // enable the max download pin, with the threshold at 0.5 it should remain unchanged pps.GougingSettingsPins.MaxDownload.Pinned = true - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) if gss, _ := ms.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } // lower the threshold, gouging settings should be updated pps.Threshold = 0.05 - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) if gss, _ := ms.GougingSettings(context.Background()); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) } @@ -261,7 +261,7 @@ func TestPinManager(t *testing.T) { pps.GougingSettingsPins.MaxDownload.Pinned = true pps.GougingSettingsPins.MaxStorage.Pinned = true pps.GougingSettingsPins.MaxUpload.Pinned = true - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) // assert they're all updated if gss, _ := ms.GougingSettings(context.Background()); 
gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || @@ -284,7 +284,7 @@ func TestPinManager(t *testing.T) { }, } pps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) // assert autopilot was not updated if app, _ := ms.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -294,7 +294,7 @@ func TestPinManager(t *testing.T) { // enable the pin pins.Allowance.Pinned = true pps.Autopilots[testAutopilotID] = pins - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) // assert autopilot was updated if app, _ := ms.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -305,7 +305,7 @@ func TestPinManager(t *testing.T) { forex.setUnreachable(true) // assert alert was registered - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) res, _ := a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) == 0 { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) @@ -315,7 +315,7 @@ func TestPinManager(t *testing.T) { forex.setUnreachable(false) // assert alert was dismissed - ms.UpdatePinnedSettings(context.Background(), pps) + ms.UpdatePinningSettings(context.Background(), pps) res, _ = a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) != 0 { t.Fatalf("expected 0 alerts, got %d", len(a.alerts)) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index c1afeed35..9f2b2c0ac 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -502,7 +502,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Update the bus settings. 
tt.OK(busClient.UpdateGougingSettings(ctx, test.GougingSettings)) - tt.OK(busClient.UpdatePinnedSettings(ctx, test.PricePinSettings)) + tt.OK(busClient.UpdatePinningSettings(ctx, test.PricePinSettings)) tt.OK(busClient.UpdateUploadSettings(ctx, us)) tt.OK(busClient.UpdateS3Settings(ctx, s3)) diff --git a/stores/settings.go b/stores/settings.go index 2c9fc9a6b..843d8e986 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -12,10 +12,10 @@ import ( ) const ( - SettingGouging = "gouging" - SettingPinned = "pinned" - SettingS3 = "s3" - SettingUpload = "upload" + SettingGouging = "gouging" + SettingPricePinning = "pricepinning" + SettingS3 = "s3" + SettingUpload = "upload" ) func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) { @@ -27,13 +27,13 @@ func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSett return s.updateSetting(ctx, SettingGouging, gs) } -func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) { - err = s.fetchSetting(ctx, SettingPinned, &ps) +func (s *SQLStore) PinningSettings(ctx context.Context) (ps api.PinningSettings, err error) { + err = s.fetchSetting(ctx, SettingPricePinning, &ps) return } -func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { - return s.updateSetting(ctx, SettingPinned, ps) +func (s *SQLStore) UpdatePinningSettings(ctx context.Context, ps api.PinningSettings) error { + return s.updateSetting(ctx, SettingPricePinning, ps) } func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) { @@ -93,22 +93,22 @@ func (s *SQLStore) MigrateV2Settings(ctx context.Context) error { } } - // migrate pinned settings + // migrate pinning settings value, err = tx.Setting(ctx, "pricepinning") if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { return err } else if err == nil { - var ps api.PinnedSettings + var ps api.PinningSettings if err := 
json.Unmarshal([]byte(value), &ps); err != nil { - s.logger.Warnw("failed to unmarshal pinned settings, using default", zap.Error(err)) - value = s.defaultSetting(SettingPinned) + s.logger.Warnw("failed to unmarshal pinning settings, using default", zap.Error(err)) + value = s.defaultSetting(SettingPricePinning) } else if err := ps.Validate(); err != nil { - s.logger.Warnw("failed to migrate pinned settings, using default", zap.Error(err)) - value = s.defaultSetting(SettingPinned) + s.logger.Warnw("failed to migrate pinning settings, using default", zap.Error(err)) + value = s.defaultSetting(SettingPricePinning) } // update setting and delete old value - if err := tx.UpdateSetting(ctx, SettingPinned, value); err != nil { + if err := tx.UpdateSetting(ctx, SettingPricePinning, value); err != nil { return err } else if err := tx.DeleteSetting(ctx, "pricepinning"); err != nil { return err @@ -266,7 +266,7 @@ func (s *SQLStore) defaultSetting(key string) string { case SettingGouging: b, _ := json.Marshal(api.DefaultGougingSettings) return string(b) - case SettingPinned: + case SettingPricePinning: b, _ := json.Marshal(api.DefaultPinnedSettings) return string(b) case SettingS3: From 3f82d0bb89b3e930d5acafd776171cbdbcbe66c3 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 10:40:07 +0200 Subject: [PATCH 54/98] stores: move migration --- api/setting.go | 15 +- bus/bus.go | 7 - cmd/renterd/node.go | 8 +- internal/sql/migrations.go | 149 ++++++++++++++++ internal/test/e2e/cluster.go | 15 +- stores/bench_test.go | 2 +- stores/settings.go | 159 +----------------- stores/sql/mysql/main.go | 10 +- stores/sql/mysql/metrics.go | 2 +- .../main/migration_00017_settings.sql | 1 + stores/sql/sqlite/main.go | 10 +- stores/sql/sqlite/metrics.go | 2 +- .../main/migration_00017_settings.sql | 1 + stores/sql_test.go | 13 +- 14 files changed, 201 insertions(+), 193 deletions(-) create mode 100644 stores/sql/mysql/migrations/main/migration_00017_settings.sql create mode 100644 
stores/sql/sqlite/migrations/main/migration_00017_settings.sql diff --git a/api/setting.go b/api/setting.go index 668acb243..bf97581b4 100644 --- a/api/setting.go +++ b/api/setting.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "go.sia.tech/core/consensus" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" ) @@ -66,16 +65,20 @@ var ( // DefaultUploadSettings define the default upload settings the bus is // configured with on startup. -func DefaultUploadSettings(network *consensus.Network) UploadSettings { +func DefaultUploadSettings(network string) UploadSettings { + rs := RedundancySettings{ + MinShards: 10, + TotalShards: 30, + } + if network != "mainnet" { + rs = DefaultRedundancySettingsTestnet + } return UploadSettings{ Packing: UploadPackingSettings{ Enabled: true, SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB }, - Redundancy: RedundancySettings{ - MinShards: 10, - TotalShards: 30, - }, + Redundancy: rs, } } diff --git a/bus/bus.go b/bus/bus.go index a5e575d44..d1d7b6dbd 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -299,8 +299,6 @@ type ( S3Settings(ctx context.Context) (api.S3Settings, error) UpdateS3Settings(ctx context.Context, s3as api.S3Settings) error - - MigrateV2Settings(ctx context.Context) error } WalletMetricsRecorder interface { @@ -365,11 +363,6 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa rhp3: rhp3.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l), } - // migrate settings store - if err := store.MigrateV2Settings(ctx); err != nil { - return nil, err - } - // create contract locker b.contractLocker = ibus.NewContractLocker() diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 6721408ca..9af925bf9 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -491,11 +491,11 @@ func buildStoreConfig(am alerts.Alerter, cfg config.Config, pk types.PrivateKey, if err != nil { return stores.Config{}, fmt.Errorf("failed to open MySQL metrics database: %w", err) } - dbMain, err = 
mysql.NewMainDatabase(connMain, logger, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold) + dbMain, err = mysql.NewMainDatabase(connMain, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold, cfg.Network, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create MySQL main database: %w", err) } - dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, logger, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold) + dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create MySQL metrics database: %w", err) } @@ -511,7 +511,7 @@ func buildStoreConfig(am alerts.Alerter, cfg config.Config, pk types.PrivateKey, if err != nil { return stores.Config{}, fmt.Errorf("failed to open SQLite main database: %w", err) } - dbMain, err = sqlite.NewMainDatabase(db, logger, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold) + dbMain, err = sqlite.NewMainDatabase(db, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold, cfg.Network, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create SQLite main database: %w", err) } @@ -520,7 +520,7 @@ func buildStoreConfig(am alerts.Alerter, cfg config.Config, pk types.PrivateKey, if err != nil { return stores.Config{}, fmt.Errorf("failed to open SQLite metrics database: %w", err) } - dbMetrics, err = sqlite.NewMetricsDatabase(dbm, logger, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold) + dbMetrics, err = sqlite.NewMetricsDatabase(dbm, cfg.Log.Database.SlowThreshold, cfg.Log.Database.SlowThreshold, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create SQLite metrics database: %w", err) } diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 9b98be300..a7368ba72 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -3,10 +3,13 @@ package 
sql import ( "context" "embed" + "encoding/json" "fmt" "strings" + "time" "unicode/utf8" + "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -28,6 +31,7 @@ type ( MainMigrator interface { Migrator MakeDirsForPath(ctx context.Context, tx Tx, path string) (int64, error) + Network() string } ) @@ -211,6 +215,151 @@ var ( return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00016_account_owner", log) }, }, + { + ID: "00017_settings", + Migrate: func(tx Tx) error { + log.Infof("performing %s migration '00017_settings'", dbIdentifier) + + // fetch all settings + rows, err := tx.Query(ctx, "SELECT key, value FROM settings") + if err != nil { + return fmt.Errorf("failed to fetch settings: %v", err) + } + defer rows.Close() + + settings := make(map[string]string) + for rows.Next() { + var k, v string + if err := rows.Scan(&k, &v); err != nil { + _ = rows.Close() + return fmt.Errorf("failed to scan setting: %v", err) + } + settings[k] = v + } + + // migrate gouging settings + if v, ok := settings["gouging"]; ok { + var gs api.GougingSettings + err := json.Unmarshal([]byte(v), &gs) + if err == nil { + err = gs.Validate() + } + if err != nil { + log.Warnf("gouging settings are not being migrated, err: %v", err) + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "gouging"); err != nil { + return fmt.Errorf("failed to delete gouging settings: %v", err) + } + } + } else { + log.Warn("no pricepinning settings found") + } + + // migrate pinning settings + if v, ok := settings["pricepinning"]; ok { + var ps api.PinningSettings + err := json.Unmarshal([]byte(v), &ps) + if err == nil { + err = ps.Validate() + } + if err != nil { + log.Warnf("pricepinning settings are not being migrated, err: %v", err) + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "pricepinning"); err != nil { + return fmt.Errorf("failed to delete pricepinning settings: %v", err) + } + } else { + b, _ := json.Marshal(ps) + if _, err := 
tx.Exec(ctx, "UPDATE settings SET value = ? WHERE `key` = ?", string(b), "pricepinning"); err != nil { + return fmt.Errorf("failed to update pricepinning settings: %v", err) + } + } + } else { + log.Warn("no pricepinning settings found") + } + + // migrate S3 authentication settings + if v, ok := settings["s3authentication"]; ok { + var s3s api.S3Settings + err := json.Unmarshal([]byte(v), &s3s.Authentication) + if err == nil { + err = s3s.Validate() + } + if err == nil { + b, _ := json.Marshal(s3s) + if _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value)", + time.Now(), "s3", string(b)); err != nil { + return fmt.Errorf("failed to insert s3 settings: %v", err) + } + } else { + log.Warnf("s3authentication settings are not being migrated, err: %v", err) + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "s3authentication"); err != nil { + log.Warnf("failed to delete s3authentication settings: %v", err) + } + } + } else { + log.Warn("no s3authentication setting found") + } + + // migrate upload settings + us := api.DefaultUploadSettings(m.Network()) + + if v, ok := settings["contractset"]; ok { + var css struct { + Default string `json:"default"` + } + if err := json.Unmarshal([]byte(v), &css); err != nil { + log.Warnf("contractset settings are not being migrated, err: %v", err) + } else { + us.DefaultContractSet = css.Default + } + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "contractset"); err != nil { + return err + } + } + + if v, ok := settings["uploadpacking"]; ok { + var ups api.UploadPackingSettings + if err := json.Unmarshal([]byte(v), &ups); err != nil { + log.Warnf("uploadpacking settings are not being migrated, err: %v", err) + } else { + us.Packing = ups + } + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "uploadpacking"); err != nil { + return err + } + } + + if v, ok := settings["redundancy"]; ok { + var rs 
api.RedundancySettings + err := json.Unmarshal([]byte(v), &rs) + if err == nil { + err = rs.Validate() + } + if err != nil { + log.Warnf("redundancy settings are not being migrated, err: %v", err) + } else { + us.Redundancy = rs + } + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "redundancy"); err != nil { + return err + } + } + + // update upload settings + if err := us.Validate(); err != nil { + log.Warnf("upload settings are not being migrated, err: %v", err) + return err // developer error + } else { + b, _ := json.Marshal(us) + if _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value)", + time.Now(), "upload", string(b)); err != nil { + return fmt.Errorf("failed to insert s3 settings: %v", err) + } + } + + log.Info("migration '00017_settings' complete") + return nil + }, + }, } } MetricsMigrations = func(ctx context.Context, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 9f2b2c0ac..2af725d02 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -545,14 +545,15 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { } func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, bus.Store, error) { + network, genesis := testNetwork() + // create store alertsMgr := alerts.NewManager() - storeCfg, err := buildStoreConfig(alertsMgr, dir, cfg.SlabBufferCompletionThreshold, cfgDb, pk, logger) + storeCfg, err := buildStoreConfig(alertsMgr, dir, network.Name, cfg.SlabBufferCompletionThreshold, cfgDb, pk, logger) if err != nil { return nil, nil, nil, nil, err } - network, genesis := testNetwork() sqlStore, err := stores.NewSQLStore(storeCfg, network) if err != nil { return nil, nil, nil, nil, err @@ -1119,7 +1120,7 
@@ func testApCfg() config.Autopilot { } } -func buildStoreConfig(am alerts.Alerter, dir string, slabBufferCompletionThreshold int64, cfg dbConfig, pk types.PrivateKey, logger *zap.Logger) (stores.Config, error) { +func buildStoreConfig(am alerts.Alerter, dir, network string, slabBufferCompletionThreshold int64, cfg dbConfig, pk types.PrivateKey, logger *zap.Logger) (stores.Config, error) { // create database connections var dbMain sql.Database var dbMetrics sql.MetricsDatabase @@ -1143,11 +1144,11 @@ func buildStoreConfig(am alerts.Alerter, dir string, slabBufferCompletionThresho if err != nil { return stores.Config{}, fmt.Errorf("failed to open MySQL metrics database: %w", err) } - dbMain, err = mysql.NewMainDatabase(connMain, logger, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + dbMain, err = mysql.NewMainDatabase(connMain, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold, network, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create MySQL main database: %w", err) } - dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, logger, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create MySQL metrics database: %w", err) } @@ -1163,7 +1164,7 @@ func buildStoreConfig(am alerts.Alerter, dir string, slabBufferCompletionThresho if err != nil { return stores.Config{}, fmt.Errorf("failed to open SQLite main database: %w", err) } - dbMain, err = sqlite.NewMainDatabase(db, logger, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + dbMain, err = sqlite.NewMainDatabase(db, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold, network, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create SQLite main database: %w", err) } @@ -1172,7 +1173,7 @@ func buildStoreConfig(am 
alerts.Alerter, dir string, slabBufferCompletionThresho if err != nil { return stores.Config{}, fmt.Errorf("failed to open SQLite metrics database: %w", err) } - dbMetrics, err = sqlite.NewMetricsDatabase(dbm, logger, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + dbMetrics, err = sqlite.NewMetricsDatabase(dbm, cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold, logger) if err != nil { return stores.Config{}, fmt.Errorf("failed to create SQLite metrics database: %w", err) } diff --git a/stores/bench_test.go b/stores/bench_test.go index 60f75b52f..65e58caa5 100644 --- a/stores/bench_test.go +++ b/stores/bench_test.go @@ -153,7 +153,7 @@ func newTestDB(ctx context.Context, dir string) (*sqlite.MainDatabase, error) { return nil, err } - dbMain, err := sqlite.NewMainDatabase(db, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMain, err := sqlite.NewMainDatabase(db, 100*time.Millisecond, 100*time.Millisecond, "mainnet", zap.NewNop()) if err != nil { return nil, err } diff --git a/stores/settings.go b/stores/settings.go index 843d8e986..ac3c4d312 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -8,7 +8,6 @@ import ( "go.sia.tech/renterd/api" sql "go.sia.tech/renterd/stores/sql" - "go.uber.org/zap" ) const ( @@ -54,162 +53,6 @@ func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) erro return s.updateSetting(ctx, SettingS3, ss) } -// MigrateV2Settings migrates the settings from the old format to the new, -// migrating the existing settings over to the new types and removing the old -// settings. If a setting is not present in the database it will be set to its -// default setting. If an existing setting is not valid, the default will be -// used and a warning will get logged. 
-func (s *SQLStore) MigrateV2Settings(ctx context.Context) error { - return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - // escape early if none of the old settings are present - var found bool - for _, key := range []string{ - "pricepinning", - "s3authentication", - "contractset", - "redundancy", - "uploadpacking", - } { - if _, err := tx.Setting(ctx, key); err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - found = true - break - } - } - if !found { - return nil - } - - s.logger.Info("migrating settings...") - - // migrate gouging settings - value, err := tx.Setting(ctx, "gouging") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if errors.Is(err, sql.ErrSettingNotFound) { - if err := tx.UpdateSetting(ctx, SettingGouging, s.defaultSetting(SettingGouging)); err != nil { - return err - } - } - - // migrate pinning settings - value, err = tx.Setting(ctx, "pricepinning") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - var ps api.PinningSettings - if err := json.Unmarshal([]byte(value), &ps); err != nil { - s.logger.Warnw("failed to unmarshal pinning settings, using default", zap.Error(err)) - value = s.defaultSetting(SettingPricePinning) - } else if err := ps.Validate(); err != nil { - s.logger.Warnw("failed to migrate pinning settings, using default", zap.Error(err)) - value = s.defaultSetting(SettingPricePinning) - } - - // update setting and delete old value - if err := tx.UpdateSetting(ctx, SettingPricePinning, value); err != nil { - return err - } else if err := tx.DeleteSetting(ctx, "pricepinning"); err != nil { - return err - } - } - - // migrate s3 settings - value, err = tx.Setting(ctx, "s3authentication") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - var s3s api.S3Settings - if err := json.Unmarshal([]byte(value), &s3s.Authentication); err != nil { - 
s.logger.Warnw("failed to unmarshal S3 authentication settings, using default", zap.Error(err)) - s3s = api.DefaultS3Settings - } else if err := s3s.Validate(); err != nil { - s.logger.Warnw("failed to migrate S3 settings, using default", zap.Error(err)) - s3s = api.DefaultS3Settings - } - - // update setting and delete old value - update, _ := json.Marshal(s3s) - if err := tx.UpdateSetting(ctx, SettingS3, string(update)); err != nil { - return err - } else if err := tx.DeleteSetting(ctx, "s3authentication"); err != nil { - return err - } - } - - us := api.DefaultUploadSettings(s.network) - - // migrate contractset settings - value, err = tx.Setting(ctx, "contractset") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - var css struct { - Default string `json:"default"` - } - if err := json.Unmarshal([]byte(value), &css); err != nil { - s.logger.Warnw("failed to unmarshal contractset setting, using default", zap.Error(err)) - } else { - us.DefaultContractSet = css.Default - } - - // delete old value - if err := tx.DeleteSetting(ctx, "contractset"); err != nil { - return err - } - } - - // migrate redundancy settings - value, err = tx.Setting(ctx, "redundancy") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - var rs api.RedundancySettings - if err := json.Unmarshal([]byte(value), &rs); err != nil { - s.logger.Warnw("failed to unmarshal redundancy settings, using default", zap.Error(err)) - } else if err := rs.Validate(); err != nil { - s.logger.Warnw("failed to migrate redundancy settings, using default", zap.Error(err)) - } else { - us.Redundancy = rs - } - - // delete old value - if err := tx.DeleteSetting(ctx, "redundancy"); err != nil { - return err - } - } - - // migrate uploadpacking settings - value, err = tx.Setting(ctx, "uploadpacking") - if err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return err - } else if err == nil { - var ups 
api.UploadPackingSettings - if err := json.Unmarshal([]byte(value), &ups); err != nil { - s.logger.Warnw("failed to unmarshal uploadpacking settings, using default", zap.Error(err)) - } else { - us.Packing = ups - } - - // delete old value - if err := tx.DeleteSetting(ctx, "uploadpacking"); err != nil { - return err - } - } - - // update upload settings - if update, err := json.Marshal(us); err != nil { - return fmt.Errorf("failed to marshal upload settings: %w", err) - } else if err := tx.UpdateSetting(ctx, SettingUpload, string(update)); err != nil { - return err - } - - s.logger.Info("successfully migrated settings") - return nil - }) -} - func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{}) error { s.settingsMu.Lock() defer s.settingsMu.Unlock() @@ -273,7 +116,7 @@ func (s *SQLStore) defaultSetting(key string) string { b, _ := json.Marshal(api.DefaultS3Settings) return string(b) case SettingUpload: - b, _ := json.Marshal(api.DefaultUploadSettings(s.network)) + b, _ := json.Marshal(api.DefaultUploadSettings(s.network.Name)) return string(b) default: panic("unknown setting") // developer error diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index af47d7a22..12e3fba14 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -32,6 +32,8 @@ const ( type ( MainDatabase struct { + network string + db *sql.DB log *zap.SugaredLogger } @@ -43,10 +45,12 @@ type ( ) // NewMainDatabase creates a new MySQL backend. 
-func NewMainDatabase(db *dsql.DB, log *zap.Logger, lqd, ltd time.Duration) (*MainDatabase, error) { +func NewMainDatabase(db *dsql.DB, lqd, ltd time.Duration, network string, log *zap.Logger) (*MainDatabase, error) { log = log.Named("main") store, err := sql.NewDB(db, log, deadlockMsgs, lqd, ltd) return &MainDatabase{ + network: network, + db: store, log: log.Sugar(), }, err @@ -81,6 +85,10 @@ func (b *MainDatabase) Migrate(ctx context.Context) error { return sql.PerformMigrations(ctx, b, migrationsFs, "main", sql.MainMigrations(ctx, b, migrationsFs, b.log)) } +func (b *MainDatabase) Network() string { + return b.network +} + func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.DatabaseTx) error) error { return b.db.Transaction(ctx, func(tx sql.Tx) error { return fn(b.wrapTxn(tx)) diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index e7ef23813..3f642d878 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -30,7 +30,7 @@ type ( var _ ssql.MetricsDatabaseTx = (*MetricsDatabaseTx)(nil) // NewMetricsDatabase creates a new MySQL backend. 
-func NewMetricsDatabase(db *dsql.DB, log *zap.Logger, lqd, ltd time.Duration) (*MetricsDatabase, error) { +func NewMetricsDatabase(db *dsql.DB, lqd, ltd time.Duration, log *zap.Logger) (*MetricsDatabase, error) { log = log.Named("metrics") store, err := sql.NewDB(db, log, deadlockMsgs, lqd, ltd) return &MetricsDatabase{ diff --git a/stores/sql/mysql/migrations/main/migration_00017_settings.sql b/stores/sql/mysql/migrations/main/migration_00017_settings.sql new file mode 100644 index 000000000..09a7b0592 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00017_settings.sql @@ -0,0 +1 @@ +-- placeholder diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index b8bfe2771..d6ceafe6d 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -31,6 +31,8 @@ const ( type ( MainDatabase struct { + network string + db *sql.DB log *zap.SugaredLogger } @@ -42,10 +44,12 @@ type ( ) // NewMainDatabase creates a new SQLite backend. -func NewMainDatabase(db *dsql.DB, log *zap.Logger, lqd, ltd time.Duration) (*MainDatabase, error) { +func NewMainDatabase(db *dsql.DB, lqd, ltd time.Duration, network string, log *zap.Logger) (*MainDatabase, error) { log = log.Named("main") store, err := sql.NewDB(db, log, deadlockMsgs, lqd, ltd) return &MainDatabase{ + network: network, + db: store, log: log.Sugar(), }, err @@ -80,6 +84,10 @@ func (b *MainDatabase) Migrate(ctx context.Context) error { return sql.PerformMigrations(ctx, b, migrationsFs, "main", sql.MainMigrations(ctx, b, migrationsFs, b.log)) } +func (b *MainDatabase) Network() string { + return b.network +} + func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.DatabaseTx) error) error { return b.db.Transaction(ctx, func(tx sql.Tx) error { return fn(b.wrapTxn(tx)) diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index df912d7c7..fd46c1222 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -29,7 +29,7 @@ type ( var _ 
ssql.MetricsDatabaseTx = (*MetricsDatabaseTx)(nil) // NewSQLiteDatabase creates a new SQLite backend. -func NewMetricsDatabase(db *dsql.DB, log *zap.Logger, lqd, ltd time.Duration) (*MetricsDatabase, error) { +func NewMetricsDatabase(db *dsql.DB, lqd, ltd time.Duration, log *zap.Logger) (*MetricsDatabase, error) { log = log.Named("metrics") store, err := sql.NewDB(db, log, deadlockMsgs, lqd, ltd) return &MetricsDatabase{ diff --git a/stores/sql/sqlite/migrations/main/migration_00017_settings.sql b/stores/sql/sqlite/migrations/main/migration_00017_settings.sql new file mode 100644 index 000000000..09a7b0592 --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00017_settings.sql @@ -0,0 +1 @@ +-- placeholder diff --git a/stores/sql_test.go b/stores/sql_test.go index 48fb7a6a9..fba4b8327 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -25,6 +25,7 @@ import ( ) const ( + testNetwork = "zen" testContractSet = "test" testMimeType = "application/octet-stream" testETag = "d34db33f" @@ -97,11 +98,11 @@ func (cfg *testSQLStoreConfig) dbConnections() (sql.Database, sql.MetricsDatabas if err != nil { return nil, nil, fmt.Errorf("failed to open MySQL metrics database: %w", err) } - dbMain, err = mysql.NewMainDatabase(connMain, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMain, err = mysql.NewMainDatabase(connMain, 100*time.Millisecond, 100*time.Millisecond, testNetwork, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create MySQL main database: %w", err) } - dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMetrics, err = mysql.NewMetricsDatabase(connMetrics, 100*time.Millisecond, 100*time.Millisecond, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create MySQL metrics database: %w", err) } @@ -115,11 +116,11 @@ func (cfg *testSQLStoreConfig) dbConnections() (sql.Database, sql.MetricsDatabas if err != nil { return nil, nil, 
fmt.Errorf("failed to open SQLite metrics database: %w", err) } - dbMain, err = sqlite.NewMainDatabase(connMain, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMain, err = sqlite.NewMainDatabase(connMain, 100*time.Millisecond, 100*time.Millisecond, testNetwork, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create SQLite main database: %w", err) } - dbMetrics, err = sqlite.NewMetricsDatabase(connMetrics, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMetrics, err = sqlite.NewMetricsDatabase(connMetrics, 100*time.Millisecond, 100*time.Millisecond, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create SQLite metrics database: %w", err) } @@ -133,11 +134,11 @@ func (cfg *testSQLStoreConfig) dbConnections() (sql.Database, sql.MetricsDatabas if err != nil { return nil, nil, fmt.Errorf("failed to open ephemeral SQLite metrics database: %w", err) } - dbMain, err = sqlite.NewMainDatabase(connMain, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMain, err = sqlite.NewMainDatabase(connMain, 100*time.Millisecond, 100*time.Millisecond, testNetwork, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create ephemeral SQLite main database: %w", err) } - dbMetrics, err = sqlite.NewMetricsDatabase(connMetrics, zap.NewNop(), 100*time.Millisecond, 100*time.Millisecond) + dbMetrics, err = sqlite.NewMetricsDatabase(connMetrics, 100*time.Millisecond, 100*time.Millisecond, zap.NewNop()) if err != nil { return nil, nil, fmt.Errorf("failed to create ephemeral SQLite metrics database: %w", err) } From f2ba95a373ab5e2c8c2a2c181ef746cc91f17751 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 10:04:05 +0200 Subject: [PATCH 55/98] sql: add index for db_directory_id column in SQLite --- internal/sql/migrations.go | 6 ++++++ internal/worker/accounts.go | 18 +++++++++--------- .../main/migration_00018_idx_db_directory.sql | 1 + .../main/migration_00018_idx_db_directory.sql 
| 1 + stores/sql/sqlite/migrations/main/schema.sql | 1 + 5 files changed, 18 insertions(+), 9 deletions(-) create mode 100644 stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql create mode 100644 stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index c17473542..5f886fe1a 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -217,6 +217,12 @@ var ( return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00017_unix_ms", log) }, }, + { + ID: "00018_idx_db_directory", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00018_idx_db_directory", log) + }, + }, } } MetricsMigrations = func(ctx context.Context, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { diff --git a/internal/worker/accounts.go b/internal/worker/accounts.go index 1022075f1..f73c9f529 100644 --- a/internal/worker/accounts.go +++ b/internal/worker/accounts.go @@ -336,7 +336,7 @@ func (a *AccountMgr) refillAccounts() { defer cancel() // refill - err := a.refillAccount(rCtx, c, cs.BlockHeight, a.revisionSubmissionBuffer) + refilled, err := a.refillAccount(rCtx, c, cs.BlockHeight, a.revisionSubmissionBuffer) // determine whether to log something shouldLog := true @@ -351,7 +351,7 @@ func (a *AccountMgr) refillAccounts() { if err != nil && shouldLog { a.logger.Error("failed to refill account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) - } else { + } else if refilled { a.logger.Infow("successfully refilled account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) } }(c) @@ -359,7 +359,7 @@ func (a *AccountMgr) refillAccounts() { } } -func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMetadata, bh, revisionSubmissionBuffer uint64) error { +func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMetadata, bh, 
revisionSubmissionBuffer uint64) (bool, error) { // fetch the account account := a.Account(contract.HostKey) @@ -367,7 +367,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // trying to refill the account would result in the host not returning the // revision and returning an obfuscated error if (bh + revisionSubmissionBuffer) > contract.WindowStart { - return fmt.Errorf("contract %v is too close to the proof window to be revised", contract.ID) + return false, fmt.Errorf("contract %v is too close to the proof window to be revised", contract.ID) } // check if a host is potentially cheating before refilling. @@ -382,7 +382,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet "drift", account.Drift.String(), ) _ = a.alerts.RegisterAlert(a.shutdownCtx, alert) - return fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded) + return false, fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded) } else { _ = a.alerts.DismissAlerts(a.shutdownCtx, alerts.IDForAccount(alertAccountRefillID, account.ID)) } @@ -392,7 +392,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // sync the account err := a.syncer.SyncAccount(ctx, contract.ID, contract.HostKey, contract.SiamuxAddr) if err != nil { - return fmt.Errorf("failed to sync account's balance: %w", err) + return false, fmt.Errorf("failed to sync account's balance: %w", err) } // refetch the account after syncing @@ -401,15 +401,15 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // check if refill is needed if account.Balance.Cmp(minBalance) >= 0 { - return nil + return false, nil } // fund the account err := a.funder.FundAccount(ctx, contract.ID, contract.HostKey, maxBalance) if err != nil { - return fmt.Errorf("failed to fund account: %w", err) + return false, fmt.Errorf("failed to fund account: %w", err) } 
- return nil + return true, nil } // WithSync syncs an accounts balance with the bus. To do so, the account is diff --git a/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql b/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql new file mode 100644 index 000000000..5757fd280 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS `idx_objects_db_directory_id` ON `objects`(`db_directory_id`); diff --git a/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql b/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql @@ -0,0 +1 @@ + diff --git a/stores/sql/sqlite/migrations/main/schema.sql b/stores/sql/sqlite/migrations/main/schema.sql index 6d8d0ee6c..af4c59654 100644 --- a/stores/sql/sqlite/migrations/main/schema.sql +++ b/stores/sql/sqlite/migrations/main/schema.sql @@ -58,6 +58,7 @@ CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); CREATE INDEX `idx_objects_size` ON `objects`(`size`); CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`); +CREATE INDEX `idx_objects_db_directory_id` ON `objects`(`db_directory_id`); -- dbMultipartUpload CREATE TABLE `multipart_uploads` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`key` blob,`upload_id` text NOT NULL,`object_id` text NOT NULL,`db_bucket_id` integer NOT NULL,`mime_type` text,CONSTRAINT `fk_multipart_uploads_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`) ON DELETE CASCADE); From 7fd0986df007c715876cad7848f93a475da59422 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 10:58:15 +0200 Subject: [PATCH 56/98] bus: add explorer section to config --- README.md | 2 + api/bus.go | 8 +++ 
api/setting.go | 25 +++----- bus/bus.go | 20 +++--- bus/explorer.go | 81 ++++++++++++++++++++++++ bus/routes.go | 9 +-- cmd/renterd/config.go | 7 +++ cmd/renterd/node.go | 5 +- config/config.go | 9 ++- internal/bus/forex.go | 51 --------------- internal/bus/pinmanager.go | 24 ++++--- internal/bus/pinmanager_test.go | 100 +++++++++++++----------------- internal/test/e2e/cluster.go | 22 +++++-- internal/test/e2e/cluster_test.go | 4 +- 14 files changed, 215 insertions(+), 152 deletions(-) create mode 100644 bus/explorer.go delete mode 100644 internal/bus/forex.go diff --git a/README.md b/README.md index c20749935..d5b2ae3ad 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,8 @@ overview of all settings configurable through the CLI. | `S3.HostBucketBases` | Enables bucket rewriting in the router for the provided bases | - | `--s3.hostBucketBases` | `RENTERD_S3_HOST_BUCKET_BASES` | `s3.hostBucketBases` | | `S3.HostBucketEnabled` | Enables bucket rewriting in the router | - | `--s3.hostBucketEnabled` | `RENTERD_S3_HOST_BUCKET_ENABLED` | `s3.hostBucketEnabled` | | `S3.KeypairsV4 (DEPRECATED)` | V4 keypairs for S3 | - | - | - | `s3.keypairsV4` | +| `Explorer.Disable` | Disables explorer service | `false` | `--explorer.disable` | `RENTERD_EXPLORER_DISABLE` | `explorer.disable` | +| `Explorer.URL` | URL of service to retrieve data about the Sia network | `https://api.siascan.com` | `--explorer.url` | `RENTERD_EXPLORER_URL` | `explorer.url` | ### Single-Node Setup diff --git a/api/bus.go b/api/bus.go index 3b73469e3..bf417d3b4 100644 --- a/api/bus.go +++ b/api/bus.go @@ -10,6 +10,7 @@ import ( var ( ErrMarkerNotFound = errors.New("marker not found") ErrMaxFundAmountExceeded = errors.New("renewal exceeds max fund amount") + ErrExplorerDisabled = errors.New("explorer is disabled") ) type ( @@ -65,6 +66,13 @@ type ( StartTime TimeRFC3339 `json:"startTime"` Network string `json:"network"` BuildState + Explorer ExplorerState `json:"explorer"` + } + + // ExplorerState contains 
static information about explorer data sources. + ExplorerState struct { + Enabled bool `json:"enabled"` + URL string `json:"url,omitempty"` } ContractSetUpdateRequest struct { diff --git a/api/setting.go b/api/setting.go index 5976b00b2..cb82eeafb 100644 --- a/api/setting.go +++ b/api/setting.go @@ -54,10 +54,9 @@ var ( // configured with on startup. These values can be adjusted using the // settings API. DefaultPricePinSettings = PricePinSettings{ - Enabled: false, - Currency: "usd", - ForexEndpointURL: "https://api.siascan.com/exchange-rate/siacoin", - Threshold: 0.05, + Enabled: false, + Currency: "usd", + Threshold: 0.05, } // DefaultUploadPackingSettings define the default upload packing settings @@ -132,22 +131,18 @@ type ( MigrationSurchargeMultiplier uint64 `json:"migrationSurchargeMultiplier"` } - // PricePinSettings holds the configuration for pinning certain settings to - // a specific currency (e.g., USD). It uses a Forex API to fetch the current - // exchange rate, allowing users to set prices in USD instead of SC. + // PricePinSettings holds the configuration for pinning certain settings to a + // specific currency (e.g., USD). It uses the configured explorer to fetch + // the current exchange rate, allowing users to set prices in USD instead of + // SC. PricePinSettings struct { // Enabled can be used to either enable or temporarily disable price - // pinning. If enabled, both the currency and the Forex endpoint URL - // must be valid. + // pinning. If enabled, the currency and threshold must be valid. Enabled bool `json:"enabled"` // Currency is the external three-letter currency code. Currency string `json:"currency"` - // ForexEndpointURL is the endpoint that returns the exchange rate for - // Siacoin against the underlying currency. - ForexEndpointURL string `json:"forexEndpointURL"` - // Threshold is a percentage between 0 and 1 that determines when the // pinned settings are updated based on the exchange rate at the time. 
Threshold float64 `json:"threshold"` @@ -205,8 +200,8 @@ func (p Pin) IsPinned() bool { // Validate returns an error if the price pin settings are not considered valid. func (pps PricePinSettings) Validate() error { - if pps.ForexEndpointURL == "" { - return fmt.Errorf("price pin settings must have a forex endpoint URL") + if !pps.Enabled { + return nil } if pps.Currency == "" { return fmt.Errorf("price pin settings must have a currency") diff --git a/bus/bus.go b/bus/bus.go index 832b603b0..418c9eae0 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -311,6 +311,7 @@ type Bus struct { pinMgr PinManager webhooksMgr WebhooksManager cm ChainManager + e Explorer cs ChainSubscriber s Syncer w Wallet @@ -333,7 +334,7 @@ type Bus struct { } // New returns a new Bus -func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) { +func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, e Explorer, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) { l = l.Named("bus") b := &Bus{ @@ -343,6 +344,7 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa accounts: store, s: s, cm: cm, + e: e, w: w, hs: store, as: store, @@ -371,7 +373,7 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa b.sectors = ibus.NewSectorsCache() // create pin manager - b.pinMgr = ibus.NewPinManager(b.alerts, wm, store, defaultPinUpdateInterval, defaultPinRateWindow, l) + b.pinMgr = ibus.NewPinManager(b.alerts, wm, e, store, defaultPinUpdateInterval, defaultPinRateWindow, l) // create chain subscriber b.cs = ibus.NewChainSubscriber(wm, cm, store, w, announcementMaxAge, l) @@ -753,10 +755,6 @@ func (b *Bus) initSettings(ctx context.Context) error { } else if err := pps.Validate(); err != nil { // overwrite 
values with defaults var updates []string - if pps.ForexEndpointURL == "" { - pps.ForexEndpointURL = api.DefaultPricePinSettings.ForexEndpointURL - updates = append(updates, fmt.Sprintf("set PricePinSettings.ForexEndpointURL to %v", pps.ForexEndpointURL)) - } if pps.Currency == "" { pps.Currency = api.DefaultPricePinSettings.Currency updates = append(updates, fmt.Sprintf("set PricePinSettings.Currency to %v", pps.Currency)) @@ -774,9 +772,15 @@ func (b *Bus) initSettings(ctx context.Context) error { b.logger.Warn(fmt.Sprintf("updated price pinning settings are invalid (%v), they have been overwritten with the default settings", err)) updated, _ = json.Marshal(api.DefaultPricePinSettings) } - if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { - return err + return fmt.Errorf("failed to update setting '%v': %w", api.SettingPricePinning, err) + } + } else if pps.Enabled && !b.e.Enabled() { + b.logger.Warn("pinning can not be enabled, explorer is disabled, pinning will be disabled") + pps.Enabled = false + updated, _ := json.Marshal(pps) + if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { + return fmt.Errorf("failed to update setting '%v': %w", api.SettingPricePinning, err) } } diff --git a/bus/explorer.go b/bus/explorer.go new file mode 100644 index 000000000..2040403b1 --- /dev/null +++ b/bus/explorer.go @@ -0,0 +1,81 @@ +package bus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.sia.tech/renterd/api" +) + +type ( + // An explorer retrieves data about the Sia network from an external source. + Explorer interface { + Enabled() bool + BaseURL() string + SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) + } + + explorer struct { + enabled bool + url string + } +) + +// NewExplorer returns a new Explorer. 
+func NewExplorer(url string, enabled bool) Explorer { + return &explorer{ + enabled: enabled, + url: url, + } +} + +// BaseURL returns the base URL of the Explorer. +func (e *explorer) BaseURL() string { + return e.url +} + +// Enabled returns true if the explorer is enabled. +func (e *explorer) Enabled() bool { + return e.enabled +} + +// SiacoinExchangeRate returns the exchange rate for the given currency. +func (e *explorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { + // return early if the explorer is disabled + if !e.enabled { + return 0, api.ErrExplorerDisabled + } + + // create request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/exchange-rate/siacoin/%s", e.url, currency), http.NoBody) + if err != nil { + return 0, fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Accept", "application/json") + + // create http client + resp, err := http.DefaultClient.Do(req) + if err != nil { + return 0, fmt.Errorf("failed to send request: %w", err) + } + defer resp.Body.Close() + + // check status code + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + var errorMessage string + if err := json.NewDecoder(io.LimitReader(resp.Body, 1024)).Decode(&errorMessage); err != nil { + return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + return 0, errors.New(errorMessage) + } + + // decode exchange rate + if err := json.NewDecoder(resp.Body).Decode(&rate); err != nil { + return 0, fmt.Errorf("failed to decode response: %w", err) + } + return +} diff --git a/bus/routes.go b/bus/routes.go index d8ffd2997..4df4193a3 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -17,7 +17,6 @@ import ( rhp3 "go.sia.tech/renterd/internal/rhp/v3" - ibus "go.sia.tech/renterd/internal/bus" "go.sia.tech/renterd/internal/gouging" rhp2 "go.sia.tech/renterd/internal/rhp/v2" @@ -1722,11 +1721,9 @@ func (b *Bus) settingKeyHandlerPUT(jc jape.Context) { } else if err := pps.Validate(); err 
!= nil { jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) return - } else if pps.Enabled { - if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) - return - } + } else if pps.Enabled && !b.e.Enabled() { + jc.Error(fmt.Errorf("pinning can not be enabled, %w", api.ErrExplorerDisabled), http.StatusBadRequest) + return } b.pinMgr.TriggerUpdate() } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index e1200f121..608c2a62a 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -68,6 +68,9 @@ func defaultConfig() config.Config { MetricsDatabase: "renterd_metrics", }, }, + Explorer: config.ExplorerData{ + URL: "https://api.siascan.com", + }, Log: config.Log{ Path: "", // deprecated. included for compatibility. 
Level: "", @@ -340,6 +343,10 @@ func parseCLIFlags(cfg *config.Config) { flag.StringVar(&hostBasesStr, "s3.hostBases", "", "Enables bucket rewriting in the router for specific hosts provided via comma-separated list (overrides with RENTERD_S3_HOST_BUCKET_BASES)") flag.BoolVar(&cfg.S3.HostBucketEnabled, "s3.hostBucketEnabled", cfg.S3.HostBucketEnabled, "Enables bucket rewriting in the router for all hosts (overrides with RENTERD_S3_HOST_BUCKET_ENABLED)") + // explorer + flag.StringVar(&cfg.Explorer.URL, "explorer.url", cfg.Explorer.URL, "URL of service to retrieve data about the Sia network (overrides with RENTERD_EXPLORER_URL)") + flag.BoolVar(&cfg.Explorer.Disable, "explorer.disable", cfg.Explorer.Disable, "Disables explorer service (overrides with RENTERD_EXPLORER_DISABLE)") + // custom usage flag.Usage = func() { log.Print(usageHeader) diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index a9758439c..11968ff52 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -311,6 +311,9 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network } cm := chain.NewManager(store, state) + // create explorer + e := bus.NewExplorer(cfg.Explorer.URL, !cfg.Explorer.Disable) + // create wallet w, err := wallet.NewSingleAddressWallet(pk, cm, sqlStore, wallet.WithReservationDuration(cfg.Bus.UsedUTXOExpiry)) if err != nil { @@ -382,7 +385,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, e, s, w, sqlStore, announcementMaxAgeHours, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } diff --git a/config/config.go b/config/config.go index 6755d3869..82828c85e 100644 --- a/config/config.go +++ b/config/config.go @@ -24,7 
+24,14 @@ type ( Worker Worker `yaml:"worker,omitempty"` S3 S3 `yaml:"s3,omitempty"` - Database Database `yaml:"database,omitempty"` + Database Database `yaml:"database,omitempty"` + Explorer ExplorerData `yaml:"explorer,omitempty"` + } + + // ExplorerData contains the configuration for using an external explorer. + ExplorerData struct { + Disable bool `yaml:"disable,omitempty"` + URL string `yaml:"url,omitempty"` } // HTTP contains the configuration for the HTTP server. diff --git a/internal/bus/forex.go b/internal/bus/forex.go deleted file mode 100644 index b6544b911..000000000 --- a/internal/bus/forex.go +++ /dev/null @@ -1,51 +0,0 @@ -package bus - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" -) - -type ( - client struct { - url string - } -) - -func NewForexClient(url string) *client { - return &client{url: url} -} - -func (f *client) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { - // create request - req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/%s", f.url, currency), http.NoBody) - if err != nil { - return 0, fmt.Errorf("failed to create request: %w", err) - } - req.Header.Set("Accept", "application/json") - - // create http client - resp, err := http.DefaultClient.Do(req) - if err != nil { - return 0, fmt.Errorf("failed to send request: %w", err) - } - defer resp.Body.Close() - - // check status code - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - var errorMessage string - if err := json.NewDecoder(io.LimitReader(resp.Body, 1024)).Decode(&errorMessage); err != nil { - return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode) - } - return 0, errors.New(errorMessage) - } - - // decode exchange rate - if err := json.NewDecoder(resp.Body).Decode(&rate); err != nil { - return 0, fmt.Errorf("failed to decode response: %w", err) - } - return -} diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 32e283812..38056b39d 
100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -24,11 +24,17 @@ type ( UpdateAutopilot(ctx context.Context, ap api.Autopilot) error UpdateSetting(ctx context.Context, key, value string) error } + + Explorer interface { + Enabled() bool + SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) + } ) type ( pinManager struct { a alerts.Alerter + e Explorer s Store broadcaster webhooks.Broadcaster @@ -50,9 +56,10 @@ type ( // NewPinManager returns a new PinManager, responsible for pinning prices to a // fixed value in an underlying currency. The returned pin manager is already // running and can be stopped by calling Shutdown. -func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { +func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, e Explorer, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { pm := &pinManager{ a: alerts, + e: e, s: s, broadcaster: broadcaster, @@ -66,11 +73,14 @@ func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, s St } // start the pin manager - pm.wg.Add(1) - go func() { - pm.run() - pm.wg.Done() - }() + if e.Enabled() { + pm.wg.Add(1) + go func() { + pm.run() + pm.wg.Done() + }() + } + return pm } @@ -334,7 +344,7 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { } // fetch exchange rate - rate, err := NewForexClient(settings.ForexEndpointURL).SiacoinExchangeRate(ctx, settings.Currency) + rate, err := pm.e.SiacoinExchangeRate(ctx, settings.Currency) if err != nil { return fmt.Errorf("failed to fetch exchange rate for '%s': %w", settings.Currency, err) } else if rate <= 0 { diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index e5158836d..77d74c55e 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -4,8 +4,6 @@ import ( 
"context" "encoding/json" "errors" - "net/http" - "net/http/httptest" "reflect" "sync" "testing" @@ -71,42 +69,36 @@ func (meb *mockBroadcaster) BroadcastAction(ctx context.Context, e webhooks.Even return nil } -type mockForexAPI struct { - s *httptest.Server - +type mockExplorer struct { mu sync.Mutex rate float64 unreachable bool } -func newTestForexAPI() *mockForexAPI { - api := &mockForexAPI{rate: 1} - api.s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api.mu.Lock() - defer api.mu.Unlock() - if api.unreachable { - w.WriteHeader(http.StatusInternalServerError) - return - } - json.NewEncoder(w).Encode(api.rate) - })) - return api +func (e *mockExplorer) Enabled() bool { + return true } -func (api *mockForexAPI) Close() { - api.s.Close() +func (e *mockExplorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { + e.mu.Lock() + defer e.mu.Unlock() + + if e.unreachable { + return 0, errors.New("unreachable") + } + return e.rate, nil } -func (api *mockForexAPI) setRate(rate float64) { - api.mu.Lock() - defer api.mu.Unlock() - api.rate = rate +func (e *mockExplorer) setRate(rate float64) { + e.mu.Lock() + defer e.mu.Unlock() + e.rate = rate } -func (api *mockForexAPI) setUnreachable(unreachable bool) { - api.mu.Lock() - defer api.mu.Unlock() - api.unreachable = unreachable +func (e *mockExplorer) setUnreachable(unreachable bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.unreachable = unreachable } type mockPinStore struct { @@ -186,16 +178,13 @@ func (ms *mockPinStore) UpdateAutopilot(ctx context.Context, autopilot api.Autop func TestPinManager(t *testing.T) { // mock dependencies - ms := newTestStore() - eb := &mockBroadcaster{} a := &mockAlerter{} - - // mock forex api - forex := newTestForexAPI() - defer forex.Close() + b := &mockBroadcaster{} + e := &mockExplorer{rate: 1} + s := newTestStore() // create a pinmanager - pm := NewPinManager(a, eb, ms, testUpdateInterval, time.Minute, 
zap.NewNop()) + pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, zap.NewNop()) defer func() { if err := pm.Shutdown(context.Background()); err != nil { t.Fatal(err) @@ -220,8 +209,7 @@ func TestPinManager(t *testing.T) { pps.Enabled = true pps.Currency = "usd" pps.Threshold = 0.5 - pps.ForexEndpointURL = forex.s.URL - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) // assert price manager is running now if cnt := len(rates()); cnt < 1 { @@ -229,31 +217,31 @@ func TestPinManager(t *testing.T) { } // update exchange rate and fetch current gouging settings - forex.setRate(2.5) - gs := ms.gougingSettings() + e.setRate(2.5) + gs := s.gougingSettings() // configure all pins but disable them for now pps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} pps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) // assert gouging settings are unchanged - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + if gss := s.gougingSettings(); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } // enable the max download pin, with the threshold at 0.5 it should remain unchanged pps.GougingSettingsPins.MaxDownload.Pinned = true - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + s.updatPinnedSettings(pps) + if gss := s.gougingSettings(); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } // lower the threshold, gouging settings should be updated pps.Threshold = 0.05 - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { + s.updatPinnedSettings(pps) + if gss := s.gougingSettings(); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", 
gss.MaxDownloadPrice, gs.MaxDownloadPrice) } @@ -261,20 +249,20 @@ func TestPinManager(t *testing.T) { pps.GougingSettingsPins.MaxDownload.Pinned = true pps.GougingSettingsPins.MaxStorage.Pinned = true pps.GougingSettingsPins.MaxUpload.Pinned = true - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) // assert they're all updated - if gss := ms.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || + if gss := s.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || gss.MaxStoragePrice.Equals(gs.MaxStoragePrice) || gss.MaxUploadPrice.Equals(gs.MaxUploadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss, gs) } // increase rate so average isn't catching up to us - forex.setRate(3) + e.setRate(3) // fetch autopilot - ap, _ := ms.Autopilot(context.Background(), testAutopilotID) + ap, _ := s.Autopilot(context.Background(), testAutopilotID) // add autopilot pin, but disable it pins := api.AutopilotPins{ @@ -284,38 +272,38 @@ func TestPinManager(t *testing.T) { }, } pps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) // assert autopilot was not updated - if app, _ := ms.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + if app, _ := s.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { t.Fatalf("expected autopilot to not be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) } // enable the pin pins.Allowance.Pinned = true pps.Autopilots[testAutopilotID] = pins - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) // assert autopilot was updated - if app, _ := ms.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + if app, _ := s.Autopilot(context.Background(), testAutopilotID); 
app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { t.Fatalf("expected autopilot to be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) } // make forex API return an error - forex.setUnreachable(true) + e.setUnreachable(true) // assert alert was registered - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) res, _ := a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) == 0 { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) } // make forex API return a valid response - forex.setUnreachable(false) + e.setUnreachable(false) // assert alert was dismissed - ms.updatPinnedSettings(pps) + s.updatPinnedSettings(pps) res, _ = a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) != 0 { t.Fatalf("expected 0 alerts, got %d", len(a.alerts)) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 133868029..b7f554550 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -93,6 +93,11 @@ type dbConfig struct { RetryTxIntervals []time.Duration } +type explorerConfig struct { + URL string + Disable bool +} + func (tc *TestCluster) Accounts() []api.Account { tc.tt.Helper() accounts, err := tc.Worker.Accounts(context.Background()) @@ -264,7 +269,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if opts.walletKey != nil { wk = *opts.walletKey } - busCfg, workerCfg, apCfg, dbCfg := testBusCfg(), testWorkerCfg(), testApCfg(), testDBCfg() + busCfg, workerCfg, apCfg, dbCfg, explorerCfg := testBusCfg(), testWorkerCfg(), testApCfg(), testDBCfg(), testExplorerCfg() if opts.busCfg != nil { busCfg = *opts.busCfg } @@ -364,7 +369,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Create bus. 
busDir := filepath.Join(dir, "bus") - b, bShutdownFn, cm, bs, err := newTestBus(ctx, busDir, busCfg, dbCfg, wk, logger) + b, bShutdownFn, cm, bs, err := newTestBus(ctx, busDir, busCfg, dbCfg, explorerCfg, wk, logger) tt.OK(err) busAuth := jape.BasicAuth(busPassword) @@ -537,7 +542,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { return cluster } -func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, bus.Store, error) { +func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, cfgExplorer explorerConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, bus.Store, error) { // create store alertsMgr := alerts.NewManager() storeCfg, err := buildStoreConfig(alertsMgr, dir, cfg.SlabBufferCompletionThreshold, cfgDb, pk, logger) @@ -580,6 +585,9 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } cm := chain.NewManager(store, state) + // create explorer + e := bus.NewExplorer(cfgExplorer.URL, !cfgExplorer.Disable) + // create wallet w, err := wallet.NewSingleAddressWallet(pk, cm, sqlStore, wallet.WithReservationDuration(cfg.UsedUTXOExpiry)) if err != nil { @@ -631,7 +639,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create bus announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, e, s, w, sqlStore, announcementMaxAgeHours, logger) if err != nil { return nil, nil, nil, nil, err } @@ -1084,6 +1092,12 @@ func testDBCfg() dbConfig { } } +func testExplorerCfg() explorerConfig { + return explorerConfig{ + Disable: true, + } +} + func testWorkerCfg() config.Worker { return config.Worker{ 
AccountsRefillInterval: time.Second, diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 12c839a82..7cb589ad4 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -189,9 +189,7 @@ func TestNewTestCluster(t *testing.T) { // PricePinningSettings should have default values pps, err := b.PricePinningSettings(context.Background()) tt.OK(err) - if pps.ForexEndpointURL == "" { - t.Fatal("expected default value for ForexEndpointURL") - } else if pps.Currency == "" { + if pps.Currency == "" { t.Fatal("expected default value for Currency") } else if pps.Threshold == 0 { t.Fatal("expected default value for Threshold") From 829e6bf4fd3d51c688547373ebf23dbf674cf4d3 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 12:04:12 +0200 Subject: [PATCH 57/98] sql: fix migration --- internal/sql/migrations.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index aeedc3d54..b6bc8b435 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -226,7 +226,7 @@ var ( log.Infof("performing %s migration '00018_settings'", dbIdentifier) // fetch all settings - rows, err := tx.Query(ctx, "SELECT key, value FROM settings") + rows, err := tx.Query(ctx, "SELECT `key`, value FROM settings") if err != nil { return fmt.Errorf("failed to fetch settings: %v", err) } @@ -297,9 +297,9 @@ var ( } } else { log.Warnf("s3authentication settings are not being migrated, err: %v", err) - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "s3authentication"); err != nil { - log.Warnf("failed to delete s3authentication settings: %v", err) - } + } + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "s3authentication"); err != nil { + log.Warnf("failed to delete s3authentication settings: %v", err) } } else { log.Warn("no s3authentication setting found") From 8aa18a03b504b9d3cdc899b61a83f1f5b51fbf9d Mon Sep 
17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 12:30:38 +0200 Subject: [PATCH 58/98] db: update migration --- internal/sql/migrations.go | 42 ++++++++++++++++++++++---------------- stores/sql/mysql/main.go | 5 +++++ stores/sql/sqlite/main.go | 5 +++++ 3 files changed, 34 insertions(+), 18 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index b6bc8b435..12e21cb4b 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "strings" - "time" "unicode/utf8" "go.sia.tech/renterd/api" @@ -31,6 +30,7 @@ type ( MainMigrator interface { Migrator MakeDirsForPath(ctx context.Context, tx Tx, path string) (int64, error) + UpdateSetting(ctx context.Context, tx Tx, key, value string) error } ) @@ -236,7 +236,6 @@ var ( for rows.Next() { var k, v string if err := rows.Scan(&k, &v); err != nil { - _ = rows.Close() return fmt.Errorf("failed to scan setting: %v", err) } settings[k] = v @@ -266,17 +265,18 @@ var ( if err == nil { err = ps.Validate() } - if err != nil { - log.Warnf("pricepinning settings are not being migrated, err: %v", err) - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "pricepinning"); err != nil { - return fmt.Errorf("failed to delete pricepinning settings: %v", err) - } - } else { - b, _ := json.Marshal(ps) - if _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) 
ON DUPLICATE KEY UPDATE value = VALUES(value)", - time.Now(), "pinned", string(b)); err != nil { + if err == nil { + updated, _ := json.Marshal(ps) + if err := m.UpdateSetting(ctx, tx, "pinned", string(updated)); err != nil { return fmt.Errorf("failed to insert pinned settings: %v", err) } + } else { + log.Warnf("pricepinning settings are not being migrated, err: %v", err) + } + + // always delete because it got renamed + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "pricepinning"); err != nil { + log.Warnf("failed to delete pricepinning settings: %v", err) } } else { log.Warn("no pricepinning settings found") @@ -290,14 +290,15 @@ var ( err = s3s.Validate() } if err == nil { - b, _ := json.Marshal(s3s) - if _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value)", - time.Now(), "s3", string(b)); err != nil { + updated, _ := json.Marshal(s3s) + if err := m.UpdateSetting(ctx, tx, "s3", string(updated)); err != nil { return fmt.Errorf("failed to insert s3 settings: %v", err) } } else { log.Warnf("s3authentication settings are not being migrated, err: %v", err) } + + // always delete because it got renamed if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "s3authentication"); err != nil { log.Warnf("failed to delete s3authentication settings: %v", err) } @@ -317,6 +318,8 @@ var ( } else { us.DefaultContractSet = css.Default } + + // always delete because it got replaced if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "contractset"); err != nil { return err } @@ -329,6 +332,8 @@ var ( } else { us.Packing = ups } + + // always delete because it got replaced if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "uploadpacking"); err != nil { return err } @@ -345,6 +350,8 @@ var ( } else { us.Redundancy = rs } + + // always delete because it got replaced if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", 
"redundancy"); err != nil { return err } @@ -355,10 +362,9 @@ var ( log.Warnf("upload settings are not being migrated, err: %v", err) return err // developer error } else { - b, _ := json.Marshal(us) - if _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value)", - time.Now(), "upload", string(b)); err != nil { - return fmt.Errorf("failed to insert s3 settings: %v", err) + updated, _ := json.Marshal(us) + if err := m.UpdateSetting(ctx, tx, "upload", string(updated)); err != nil { + return fmt.Errorf("failed to insert upload settings: %v", err) } } diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 457db78b7..71625a343 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -87,6 +87,11 @@ func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.Database }) } +func (b *MainDatabase) UpdateSetting(ctx context.Context, tx sql.Tx, key, value string) error { + mtx := b.wrapTxn(tx) + return mtx.UpdateSetting(ctx, key, value) +} + func (b *MainDatabase) Version(ctx context.Context) (string, string, error) { return version(ctx, b.db) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 7adaaaeab..f7e839a4a 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -86,6 +86,11 @@ func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.Database }) } +func (b *MainDatabase) UpdateSetting(ctx context.Context, tx sql.Tx, key, value string) error { + mtx := b.wrapTxn(tx) + return mtx.UpdateSetting(ctx, key, value) +} + func (b *MainDatabase) Version(ctx context.Context) (string, string, error) { return version(ctx, b.db) } From b47b078a9f964bb1008fbca7b65205e30052af09 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 13:29:24 +0200 Subject: [PATCH 59/98] bus: remove Enabled from pinned settings --- api/setting.go | 24 +++++++++++++++----- bus/bus.go | 9 ++------ bus/routes.go | 6 ++++- 
internal/bus/pinmanager.go | 4 ++-- internal/bus/pinmanager_test.go | 39 +++++++++++++++++++++++---------- 5 files changed, 54 insertions(+), 28 deletions(-) diff --git a/api/setting.go b/api/setting.go index cb82eeafb..7e472cb66 100644 --- a/api/setting.go +++ b/api/setting.go @@ -54,7 +54,6 @@ var ( // configured with on startup. These values can be adjusted using the // settings API. DefaultPricePinSettings = PricePinSettings{ - Enabled: false, Currency: "usd", Threshold: 0.05, } @@ -136,10 +135,6 @@ type ( // the current exchange rate, allowing users to set prices in USD instead of // SC. PricePinSettings struct { - // Enabled can be used to either enable or temporarily disable price - // pinning. If enabled, the currency and threshold must be valid. - Enabled bool `json:"enabled"` - // Currency is the external three-letter currency code. Currency string `json:"currency"` @@ -198,9 +193,26 @@ func (p Pin) IsPinned() bool { return p.Pinned && p.Value > 0 } +// Enabled returns true if any pins are enabled. +func (pps PricePinSettings) Enabled() bool { + if pps.GougingSettingsPins.MaxDownload.Pinned || + pps.GougingSettingsPins.MaxStorage.Pinned || + pps.GougingSettingsPins.MaxUpload.Pinned { + return true + } + + for _, pin := range pps.Autopilots { + if pin.Allowance.Pinned { + return true + } + } + + return false +} + // Validate returns an error if the price pin settings are not considered valid. 
func (pps PricePinSettings) Validate() error { - if !pps.Enabled { + if !pps.Enabled() { return nil } if pps.Currency == "" { diff --git a/bus/bus.go b/bus/bus.go index 418c9eae0..45627408e 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -775,13 +775,8 @@ func (b *Bus) initSettings(ctx context.Context) error { if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { return fmt.Errorf("failed to update setting '%v': %w", api.SettingPricePinning, err) } - } else if pps.Enabled && !b.e.Enabled() { - b.logger.Warn("pinning can not be enabled, explorer is disabled, pinning will be disabled") - pps.Enabled = false - updated, _ := json.Marshal(pps) - if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { - return fmt.Errorf("failed to update setting '%v': %w", api.SettingPricePinning, err) - } + } else if pps.Enabled() && !b.e.Enabled() { + return fmt.Errorf("price pinning can not be enabled, %w", api.ErrExplorerDisabled) } return nil diff --git a/bus/routes.go b/bus/routes.go index 4df4193a3..15dee0850 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1721,7 +1721,7 @@ func (b *Bus) settingKeyHandlerPUT(jc jape.Context) { } else if err := pps.Validate(); err != nil { jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) return - } else if pps.Enabled && !b.e.Enabled() { + } else if pps.Enabled() && !b.e.Enabled() { jc.Error(fmt.Errorf("pinning can not be enabled, %w", api.ErrExplorerDisabled), http.StatusBadRequest) return } @@ -2051,6 +2051,10 @@ func (b *Bus) stateHandlerGET(jc jape.Context) { OS: runtime.GOOS, BuildTime: api.TimeRFC3339(build.BuildTime()), }, + Explorer: api.ExplorerState{ + Enabled: b.e.Enabled(), + URL: b.e.BaseURL(), + }, Network: b.cm.TipState().Network.Name, }) } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 38056b39d..97e139bd5 100644 --- a/internal/bus/pinmanager.go +++ 
b/internal/bus/pinmanager.go @@ -338,8 +338,8 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { return nil } else if err != nil { return fmt.Errorf("failed to fetch pinned settings: %w", err) - } else if !settings.Enabled { - pm.logger.Debug("price pinning is disabled, skipping price update") + } else if !settings.Enabled() { + pm.logger.Debug("no pinned settings, skipping price update") return nil } diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 77d74c55e..4154f64a9 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "os" "reflect" "sync" "testing" @@ -16,6 +17,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) const ( @@ -176,6 +178,19 @@ func (ms *mockPinStore) UpdateAutopilot(ctx context.Context, autopilot api.Autop return nil } +func newTestLoggerCustom(level zapcore.Level) *zap.Logger { + config := zap.NewProductionEncoderConfig() + config.EncodeTime = zapcore.RFC3339TimeEncoder + config.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.StacktraceKey = "" + consoleEncoder := zapcore.NewConsoleEncoder(config) + + return zap.New( + zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), level), + zap.AddCaller(), + zap.AddStacktrace(level), + ) +} func TestPinManager(t *testing.T) { // mock dependencies a := &mockAlerter{} @@ -184,7 +199,8 @@ func TestPinManager(t *testing.T) { s := newTestStore() // create a pinmanager - pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, zap.NewNop()) + + pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, newTestLoggerCustom(zap.DebugLevel)) defer func() { if err := pm.Shutdown(context.Background()); err != nil { t.Fatal(err) @@ -206,18 +222,11 @@ func TestPinManager(t *testing.T) { // enable price pinning pps := api.DefaultPricePinSettings - pps.Enabled = true 
pps.Currency = "usd" pps.Threshold = 0.5 s.updatPinnedSettings(pps) - // assert price manager is running now - if cnt := len(rates()); cnt < 1 { - t.Fatal("expected at least one rate") - } - // update exchange rate and fetch current gouging settings - e.setRate(2.5) gs := s.gougingSettings() // configure all pins but disable them for now @@ -231,9 +240,15 @@ func TestPinManager(t *testing.T) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } - // enable the max download pin, with the threshold at 0.5 it should remain unchanged + // enable the max download pin pps.GougingSettingsPins.MaxDownload.Pinned = true s.updatPinnedSettings(pps) + + // adjust the rate + e.setRate(1.5) + time.Sleep(2 * testUpdateInterval) + + // at threshold of .5 the prices should not be updated if gss := s.gougingSettings(); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } @@ -241,7 +256,7 @@ func TestPinManager(t *testing.T) { // lower the threshold, gouging settings should be updated pps.Threshold = 0.05 s.updatPinnedSettings(pps) - if gss := s.gougingSettings(); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { + if gss := s.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) } @@ -289,7 +304,7 @@ func TestPinManager(t *testing.T) { t.Fatalf("expected autopilot to be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) } - // make forex API return an error + // make explorer return an error e.setUnreachable(true) // assert alert was registered @@ -299,7 +314,7 @@ func TestPinManager(t *testing.T) { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) } - // make forex API return a valid response + // make explorer return a valid response e.setUnreachable(false) // assert alert was dismissed From 8de033dd9cff4d52db4f2ab71d4519f8ea5d72c0 Mon Sep 17 00:00:00 2001 From: 
PJ Date: Fri, 6 Sep 2024 13:46:03 +0200 Subject: [PATCH 60/98] bus: move explorer to internal --- bus/bus.go | 16 ++++++++++++---- bus/routes.go | 6 +++--- cmd/renterd/config.go | 9 +++++++++ cmd/renterd/node.go | 9 +++++---- {bus => internal/bus}/explorer.go | 12 +++++------- internal/bus/pinmanager.go | 5 ----- internal/bus/pinmanager_test.go | 4 ++++ internal/test/e2e/cluster.go | 9 +++++---- 8 files changed, 43 insertions(+), 27 deletions(-) rename {bus => internal/bus}/explorer.go (92%) diff --git a/bus/bus.go b/bus/bus.go index 45627408e..92b629bfd 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -101,6 +101,11 @@ type ( Shutdown(context.Context) error } + Explorer interface { + Enabled() bool + BaseURL() string + } + // A TransactionPool can validate and relay unconfirmed transactions. TransactionPool interface { AcceptTransactionSet(txns []types.Transaction) error @@ -311,7 +316,6 @@ type Bus struct { pinMgr PinManager webhooksMgr WebhooksManager cm ChainManager - e Explorer cs ChainSubscriber s Syncer w Wallet @@ -327,6 +331,7 @@ type Bus struct { rhp3 *rhp3.Client contractLocker ContractLocker + explorer Explorer sectors UploadingSectorsCache walletMetricsRecorder WalletMetricsRecorder @@ -334,7 +339,7 @@ type Bus struct { } // New returns a new Bus -func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, e Explorer, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) { +func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, l *zap.Logger) (_ *Bus, err error) { l = l.Named("bus") b := &Bus{ @@ -344,7 +349,6 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa accounts: store, s: s, cm: cm, - e: e, w: w, hs: store, as: store, @@ -369,6 +373,10 @@ func New(ctx context.Context, masterKey [32]byte, am 
AlertManager, wm WebhooksMa // create contract locker b.contractLocker = ibus.NewContractLocker() + // create explorer + e := ibus.NewExplorer(explorerURL) + b.explorer = e + // create sectors cache b.sectors = ibus.NewSectorsCache() @@ -775,7 +783,7 @@ func (b *Bus) initSettings(ctx context.Context) error { if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil { return fmt.Errorf("failed to update setting '%v': %w", api.SettingPricePinning, err) } - } else if pps.Enabled() && !b.e.Enabled() { + } else if pps.Enabled() && !b.explorer.Enabled() { return fmt.Errorf("price pinning can not be enabled, %w", api.ErrExplorerDisabled) } diff --git a/bus/routes.go b/bus/routes.go index 15dee0850..0db0e49c0 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1721,7 +1721,7 @@ func (b *Bus) settingKeyHandlerPUT(jc jape.Context) { } else if err := pps.Validate(); err != nil { jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) return - } else if pps.Enabled() && !b.e.Enabled() { + } else if pps.Enabled() && !b.explorer.Enabled() { jc.Error(fmt.Errorf("pinning can not be enabled, %w", api.ErrExplorerDisabled), http.StatusBadRequest) return } @@ -2052,8 +2052,8 @@ func (b *Bus) stateHandlerGET(jc jape.Context) { BuildTime: api.TimeRFC3339(build.BuildTime()), }, Explorer: api.ExplorerState{ - Enabled: b.e.Enabled(), - URL: b.e.BaseURL(), + Enabled: b.explorer.Enabled(), + URL: b.explorer.BaseURL(), }, Network: b.cm.TipState().Network.Name, }) diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 608c2a62a..3db4edd24 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -171,6 +171,12 @@ func loadConfig() (cfg config.Config, network *consensus.Network, genesis types. 
return } + // check explorer + if !cfg.Explorer.Disable && cfg.Explorer.URL == "" { + err = fmt.Errorf("explorer is disabled but no URL is set") + return + } + return } @@ -419,6 +425,9 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_WORKER_API_PASSWORD", &workerRemotePassStr) parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) + + parseEnvVar("RENTERD_EXPLORER_DISABLE", &cfg.Explorer.Disable) + parseEnvVar("RENTERD_EXPLORER_URL", &cfg.Explorer.URL) } // readPasswordInput reads a password from stdin. diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 11968ff52..d1dda7e17 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -311,9 +311,6 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network } cm := chain.NewManager(store, state) - // create explorer - e := bus.NewExplorer(cfg.Explorer.URL, !cfg.Explorer.Disable) - // create wallet w, err := wallet.NewSingleAddressWallet(pk, cm, sqlStore, wallet.WithReservationDuration(cfg.Bus.UsedUTXOExpiry)) if err != nil { @@ -384,8 +381,12 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network masterKey := blake2b.Sum256(append([]byte("worker"), pk...)) // create bus + var explorerURL string + if !cfg.Explorer.Disable { + explorerURL = cfg.Explorer.URL + } announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, e, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } diff --git a/bus/explorer.go b/internal/bus/explorer.go similarity index 92% rename from bus/explorer.go rename to internal/bus/explorer.go index 2040403b1..fcd213870 100644 --- a/bus/explorer.go +++ b/internal/bus/explorer.go @@ -20,16 +20,14 @@ type ( } explorer struct { - enabled bool - 
url string + url string } ) // NewExplorer returns a new Explorer. -func NewExplorer(url string, enabled bool) Explorer { +func NewExplorer(url string) Explorer { return &explorer{ - enabled: enabled, - url: url, + url: url, } } @@ -40,13 +38,13 @@ func (e *explorer) BaseURL() string { // Enabled returns true if the explorer is enabled. func (e *explorer) Enabled() bool { - return e.enabled + return e.url != "" } // SiacoinExchangeRate returns the exchange rate for the given currency. func (e *explorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { // return early if the explorer is disabled - if !e.enabled { + if !e.Enabled() { return 0, api.ErrExplorerDisabled } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 97e139bd5..1db2fd9a7 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -24,11 +24,6 @@ type ( UpdateAutopilot(ctx context.Context, ap api.Autopilot) error UpdateSetting(ctx context.Context, key, value string) error } - - Explorer interface { - Enabled() bool - SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) - } ) type ( diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 4154f64a9..b2c7964cc 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -81,6 +81,10 @@ func (e *mockExplorer) Enabled() bool { return true } +func (e *mockExplorer) BaseURL() string { + return "" +} + func (e *mockExplorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { e.mu.Lock() defer e.mu.Unlock() diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index b7f554550..9df48ff11 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -585,9 +585,6 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } cm := chain.NewManager(store, state) - // create explorer - e := 
bus.NewExplorer(cfgExplorer.URL, !cfgExplorer.Disable) - // create wallet w, err := wallet.NewSingleAddressWallet(pk, cm, sqlStore, wallet.WithReservationDuration(cfg.UsedUTXOExpiry)) if err != nil { @@ -639,7 +636,11 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create bus announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, e, s, w, sqlStore, announcementMaxAgeHours, logger) + var explorerURL string + if cfgExplorer.URL != "" { + explorerURL = cfgExplorer.URL + } + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, logger) if err != nil { return nil, nil, nil, nil, err } From 6b8d2690e265bae5e5e091f39337eba3a13b49e1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 14:38:18 +0200 Subject: [PATCH 61/98] bus: remove Explorer interface --- bus/bus.go | 7 +------ internal/bus/explorer.go | 19 ++++++------------- internal/bus/pinmanager.go | 12 ++++++++++-- 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 92b629bfd..1371ea067 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -101,11 +101,6 @@ type ( Shutdown(context.Context) error } - Explorer interface { - Enabled() bool - BaseURL() string - } - // A TransactionPool can validate and relay unconfirmed transactions. TransactionPool interface { AcceptTransactionSet(txns []types.Transaction) error @@ -331,7 +326,7 @@ type Bus struct { rhp3 *rhp3.Client contractLocker ContractLocker - explorer Explorer + explorer *ibus.Explorer sectors UploadingSectorsCache walletMetricsRecorder WalletMetricsRecorder diff --git a/internal/bus/explorer.go b/internal/bus/explorer.go index fcd213870..5b31910c6 100644 --- a/internal/bus/explorer.go +++ b/internal/bus/explorer.go @@ -12,37 +12,30 @@ import ( ) type ( - // An explorer retrieves data about the Sia network from an external source. 
- Explorer interface { - Enabled() bool - BaseURL() string - SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) - } - - explorer struct { + Explorer struct { url string } ) // NewExplorer returns a new Explorer. -func NewExplorer(url string) Explorer { - return &explorer{ +func NewExplorer(url string) *Explorer { + return &Explorer{ url: url, } } // BaseURL returns the base URL of the Explorer. -func (e *explorer) BaseURL() string { +func (e *Explorer) BaseURL() string { return e.url } // Enabled returns true if the explorer is enabled. -func (e *explorer) Enabled() bool { +func (e *Explorer) Enabled() bool { return e.url != "" } // SiacoinExchangeRate returns the exchange rate for the given currency. -func (e *explorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { +func (e *Explorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { // return early if the explorer is disabled if !e.Enabled() { return 0, api.ErrExplorerDisabled diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 1db2fd9a7..400758c63 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -18,6 +18,14 @@ import ( ) type ( + // An ExchangeRateExplorer retrieves exchange rate data about + // the SC token. + ExchangeRateExplorer interface { + Enabled() bool + BaseURL() string + SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) + } + Store interface { Autopilot(ctx context.Context, id string) (api.Autopilot, error) Setting(ctx context.Context, key string) (string, error) @@ -29,7 +37,7 @@ type ( type ( pinManager struct { a alerts.Alerter - e Explorer + e ExchangeRateExplorer s Store broadcaster webhooks.Broadcaster @@ -51,7 +59,7 @@ type ( // NewPinManager returns a new PinManager, responsible for pinning prices to a // fixed value in an underlying currency. 
The returned pin manager is already // running and can be stopped by calling Shutdown. -func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, e Explorer, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { +func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, e ExchangeRateExplorer, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { pm := &pinManager{ a: alerts, e: e, From 8c27676cdf812ccab54563c92080fa7bfb68d358 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 14:56:43 +0200 Subject: [PATCH 62/98] testing: remove test logger --- internal/bus/pinmanager_test.go | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index b2c7964cc..a6e81c28e 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "os" "reflect" "sync" "testing" @@ -17,7 +16,6 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) const ( @@ -182,19 +180,6 @@ func (ms *mockPinStore) UpdateAutopilot(ctx context.Context, autopilot api.Autop return nil } -func newTestLoggerCustom(level zapcore.Level) *zap.Logger { - config := zap.NewProductionEncoderConfig() - config.EncodeTime = zapcore.RFC3339TimeEncoder - config.EncodeLevel = zapcore.CapitalColorLevelEncoder - config.StacktraceKey = "" - consoleEncoder := zapcore.NewConsoleEncoder(config) - - return zap.New( - zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), level), - zap.AddCaller(), - zap.AddStacktrace(level), - ) -} func TestPinManager(t *testing.T) { // mock dependencies a := &mockAlerter{} @@ -203,8 +188,7 @@ func TestPinManager(t *testing.T) { s := newTestStore() // create a pinmanager - - pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, 
newTestLoggerCustom(zap.DebugLevel)) + pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, zap.NewNop()) defer func() { if err := pm.Shutdown(context.Background()); err != nil { t.Fatal(err) From ca1c0776c729f3b8ddd5912f7bbcdfdf83db650a Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 6 Sep 2024 15:13:56 +0200 Subject: [PATCH 63/98] stores: use bool --- cmd/renterd/node.go | 14 +++++++------- internal/test/e2e/cluster.go | 2 +- stores/settings.go | 2 +- stores/sql.go | 16 ++++++++-------- stores/sql_test.go | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 5787bae7f..58e96c9c8 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -258,19 +258,13 @@ func newNode(cfg config.Config, network *consensus.Network, genesis types.Block) } func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network *consensus.Network, genesis types.Block, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, error) { - // get explorer URL - var explorerURL string - if !cfg.Explorer.Disable { - explorerURL = cfg.Explorer.URL - } - // create store alertsMgr := alerts.NewManager() storeCfg, err := buildStoreConfig(alertsMgr, cfg, pk, logger) if err != nil { return nil, nil, err } - sqlStore, err := stores.NewSQLStore(storeCfg, explorerURL, network) + sqlStore, err := stores.NewSQLStore(storeCfg, cfg.Explorer.Disable, network) if err != nil { return nil, nil, err } @@ -384,6 +378,12 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network // to ensure contracts formed by the bus can be renewed by the autopilot masterKey := blake2b.Sum256(append([]byte("worker"), pk...)) + // get explorer URL + var explorerURL string + if !cfg.Explorer.Disable { + explorerURL = cfg.Explorer.URL + } + // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, 
announcementMaxAgeHours, explorerURL, logger) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 3a75a7ee7..13c66520b 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -554,7 +554,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create store network, genesis := testNetwork() - sqlStore, err := stores.NewSQLStore(storeCfg, "", network) + sqlStore, err := stores.NewSQLStore(storeCfg, true, network) if err != nil { return nil, nil, nil, nil, err } diff --git a/stores/settings.go b/stores/settings.go index 3a24cb5bc..fab149a11 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -76,7 +76,7 @@ func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{} return fmt.Errorf("failed to fetch setting from db: %w", err) } else if err != nil { value = s.defaultSetting(key) - } else if key == SettingPinned && s.explorerURL == "" { + } else if key == SettingPinned && !s.explorerDisabled { var ps api.PinnedSettings if err := json.Unmarshal([]byte(value), &ps); err == nil && ps.Enabled() { s.logger.Warn("pinned settings are enabled but explorer is disabled, using default settings") diff --git a/stores/sql.go b/stores/sql.go index 5a3c188fb..dee0a416f 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -43,9 +43,9 @@ type ( dbMetrics sql.MetricsDatabase logger *zap.SugaredLogger - explorerURL string - network *consensus.Network - walletAddress types.Address + explorerDisabled bool + network *consensus.Network + walletAddress types.Address // ObjectDB related fields slabBufferMgr *SlabBufferManager @@ -71,7 +71,7 @@ type ( // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only // pass migrate=true for the first instance of SQLHostDB if you connect via the // same Dialector multiple times. 
-func NewSQLStore(cfg Config, explorerURL string, network *consensus.Network) (*SQLStore, error) { +func NewSQLStore(cfg Config, explorerDisabled bool, network *consensus.Network) (*SQLStore, error) { if err := os.MkdirAll(cfg.PartialSlabDir, 0700); err != nil { return nil, fmt.Errorf("failed to create partial slab dir '%s': %v", cfg.PartialSlabDir, err) } @@ -102,10 +102,10 @@ func NewSQLStore(cfg Config, explorerURL string, network *consensus.Network) (*S dbMetrics: dbMetrics, logger: l.Sugar(), - settings: make(map[string]string), - walletAddress: cfg.WalletAddress, - explorerURL: explorerURL, - network: network, + settings: make(map[string]string), + walletAddress: cfg.WalletAddress, + explorerDisabled: explorerDisabled, + network: network, slabPruneSigChan: make(chan struct{}, 1), lastPrunedAt: time.Now(), diff --git a/stores/sql_test.go b/stores/sql_test.go index 1b2689f03..7a676a5b3 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -180,7 +180,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { LongQueryDuration: 100 * time.Millisecond, LongTxDuration: 100 * time.Millisecond, RetryTransactionIntervals: []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 200 * time.Millisecond}, - }, "", &consensus.Network{}) + }, false, &consensus.Network{}) if err != nil { t.Fatal("failed to create SQLStore", err) } From 4f4a9c44001726f7de9c259dc8eafd41cd6ca4b6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 16:15:54 +0200 Subject: [PATCH 64/98] sql: add support for filtering by price table and settings fields --- api/host.go | 71 +++++++++++++++++++++++++++ api/object.go | 4 +- bus/client/hosts.go | 2 + bus/routes.go | 14 ++++++ internal/test/e2e/cluster_test.go | 12 ++--- stores/hostdb_test.go | 81 +++++++++++++++++++++++++++++++ stores/sql/main.go | 55 ++++++++++++++++----- 7 files changed, 220 insertions(+), 19 deletions(-) diff --git a/api/host.go b/api/host.go index a8d7a40e4..0ac29c228 100644 --- 
a/api/host.go +++ b/api/host.go @@ -22,10 +22,77 @@ const ( UsabilityFilterModeUnusable = "unusable" ) +var ValidHostSortBy = map[string]any{ + // price table + "price_table.uid": nil, + "price_table.validity": nil, + "price_table.hostblockheight": nil, + "price_table.updatepricetablecost": nil, + "price_table.accountbalancecost": nil, + "price_table.fundaccountcost": nil, + "price_table.latestrevisioncost": nil, + "price_table.subscriptionmemorycost": nil, + "price_table.subscriptionnotificationcost": nil, + "price_table.initbasecost": nil, + "price_table.memorytimecost": nil, + "price_table.downloadbandwidthcost": nil, + "price_table.uploadbandwidthcost": nil, + "price_table.dropsectorsbasecost": nil, + "price_table.dropsectorsunitcost": nil, + "price_table.hassectorbasecost": nil, + "price_table.readbasecost": nil, + "price_table.readlengthcost": nil, + "price_table.renewcontractcost": nil, + "price_table.revisionbasecost": nil, + "price_table.swapsectorcost": nil, + "price_table.writebasecost": nil, + "price_table.writelengthcost": nil, + "price_table.writestorecost": nil, + "price_table.txnfeeminrecommended": nil, + "price_table.txnfeemaxrecommended": nil, + "price_table.contractprice": nil, + "price_table.collateralcost": nil, + "price_table.maxcollateral": nil, + "price_table.maxduration": nil, + "price_table.windowsize": nil, + "price_table.registryentriesleft": nil, + "price_table.registryentriestotal": nil, + + // settings + "settings.acceptingcontracts": nil, + "settings.maxdownloadbatchsize": nil, + "settings.maxduration": nil, + "settings.maxrevisebatchsize": nil, + "settings.netaddress": nil, + "settings.remainingstorage": nil, + "settings.sectorsize": nil, + "settings.totalstorage": nil, + "settings.unlockhash": nil, + "settings.windowsize": nil, + "settings.collateral": nil, + "settings.maxcollateral": nil, + "settings.baserpcprice": nil, + "settings.contractprice": nil, + "settings.downloadbandwidthprice": nil, + "settings.sectoraccessprice": nil, 
+ "settings.storageprice": nil, + "settings.uploadbandwidthprice": nil, + "settings.ephemeralaccountexpiry": nil, + "settings.maxephemeralaccountbalance": nil, + "settings.revisionnumber": nil, + "settings.version": nil, + "settings.release": nil, + "settings.siamuxport": nil, +} + var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") + + // ErrInvalidHostSortBy is returned when the SortBy parameter used + // when querying hosts is invalid. + ErrInvalidHostSortBy = errors.New("invalid SortBy parameter") ) var ( @@ -66,6 +133,8 @@ type ( UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` + SortBy string `json:"sortBy"` + SortDir string `json:"sortDir"` } // HostResponse is the response type for the GET @@ -117,6 +186,8 @@ type ( KeyIn []types.PublicKey Limit int Offset int + SortBy string + SortDir string } ) diff --git a/api/object.go b/api/object.go index a5cef0422..006014839 100644 --- a/api/object.go +++ b/api/object.go @@ -23,8 +23,8 @@ const ( ObjectSortByName = "name" ObjectSortBySize = "size" - ObjectSortDirAsc = "asc" - ObjectSortDirDesc = "desc" + SortDirAsc = "asc" + SortDirDesc = "desc" ) var ( diff --git a/bus/client/hosts.go b/bus/client/hosts.go index d7aa5f6db..f9480b32b 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -26,6 +26,8 @@ func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.H UsabilityMode: opts.UsabilityMode, AddressContains: opts.AddressContains, KeyIn: opts.KeyIn, + SortBy: opts.SortBy, + SortDir: opts.SortDir, }, &hosts) return } diff --git a/bus/routes.go b/bus/routes.go index a49742724..881221077 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -625,6 +625,18 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { return } + // validate sorting params + if _, valid := api.ValidHostSortBy[req.SortBy]; !valid { + 
jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) + return + } + switch req.SortDir { + case "", api.SortDirAsc, api.SortDirDesc: + default: + jc.Error(errors.New("invalid value for SortDir param, options are 'asc' and 'desc'"), http.StatusBadRequest) + return + } + // validate the offset and limit if req.Offset < 0 { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) @@ -645,6 +657,8 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { KeyIn: req.KeyIn, Offset: req.Offset, Limit: req.Limit, + SortBy: req.SortBy, + SortDir: req.SortDir, }) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 9c3383d83..66801afa2 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -105,15 +105,15 @@ func TestListObjects(t *testing.T) { want []api.ObjectMetadata }{ {"/", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: 
"/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.SortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.SortDirDesc, []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.SortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.SortDirDesc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, {"/foo/b", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: 
"/foo/baz/quuz", Size: 4, Health: 1}}}, {"o/baz/quu", "", "", []api.ObjectMetadata{}}, {"/foo", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.SortDirAsc, []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.SortDirDesc, []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, } for _, test := range tests { // use the bus client diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 82e73191e..526ae31d8 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -138,6 +138,34 @@ func TestHosts(t *testing.T) { } hk1, hk2, hk3 := hks[0], hks[1], hks[2] + err := ss.RecordHostScans(context.Background(), []api.HostScan{ + { + HostKey: hk1, + PriceTable: rhpv3.HostPriceTable{ + InitBaseCost: types.NewCurrency64(2), + }, + Settings: rhpv2.HostSettings{ + BaseRPCPrice: types.NewCurrency64(2), + }, + Success: true, + Timestamp: time.Now(), + }, + { + HostKey: hk3, + PriceTable: rhpv3.HostPriceTable{ + InitBaseCost: types.NewCurrency64(1), + }, + Settings: rhpv2.HostSettings{ 
+ BaseRPCPrice: types.NewCurrency64(1), + }, + Success: true, + Timestamp: time.Now(), + }, + }) + if err != nil { + t.Fatal(err) + } + // search all hosts his, err := ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", @@ -154,6 +182,59 @@ func TestHosts(t *testing.T) { t.Fatal("unexpected") } + // search all hosts sorted by initbasecost + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.initbasecost", + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // reverse order + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.initbasecost", + SortDir: api.SortDirDesc, + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 || his[2].PublicKey != hk2 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // search all hosts sorted by baserpcprice + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "settings.baserpcprice", + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // search by invalid key + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.invalid", + Limit: -1, + }) + if !errors.Is(err, api.ErrInvalidHostSortBy) { + t.Fatal(err) + } + // assert offset & limit are taken 
into account his, err = ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", diff --git a/stores/sql/main.go b/stores/sql/main.go index 95d933fa9..f299d0e33 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -689,7 +689,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var whereExprs []string - var args []any + var whereArgs []any // fetch autopilot id var autopilotID int64 @@ -728,7 +728,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er // filter address if opts.AddressContains != "" { whereExprs = append(whereExprs, "h.net_address LIKE ?") - args = append(args, "%"+opts.AddressContains+"%") + whereArgs = append(whereArgs, "%"+opts.AddressContains+"%") } // filter public key @@ -739,7 +739,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?" whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) - args = append(args, pubKeys...) + whereArgs = append(whereArgs, pubKeys...) 
} // filter usability @@ -750,10 +750,10 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er switch opts.UsabilityMode { case api.UsabilityFilterModeUsable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) - args = append(args, autopilotID) + whereArgs = append(whereArgs, autopilotID) case api.UsabilityFilterModeUnusable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) - args = append(args, autopilotID) + whereArgs = append(whereArgs, autopilotID) } } @@ -788,6 +788,34 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er if hasBlocklist { blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") } + + var orderByExpr string + var orderByArgs []any + if opts.SortBy != "" { + if opts.SortDir != "" && + opts.SortDir != api.SortDirAsc && + opts.SortDir != api.SortDirDesc { + return nil, fmt.Errorf("invalid sort order: %v", opts.SortDir) + } else if _, valid := api.ValidHostSortBy[opts.SortBy]; !valid { + return nil, fmt.Errorf("%w: invalid sortBy parameter: %v", api.ErrInvalidHostSortBy, opts.SortBy) + } + + var fieldExpr string + if strings.HasPrefix(opts.SortBy, "settings.") { + 
field := strings.TrimPrefix(opts.SortBy, "settings.") + fieldExpr = fmt.Sprintf("h.settings ->> '$.%s'", field) + } else if strings.HasPrefix(opts.SortBy, "price_table.") { + field := strings.TrimPrefix(opts.SortBy, "price_table.") + fieldExpr = fmt.Sprintf("h.price_table ->> '$.%s'", field) + } else { + return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) + } + orderByExpr = fmt.Sprintf("ORDER BY %s %s", fieldExpr, opts.SortDir) + orderByArgs = append(orderByArgs, fieldExpr) + whereExprs = append(whereExprs, "COALESCE(?, '') != ''") + whereArgs = append(whereArgs, fieldExpr) + } + var blockedExpr string if len(blockedExprs) > 0 { blockedExpr = strings.Join(blockedExprs, " OR ") @@ -798,6 +826,10 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er if len(whereExprs) > 0 { whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") } + var args []any + args = append(args, whereArgs...) + args = append(args, orderByArgs...) + rows, err = tx.Query(ctx, fmt.Sprintf(` SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry, h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success, @@ -806,7 +838,8 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er FROM hosts h %s %s - `, blockedExpr, whereExpr, offsetLimitStr), args...) + %s + `, blockedExpr, whereExpr, orderByExpr, offsetLimitStr), args...) 
if err != nil { return nil, fmt.Errorf("failed to fetch hosts: %w", err) } @@ -1178,7 +1211,7 @@ func whereObjectMarker(marker, sortBy, sortDir string, queryMarker func(dst any, return nil, nil, fmt.Errorf("sortBy and sortDir must be set") } - desc := strings.ToLower(sortDir) == api.ObjectSortDirDesc + desc := strings.ToLower(sortDir) == api.SortDirDesc switch strings.ToLower(sortBy) { case api.ObjectSortByName: if desc { @@ -1221,8 +1254,8 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) { } dir2SQL := map[string]string{ - api.ObjectSortDirAsc: "ASC", - api.ObjectSortDirDesc: "DESC", + api.SortDirAsc: "ASC", + api.SortDirDesc: "DESC", } if _, ok := dir2SQL[strings.ToLower(sortDir)]; !ok { return nil, fmt.Errorf("invalid sortDir: %v", sortDir) @@ -1258,7 +1291,7 @@ func ListObjects(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, ma sortBy = api.ObjectSortByName } if sortDir == "" { - sortDir = api.ObjectSortDirAsc + sortDir = api.SortDirAsc } // filter by bucket @@ -1610,7 +1643,7 @@ func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sor sortBy = api.ObjectSortByName } if sortDir == "" { - sortDir = api.ObjectSortDirAsc + sortDir = api.SortDirAsc } // fetch directory id From 2f2d51592d17749ca0b4a1db14c9ee0bcd103da2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 17:56:42 +0200 Subject: [PATCH 65/98] sql: fix test in MySQL --- stores/sql/main.go | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/stores/sql/main.go b/stores/sql/main.go index f299d0e33..e0c41797e 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -689,7 +689,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var whereExprs []string - var whereArgs []any + var args []any // fetch autopilot id var autopilotID int64 @@ -728,7 +728,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er // filter 
address if opts.AddressContains != "" { whereExprs = append(whereExprs, "h.net_address LIKE ?") - whereArgs = append(whereArgs, "%"+opts.AddressContains+"%") + args = append(args, "%"+opts.AddressContains+"%") } // filter public key @@ -739,7 +739,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?" whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) - whereArgs = append(whereArgs, pubKeys...) + args = append(args, pubKeys...) } // filter usability @@ -750,10 +750,10 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er switch opts.UsabilityMode { case api.UsabilityFilterModeUsable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) - whereArgs = append(whereArgs, autopilotID) + args = append(args, autopilotID) case api.UsabilityFilterModeUnusable: whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) - whereArgs = append(whereArgs, autopilotID) + args = append(args, autopilotID) } } @@ -790,14 +790,14 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var orderByExpr string - var orderByArgs []any if opts.SortBy != "" 
{ - if opts.SortDir != "" && - opts.SortDir != api.SortDirAsc && - opts.SortDir != api.SortDirDesc { + switch opts.SortDir { + case "", api.SortDirAsc, api.SortDirDesc: + default: return nil, fmt.Errorf("invalid sort order: %v", opts.SortDir) - } else if _, valid := api.ValidHostSortBy[opts.SortBy]; !valid { - return nil, fmt.Errorf("%w: invalid sortBy parameter: %v", api.ErrInvalidHostSortBy, opts.SortBy) + } + if _, valid := api.ValidHostSortBy[opts.SortBy]; !valid { + return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) } var fieldExpr string @@ -811,9 +811,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) } orderByExpr = fmt.Sprintf("ORDER BY %s %s", fieldExpr, opts.SortDir) - orderByArgs = append(orderByArgs, fieldExpr) - whereExprs = append(whereExprs, "COALESCE(?, '') != ''") - whereArgs = append(whereArgs, fieldExpr) + whereExprs = append(whereExprs, fmt.Sprintf("COALESCE(%s, '') != ''", fieldExpr)) } var blockedExpr string @@ -826,9 +824,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er if len(whereExprs) > 0 { whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") } - var args []any - args = append(args, whereArgs...) - args = append(args, orderByArgs...) 
rows, err = tx.Query(ctx, fmt.Sprintf(` SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry, From db4a7e4ddab203fbe27744726d4725a0c4f20d14 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 18:28:49 +0200 Subject: [PATCH 66/98] bus: fix sortBy check --- bus/routes.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index cb9e4b1cf..04b05b9ac 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -487,9 +487,11 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { } // validate sorting params - if _, valid := api.ValidHostSortBy[req.SortBy]; !valid { - jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) - return + if req.SortBy != "" { + if _, valid := api.ValidHostSortBy[req.SortBy]; !valid { + jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) + return + } } switch req.SortDir { case "", api.SortDirAsc, api.SortDirDesc: From 43f9779397bc38989cccb1e82c62ec98d60e01c4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 18:30:39 +0200 Subject: [PATCH 67/98] e2e: fix TestListObjectsWithDelimiterSlash --- stores/sql/main.go | 90 +--------------------------------------------- 1 file changed, 1 insertion(+), 89 deletions(-) diff --git a/stores/sql/main.go b/stores/sql/main.go index a7453d026..c881e6304 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1282,95 +1282,7 @@ func ListObjects(ctx context.Context, tx Tx, bucket, prefix, substring, delim, s default: err = fmt.Errorf("unsupported delimiter: '%s'", delim) } - - // establish sane defaults for sorting - if sortBy == "" { - sortBy = api.ObjectSortByName - } - if sortDir == "" { - sortDir = api.SortDirAsc - } - - // filter by bucket - whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"} - whereArgs := []any{bucket} - - // apply prefix - if prefix != "" 
{ - whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ?") - whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) - } - - // apply sorting - orderByExprs, err := orderByObject(sortBy, sortDir) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) - } - - // apply marker - markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { - err := tx.QueryRow(ctx, fmt.Sprintf(` - SELECT o.%s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE b.name = ? AND o.object_id = ? - `, col), bucket, marker).Scan(dst) - if errors.Is(err, dsql.ErrNoRows) { - return api.ErrMarkerNotFound - } else { - return err - } - }) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to get marker exprs: %w", err) - } - whereExprs = append(whereExprs, markerExprs...) - whereArgs = append(whereArgs, markerArgs...) - - // apply limit - whereArgs = append(whereArgs, limit) - - // run query - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM objects o - WHERE %s - ORDER BY %s - LIMIT ? - `, - tx.SelectObjectMetadataExpr(), - strings.Join(whereExprs, " AND "), - strings.Join(orderByExprs, ", ")), - whereArgs...) 
- if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - - var hasMore bool - var nextMarker string - if len(objects) == limit { - objects = objects[:len(objects)-1] - if len(objects) > 0 { - hasMore = true - nextMarker = objects[len(objects)-1].Key - } - } - - return api.ObjectsListResponse{ - HasMore: hasMore, - NextMarker: nextMarker, - Objects: objects, - }, nil + return } func MultipartUpload(ctx context.Context, tx sql.Tx, uploadID string) (api.MultipartUpload, error) { From 863f8764bad89cfc2613621efc159b309cd32a8d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 6 Sep 2024 18:38:29 +0200 Subject: [PATCH 68/98] api: add IsValidHostSortBy helper --- api/host.go | 7 ++++++- bus/routes.go | 2 +- stores/sql/main.go | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/api/host.go b/api/host.go index 0ac29c228..0422c2bee 100644 --- a/api/host.go +++ b/api/host.go @@ -22,7 +22,7 @@ const ( UsabilityFilterModeUnusable = "unusable" ) -var ValidHostSortBy = map[string]any{ +var validHostSortBy = map[string]any{ // price table "price_table.uid": nil, "price_table.validity": nil, @@ -85,6 +85,11 @@ var ValidHostSortBy = map[string]any{ "settings.siamuxport": nil, } +func IsValidHostSortBy(sortBy string) bool { + _, ok := validHostSortBy[sortBy] + return ok +} + var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. 
diff --git a/bus/routes.go b/bus/routes.go index 04b05b9ac..80f8fb175 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -488,7 +488,7 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { // validate sorting params if req.SortBy != "" { - if _, valid := api.ValidHostSortBy[req.SortBy]; !valid { + if !api.IsValidHostSortBy(req.SortBy) { jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) return } diff --git a/stores/sql/main.go b/stores/sql/main.go index c881e6304..16322934d 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -796,7 +796,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er default: return nil, fmt.Errorf("invalid sort order: %v", opts.SortDir) } - if _, valid := api.ValidHostSortBy[opts.SortBy]; !valid { + if !api.IsValidHostSortBy(opts.SortBy) { return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) } From fe695fad735c7bca38c75033a4007f4d690d83d9 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 9 Sep 2024 09:28:00 +0200 Subject: [PATCH 69/98] stores: remove explorer enabled check --- cmd/renterd/node.go | 2 +- internal/test/e2e/cluster.go | 2 +- stores/settings.go | 6 ------ stores/sql.go | 14 ++++++-------- stores/sql_test.go | 2 +- 5 files changed, 9 insertions(+), 17 deletions(-) diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 58e96c9c8..92c62ae9f 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -264,7 +264,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network if err != nil { return nil, nil, err } - sqlStore, err := stores.NewSQLStore(storeCfg, cfg.Explorer.Disable, network) + sqlStore, err := stores.NewSQLStore(storeCfg, network) if err != nil { return nil, nil, err } diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 13c66520b..ec07a465a 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -554,7 +554,7 @@ func newTestBus(ctx 
context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create store network, genesis := testNetwork() - sqlStore, err := stores.NewSQLStore(storeCfg, true, network) + sqlStore, err := stores.NewSQLStore(storeCfg, network) if err != nil { return nil, nil, nil, nil, err } diff --git a/stores/settings.go b/stores/settings.go index fab149a11..5801de1a9 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -76,12 +76,6 @@ func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{} return fmt.Errorf("failed to fetch setting from db: %w", err) } else if err != nil { value = s.defaultSetting(key) - } else if key == SettingPinned && !s.explorerDisabled { - var ps api.PinnedSettings - if err := json.Unmarshal([]byte(value), &ps); err == nil && ps.Enabled() { - s.logger.Warn("pinned settings are enabled but explorer is disabled, using default settings") - value = s.defaultSetting(key) - } } // unmarshal setting diff --git a/stores/sql.go b/stores/sql.go index dee0a416f..424016927 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -43,9 +43,8 @@ type ( dbMetrics sql.MetricsDatabase logger *zap.SugaredLogger - explorerDisabled bool - network *consensus.Network - walletAddress types.Address + network *consensus.Network + walletAddress types.Address // ObjectDB related fields slabBufferMgr *SlabBufferManager @@ -71,7 +70,7 @@ type ( // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only // pass migrate=true for the first instance of SQLHostDB if you connect via the // same Dialector multiple times. 
-func NewSQLStore(cfg Config, explorerDisabled bool, network *consensus.Network) (*SQLStore, error) { +func NewSQLStore(cfg Config, network *consensus.Network) (*SQLStore, error) { if err := os.MkdirAll(cfg.PartialSlabDir, 0700); err != nil { return nil, fmt.Errorf("failed to create partial slab dir '%s': %v", cfg.PartialSlabDir, err) } @@ -102,10 +101,9 @@ func NewSQLStore(cfg Config, explorerDisabled bool, network *consensus.Network) dbMetrics: dbMetrics, logger: l.Sugar(), - settings: make(map[string]string), - walletAddress: cfg.WalletAddress, - explorerDisabled: explorerDisabled, - network: network, + settings: make(map[string]string), + walletAddress: cfg.WalletAddress, + network: network, slabPruneSigChan: make(chan struct{}, 1), lastPrunedAt: time.Now(), diff --git a/stores/sql_test.go b/stores/sql_test.go index 7a676a5b3..0fd280873 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -180,7 +180,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { LongQueryDuration: 100 * time.Millisecond, LongTxDuration: 100 * time.Millisecond, RetryTransactionIntervals: []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 200 * time.Millisecond}, - }, false, &consensus.Network{}) + }, &consensus.Network{}) if err != nil { t.Fatal("failed to create SQLStore", err) } From e4ed9e6d820b71a97aba10f9160b961aeacaff09 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 9 Sep 2024 09:39:36 +0200 Subject: [PATCH 70/98] sql: swap migrations --- .../mysql/migrations/main/migration_00018_idx_db_directory.sql | 2 +- .../sqlite/migrations/main/migration_00018_idx_db_directory.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql b/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql index 5757fd280..8b1378917 100644 --- a/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql +++ 
b/stores/sql/mysql/migrations/main/migration_00018_idx_db_directory.sql @@ -1 +1 @@ -CREATE INDEX IF NOT EXISTS `idx_objects_db_directory_id` ON `objects`(`db_directory_id`); + diff --git a/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql b/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql index 8b1378917..5757fd280 100644 --- a/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql +++ b/stores/sql/sqlite/migrations/main/migration_00018_idx_db_directory.sql @@ -1 +1 @@ - +CREATE INDEX IF NOT EXISTS `idx_objects_db_directory_id` ON `objects`(`db_directory_id`); From 1d576bc52d4a3b6747ec49a2aa8de15b62a10b82 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 9 Sep 2024 10:53:11 +0200 Subject: [PATCH 71/98] testing: fix TestPinManager NDF --- internal/bus/pinmanager.go | 9 +++---- internal/bus/pinmanager_test.go | 44 ++++++++++++++++----------------- 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 4f38b8ba8..548e6b708 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -47,7 +47,7 @@ type ( updateInterval time.Duration rateWindow time.Duration - triggerChan chan struct{} + triggerChan chan bool closedChan chan struct{} wg sync.WaitGroup @@ -74,7 +74,7 @@ func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, e Ex updateInterval: updateInterval, rateWindow: rateWindow, - triggerChan: make(chan struct{}, 1), + triggerChan: make(chan bool, 1), closedChan: make(chan struct{}), } @@ -109,7 +109,7 @@ func (pm *pinManager) Shutdown(ctx context.Context) error { func (pm *pinManager) TriggerUpdate() { select { - case pm.triggerChan <- struct{}{}: + case pm.triggerChan <- true: default: } } @@ -174,8 +174,7 @@ func (pm *pinManager) run() { select { case <-pm.closedChan: return - case <-pm.triggerChan: - forced = true + case forced = <-pm.triggerChan: case <-t.C: } } diff --git 
a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index 8283144b7..d7800ed71 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -152,6 +152,9 @@ func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, } func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { + ms.mu.Lock() + defer ms.mu.Unlock() + b, err := json.Marshal(ps) if err != nil { return err @@ -160,10 +163,7 @@ func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedS if err := json.Unmarshal(b, &cloned); err != nil { return err } - ms.mu.Lock() ms.ps = cloned - ms.mu.Unlock() - time.Sleep(2 * testUpdateInterval) return nil } @@ -195,17 +195,11 @@ func TestPinManager(t *testing.T) { } }() - // define a small helper to fetch the price manager's rates - rates := func() []float64 { + // waitForUpdate waits for the price manager to update + waitForUpdate := func() { t.Helper() - pm.mu.Lock() - defer pm.mu.Unlock() - return pm.rates - } - - // assert price manager is disabled by default - if cnt := len(rates()); cnt != 0 { - t.Fatalf("expected no rates, got %d", cnt) + pm.triggerChan <- false + time.Sleep(testUpdateInterval) } // enable price pinning @@ -214,7 +208,7 @@ func TestPinManager(t *testing.T) { ps.Threshold = 0.5 s.UpdatePinnedSettings(context.Background(), ps) - // update exchange rate and fetch current gouging settings + // fetch current gouging settings gs, _ := s.GougingSettings(context.Background()) // configure all pins but disable them for now @@ -231,19 +225,20 @@ func TestPinManager(t *testing.T) { // enable the max download pin ps.GougingSettingsPins.MaxDownload.Pinned = true s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() - // adjust the rate - e.setRate(1.5) - time.Sleep(2 * testUpdateInterval) - - // at threshold of .5 the prices should not be updated + // assert prices are not updated if gss, _ := 
s.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { - t.Fatalf("expected gouging settings to be the same, got %v", gss) + t.Fatalf("expected gouging settings to be the same, got %v expected %v", gss, gs) } - // lower the threshold, gouging settings should be updated + // adjust and lower the threshold + e.setRate(1.5) ps.Threshold = 0.05 s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() + + // assert prices are updated if gss, _ := s.GougingSettings(context.Background()); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) } @@ -253,6 +248,7 @@ func TestPinManager(t *testing.T) { ps.GougingSettingsPins.MaxStorage.Pinned = true ps.GougingSettingsPins.MaxUpload.Pinned = true s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert they're all updated if gss, _ := s.GougingSettings(context.Background()); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || @@ -276,6 +272,7 @@ func TestPinManager(t *testing.T) { } ps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert autopilot was not updated if app, _ := s.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -286,6 +283,7 @@ func TestPinManager(t *testing.T) { pins.Allowance.Pinned = true ps.Autopilots[testAutopilotID] = pins s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert autopilot was updated if app, _ := s.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { @@ -294,9 +292,9 @@ func TestPinManager(t *testing.T) { // make explorer return an error e.setUnreachable(true) + waitForUpdate() // assert alert was registered - s.UpdatePinnedSettings(context.Background(), ps) res, _ := 
a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) == 0 { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) @@ -304,9 +302,9 @@ func TestPinManager(t *testing.T) { // make explorer return a valid response e.setUnreachable(false) + waitForUpdate() // assert alert was dismissed - s.UpdatePinnedSettings(context.Background(), ps) res, _ = a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) != 0 { t.Fatalf("expected 0 alerts, got %d", len(a.alerts)) From 51d35ee3d3ad13293c3617ebba89143e9228e3eb Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 9 Sep 2024 11:47:22 +0200 Subject: [PATCH 72/98] gouging: address review comments --- internal/gouging/gouging.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/gouging/gouging.go b/internal/gouging/gouging.go index e226e96f0..f2fc11dec 100644 --- a/internal/gouging/gouging.go +++ b/internal/gouging/gouging.go @@ -326,7 +326,7 @@ func checkUploadGougingRHPv3(gs api.GougingSettings, pt *rhpv3.HostPriceTable) e } uploadPrice := sectorUploadPricePerMonth.Div64(rhpv2.SectorSize) if !gs.MaxUploadPrice.IsZero() && uploadPrice.Cmp(gs.MaxUploadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max ul price: %v > %v", ErrPriceTableGouging, uploadPrice, gs.MaxUploadPrice) + return fmt.Errorf("%w: cost per byte exceeds max ul price: %v > %v", ErrPriceTableGouging, uploadPrice, gs.MaxUploadPrice) } return nil } From c439b8bc35169f2c038f799bf6d3ca578acc0dbf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 9 Sep 2024 13:56:14 +0200 Subject: [PATCH 73/98] bus: handle updatePinnedSettings error --- internal/bus/pinmanager_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index e5158836d..8103a07fe 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -154,7 +154,10 @@ func (ms *mockPinStore) gougingSettings() api.GougingSettings { 
func (ms *mockPinStore) updatPinnedSettings(pps api.PricePinSettings) { b, _ := json.Marshal(pps) - ms.UpdateSetting(context.Background(), api.SettingPricePinning, string(b)) + err := ms.UpdateSetting(context.Background(), api.SettingPricePinning, string(b)) + if err != nil { + panic(err) + } time.Sleep(2 * testUpdateInterval) } From d1fe47741671cba11b46bbaaf051b640d21f307b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 9 Sep 2024 14:12:04 +0200 Subject: [PATCH 74/98] bus: increase sleep in updatPinnedSettings --- internal/bus/pinmanager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index bc37a4d7e..6a4ae55dd 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -154,7 +154,7 @@ func (ms *mockPinStore) updatPinnedSettings(pps api.PricePinSettings) { if err != nil { panic(err) } - time.Sleep(2 * testUpdateInterval) + time.Sleep(10 * testUpdateInterval) } func (ms *mockPinStore) Setting(ctx context.Context, key string) (string, error) { From 197e691f5a33e930af76735b30a29e1222a08db2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 9 Sep 2024 14:43:03 +0200 Subject: [PATCH 75/98] stores: don't filter --- stores/hostdb_test.go | 25 ++++++++++++++++--------- stores/sql/main.go | 1 - 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 601ae088e..bada7249e 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -150,6 +150,13 @@ func TestHosts(t *testing.T) { Success: true, Timestamp: time.Now(), }, + { + HostKey: hk2, + PriceTable: rhpv3.HostPriceTable{}, // empty price table + Settings: rhpv2.HostSettings{}, // empty settings + Success: true, + Timestamp: time.Now(), + }, { HostKey: hk3, PriceTable: rhpv3.HostPriceTable{ @@ -190,10 +197,10 @@ func TestHosts(t *testing.T) { }) if err != nil { t.Fatal(err) - } else if len(his) != 2 { + } else if len(his) != 
3 { t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk3 || his[1].PublicKey != hk1 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) } // reverse order @@ -205,10 +212,10 @@ func TestHosts(t *testing.T) { }) if err != nil { t.Fatal(err) - } else if len(his) != 2 { + } else if len(his) != 3 { t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) + } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 || his[2].PublicKey != hk2 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) } // search all hosts sorted by baserpcprice @@ -219,10 +226,10 @@ func TestHosts(t *testing.T) { }) if err != nil { t.Fatal(err) - } else if len(his) != 2 { + } else if len(his) != 3 { t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk3 || his[1].PublicKey != hk1 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) } // search by invalid key diff --git a/stores/sql/main.go b/stores/sql/main.go index 16322934d..59188b17b 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -811,7 +811,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) } orderByExpr = fmt.Sprintf("ORDER BY %s %s", fieldExpr, opts.SortDir) - whereExprs = append(whereExprs, fmt.Sprintf("COALESCE(%s, '') != ''", fieldExpr)) } var blockedExpr string From 90399de1e0378194a412d01bb1d0995e27be289a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 10 Sep 2024 10:24:11 +0200 Subject: [PATCH 76/98] 
bus: rename key to encryptionkey for slabs --- internal/test/e2e/cluster_test.go | 2 +- object/slab.go | 24 +-- object/slab_test.go | 2 +- stores/metadata_test.go | 282 +++++++++++++++--------------- stores/sql/main.go | 4 +- stores/sql/mysql/main.go | 6 +- stores/sql/sqlite/main.go | 6 +- worker/download.go | 2 +- worker/mocks_test.go | 6 +- worker/upload_utils.go | 6 +- 10 files changed, 170 insertions(+), 170 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index d41edffac..ae92b9809 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1732,7 +1732,7 @@ func TestUploadPacking(t *testing.T) { // and file2 share the same slab. res, err := b.Object(context.Background(), api.DefaultBucketName, "file1", api.GetObjectOptions{}) tt.OK(err) - objs, err := b.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, res.Object.Slabs[0].Key) + objs, err := b.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, res.Object.Slabs[0].EncryptionKey) tt.OK(err) if len(objs) != 2 { t.Fatal("expected 2 objects", len(objs)) diff --git a/object/slab.go b/object/slab.go index e52e7bd7b..55d070142 100644 --- a/object/slab.go +++ b/object/slab.go @@ -23,10 +23,10 @@ type Sector struct { // be used for each Slab, and should not be the same key used for the parent // Object. type Slab struct { - Health float64 `json:"health"` - Key EncryptionKey `json:"key"` - MinShards uint8 `json:"minShards"` - Shards []Sector `json:"shards,omitempty"` + Health float64 `json:"health"` + EncryptionKey EncryptionKey `json:"encryptionKey"` + MinShards uint8 `json:"minShards"` + Shards []Sector `json:"shards,omitempty"` } func (s Slab) IsPartial() bool { @@ -36,18 +36,18 @@ func (s Slab) IsPartial() bool { // NewSlab returns a new slab for the shards. 
func NewSlab(minShards uint8) Slab { return Slab{ - Key: GenerateEncryptionKey(), - MinShards: minShards, + EncryptionKey: GenerateEncryptionKey(), + MinShards: minShards, } } // NewPartialSlab returns a new partial slab. func NewPartialSlab(ec EncryptionKey, minShards uint8) Slab { return Slab{ - Health: 1, - Key: ec, - MinShards: minShards, - Shards: nil, + Health: 1, + EncryptionKey: ec, + MinShards: minShards, + Shards: nil, } } @@ -98,7 +98,7 @@ func (s Slab) Encrypt(shards [][]byte) { wg.Add(1) go func(i int) { nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) + c, _ := chacha20.NewUnauthenticatedCipher(s.EncryptionKey.entropy[:], nonce[:]) c.XORKeyStream(shards[i], shards[i]) wg.Done() }(i) @@ -176,7 +176,7 @@ func (ss SlabSlice) Decrypt(shards [][]byte) { wg.Add(1) go func(i int) { nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) + c, _ := chacha20.NewUnauthenticatedCipher(ss.EncryptionKey.entropy[:], nonce[:]) c.SetCounter(offset) c.XORKeyStream(shards[i], shards[i]) wg.Done() diff --git a/object/slab_test.go b/object/slab_test.go index 1138b7c8b..c4c4a2006 100644 --- a/object/slab_test.go +++ b/object/slab_test.go @@ -72,7 +72,7 @@ func TestReedSolomon(t *testing.T) { func BenchmarkReedSolomon(b *testing.B) { makeSlab := func(m, n uint8) (Slab, []byte, [][]byte) { - return Slab{Key: GenerateEncryptionKey(), MinShards: m, Shards: make([]Sector, n)}, + return Slab{EncryptionKey: GenerateEncryptionKey(), MinShards: m, Shards: make([]Sector, n)}, frand.Bytes(rhpv2.SectorSize * int(m)), make([][]byte, n) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index fb0f63f9f..0195b264f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -148,9 +148,9 @@ func TestPrunableContractRoots(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: 
newTestShards(hks[0], fcids[0], types.Hash256{byte(i)}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hks[0], fcids[0], types.Hash256{byte(i)}), }, }, }, @@ -228,20 +228,20 @@ func TestObjectBasic(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -310,20 +310,20 @@ func TestObjectMetadata(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -575,9 +575,9 @@ func TestContractRoots(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hks[0], fcids[0], types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hks[0], fcids[0], types.Hash256{1}), }, }, }, @@ -672,9 +672,9 @@ func 
TestRenewedContract(t *testing.T) { // good slab { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: append(newTestShards(hk, fcid1, types.Hash256{1}), newTestShards(hk2, fcid2, types.Hash256{2})...), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: append(newTestShards(hk, fcid1, types.Hash256{1}), newTestShards(hk2, fcid2, types.Hash256{2})...), }, }, }, @@ -1079,20 +1079,20 @@ func TestSQLMetadataStore(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -1119,8 +1119,8 @@ func TestSQLMetadataStore(t *testing.T) { } obj.ModTime = api.TimeRFC3339{} - obj1Slab0Key := obj1.Slabs[0].Key - obj1Slab1Key := obj1.Slabs[1].Key + obj1Slab0Key := obj1.Slabs[0].EncryptionKey + obj1Slab1Key := obj1.Slabs[1].EncryptionKey expectedObj := api.Object{ ObjectMetadata: api.ObjectMetadata{ @@ -1139,9 +1139,9 @@ func TestSQLMetadataStore(t *testing.T) { Offset: 10, Length: 100, Slab: object.Slab{ - Health: 1, - Key: obj1Slab0Key, - MinShards: 1, + Health: 1, + EncryptionKey: obj1Slab0Key, + MinShards: 1, Shards: []object.Sector{ { LatestHost: hk1, @@ -1157,9 +1157,9 @@ func TestSQLMetadataStore(t *testing.T) { Offset: 20, Length: 200, Slab: object.Slab{ - Health: 1, - Key: obj1Slab1Key, - MinShards: 2, + Health: 1, + EncryptionKey: obj1Slab1Key, + MinShards: 2, Shards: []object.Sector{ { LatestHost: hk2, @@ -1210,9 +1210,9 @@ 
func TestSQLMetadataStore(t *testing.T) { } expectedObjSlab1 := object.Slab{ - Health: 1, - Key: obj1Slab0Key, - MinShards: 1, + Health: 1, + EncryptionKey: obj1Slab0Key, + MinShards: 1, Shards: []object.Sector{ { Contracts: map[types.PublicKey][]types.FileContractID{ @@ -1249,9 +1249,9 @@ func TestSQLMetadataStore(t *testing.T) { } expectedObjSlab2 := object.Slab{ - Health: 1, - Key: obj1Slab1Key, - MinShards: 2, + Health: 1, + EncryptionKey: obj1Slab1Key, + MinShards: 2, Shards: []object.Sector{ { Contracts: map[types.PublicKey][]types.FileContractID{ @@ -1395,8 +1395,8 @@ func TestObjectHealth(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{1}), newTestShard(hks[1], fcids[1], types.Hash256{2}), @@ -1407,8 +1407,8 @@ func TestObjectHealth(t *testing.T) { }, { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hks[1], fcids[1], types.Hash256{5}), newTestShard(hks[2], fcids[2], types.Hash256{6}), @@ -1832,8 +1832,8 @@ func TestUnhealthySlabs(t *testing.T) { // good slab { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk2, fcid2, types.Hash256{2}), @@ -1844,8 +1844,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - hk4 is bad (1/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{4}), newTestShard(hk2, fcid2, types.Hash256{5}), @@ -1856,8 +1856,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy 
slab - hk4 is bad (2/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{7}), newTestShard(hk4, fcid4, types.Hash256{8}), @@ -1868,8 +1868,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - hk5 is deleted (1/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{10}), newTestShard(hk2, fcid2, types.Hash256{11}), @@ -1880,8 +1880,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - h1 is reused { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{13}), newTestShard(hk1, fcid4, types.Hash256{14}), @@ -1892,8 +1892,8 @@ func TestUnhealthySlabs(t *testing.T) { // lost slab - no good pieces (0/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{16}), newTestShard(hk2, fcid2, types.Hash256{17}), @@ -1926,10 +1926,10 @@ func TestUnhealthySlabs(t *testing.T) { } expected := []api.UnhealthySlab{ - {EncryptionKey: obj.Slabs[2].Key, Health: 0}, - {EncryptionKey: obj.Slabs[4].Key, Health: 0}, - {EncryptionKey: obj.Slabs[1].Key, Health: 0.5}, - {EncryptionKey: obj.Slabs[3].Key, Health: 0.5}, + {EncryptionKey: obj.Slabs[2].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[4].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[1].EncryptionKey, Health: 0.5}, + {EncryptionKey: obj.Slabs[3].EncryptionKey, Health: 0.5}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -1947,8 +1947,8 @@ func 
TestUnhealthySlabs(t *testing.T) { } expected = []api.UnhealthySlab{ - {EncryptionKey: obj.Slabs[2].Key, Health: 0}, - {EncryptionKey: obj.Slabs[4].Key, Health: 0}, + {EncryptionKey: obj.Slabs[2].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[4].EncryptionKey, Health: 0}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order", slabs, expected) @@ -1997,8 +1997,8 @@ func TestUnhealthySlabsNegHealth(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk1, fcid1, types.Hash256{2}), @@ -2056,9 +2056,9 @@ func TestUnhealthySlabsNoContracts(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, }, }, @@ -2137,16 +2137,16 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { // hk1 is good so this slab should have full health. { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, }, // hk4 is bad so this slab should have no health. 
{ Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, Shards: []object.Sector{ newTestShard(hk2, fcid2, types.Hash256{2}), newTestShard(hk3, fcid3, types.Hash256{4}), @@ -2172,7 +2172,7 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { } expected := []api.UnhealthySlab{ - {EncryptionKey: obj.Slabs[1].Slab.Key, Health: -1}, + {EncryptionKey: obj.Slabs[1].Slab.EncryptionKey, Health: -1}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -2205,8 +2205,8 @@ func TestContractSectors(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ sectorGood, }, @@ -2278,8 +2278,8 @@ func TestUpdateSlab(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk2, fcid2, types.Hash256{2}), @@ -2294,7 +2294,7 @@ func TestUpdateSlab(t *testing.T) { } // extract the slab key - key, err := obj.Slabs[0].Key.MarshalBinary() + key, err := obj.Slabs[0].EncryptionKey.MarshalBinary() if err != nil { t.Fatal(err) } @@ -2302,7 +2302,7 @@ func TestUpdateSlab(t *testing.T) { // helper to fetch a slab from the database fetchSlab := func() (slab object.Slab) { t.Helper() - if slab, err = ss.Slab(ctx, obj.Slabs[0].Key); err != nil { + if slab, err = ss.Slab(ctx, obj.Slabs[0].EncryptionKey); err != nil { t.Fatal(err) } return @@ -2416,8 +2416,8 @@ func TestUpdateSlab(t *testing.T) { t.Fatal(err) } else if len(obj.Slabs) != 1 { t.Fatalf("unexpected number of slabs, %v != 1", len(obj.Slabs)) - } else if obj.Slabs[0].Key.String() != updated.Key.String() { - t.Fatalf("unexpected slab, %v != %v", 
obj.Slabs[0].Key, updated.Key) + } else if obj.Slabs[0].EncryptionKey.String() != updated.EncryptionKey.String() { + t.Fatalf("unexpected slab, %v != %v", obj.Slabs[0].EncryptionKey, updated.EncryptionKey) } // update the slab to change its contract set. @@ -2448,9 +2448,9 @@ func newTestObject(slabs int) object.Object { length := offset + uint32(frand.Uint64n(1<<22)) obj.Slabs[i] = object.SlabSlice{ Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: n, - Shards: make([]object.Sector, n*2), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: n, + Shards: make([]object.Sector, n*2), }, Offset: offset, Length: length, @@ -2885,7 +2885,7 @@ func TestPartialSlab(t *testing.T) { } else if bufferSize != rhpv2.SectorSize { t.Fatal("unexpected buffer size", bufferSize) } - data, err := ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length) + data, err := ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, slab1Data) { @@ -2910,7 +2910,7 @@ func TestPartialSlab(t *testing.T) { return } - buffer := fetchBuffer(slabs[0].Key) + buffer := fetchBuffer(slabs[0].EncryptionKey) if buffer.Filename == "" { t.Fatal("empty filename") } @@ -2924,9 +2924,9 @@ func TestPartialSlab(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, frand.Entropy256()), newTestShard(hk2, fcid2, frand.Entropy256()), @@ -2962,7 +2962,7 @@ func TestPartialSlab(t *testing.T) { } else if bufferSize != rhpv2.SectorSize { t.Fatal("unexpected buffer size", bufferSize) } - data, err = ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length) + data, err = ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length) if err != nil { 
t.Fatal(err) } else if !bytes.Equal(data, slab2Data) { @@ -2997,16 +2997,16 @@ func TestPartialSlab(t *testing.T) { if bufferSize != 2*rhpv2.SectorSize { t.Fatal("unexpected buffer size", bufferSize) } - if data1, err := ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length); err != nil { + if data1, err := ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length); err != nil { t.Fatal(err) - } else if data2, err := ss.FetchPartialSlab(ctx, slabs[1].Key, slabs[1].Offset, slabs[1].Length); err != nil { + } else if data2, err := ss.FetchPartialSlab(ctx, slabs[1].EncryptionKey, slabs[1].Offset, slabs[1].Length); err != nil { t.Fatal(err) } else if !bytes.Equal(slab3Data, append(data1, data2...)) { t.Fatal("wrong data") } assertBuffer(buffer1Name, rhpv2.SectorSize, true, false) - buffer = fetchBuffer(slabs[1].Key) + buffer = fetchBuffer(slabs[1].EncryptionKey) buffer2Name := buffer.Filename assertBuffer(buffer2Name, 1, false, false) @@ -3056,7 +3056,7 @@ func TestPartialSlab(t *testing.T) { } assertBuffer(buffer2Name, 1, false, false) - _, err = ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length) + _, err = ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length) if !errors.Is(err, api.ErrObjectNotFound) { t.Fatal("expected ErrObjectNotFound", err) } @@ -3172,9 +3172,9 @@ func TestContractSizes(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hks[i], fcids[i], types.Hash256{byte(i)}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hks[i], fcids[i], types.Hash256{byte(i)}), }, }, }, @@ -3275,10 +3275,10 @@ func TestObjectsBySlabKey(t *testing.T) { // create a slab. 
slab := object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), } // Add 3 objects that all reference the slab. @@ -3300,7 +3300,7 @@ func TestObjectsBySlabKey(t *testing.T) { } // Fetch the objects by slab. - objs, err := ss.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, slab.Key) + objs, err := ss.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, slab.EncryptionKey) if err != nil { t.Fatal(err) } @@ -3515,11 +3515,11 @@ func TestBucketObjects(t *testing.T) { // See if we can fetch the object by slab. if obj, err := ss.Object(context.Background(), b1, "/bar"); err != nil { t.Fatal(err) - } else if objects, err := ss.ObjectsBySlabKey(context.Background(), b1, obj.Slabs[0].Key); err != nil { + } else if objects, err := ss.ObjectsBySlabKey(context.Background(), b1, obj.Slabs[0].EncryptionKey); err != nil { t.Fatal(err) } else if len(objects) != 1 { t.Fatal("expected 1 object", len(objects)) - } else if objects, err := ss.ObjectsBySlabKey(context.Background(), b2, obj.Slabs[0].Key); err != nil { + } else if objects, err := ss.ObjectsBySlabKey(context.Background(), b2, obj.Slabs[0].EncryptionKey); err != nil { t.Fatal(err) } else if len(objects) != 0 { t.Fatal("expected 0 objects", len(objects)) @@ -3776,8 +3776,8 @@ func TestDeleteHostSector(t *testing.T) { // create a healthy slab with one sector that is uploaded to all contracts. 
root := types.Hash256{1, 2, 3} ss.InsertSlab(object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ { Contracts: map[types.PublicKey][]types.FileContractID{ @@ -3943,9 +3943,9 @@ func TestUpdateSlabSanityChecks(t *testing.T) { shards = append(shards, newTestShard(hks[i], contracts[i].ID, types.Hash256{byte(i + 1)})) } slab := object.Slab{ - Key: object.GenerateEncryptionKey(), - Shards: shards, - Health: 1, + EncryptionKey: object.GenerateEncryptionKey(), + Shards: shards, + Health: 1, } // set slab. @@ -3958,7 +3958,7 @@ func TestUpdateSlabSanityChecks(t *testing.T) { } // verify slab. - rSlab, err := ss.Slab(context.Background(), slab.Key) + rSlab, err := ss.Slab(context.Background(), slab.EncryptionKey) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(slab, rSlab) { @@ -3967,8 +3967,8 @@ func TestUpdateSlabSanityChecks(t *testing.T) { // change the length to fail the update. 
if err := ss.UpdateSlab(context.Background(), object.Slab{ - Key: slab.Key, - Shards: shards[:len(shards)-1], + EncryptionKey: slab.EncryptionKey, + Shards: shards[:len(shards)-1], }, testContractSet); !errors.Is(err, isql.ErrInvalidNumberOfShards) { t.Fatal(err) } @@ -3980,8 +3980,8 @@ func TestUpdateSlabSanityChecks(t *testing.T) { reversedShards[i], reversedShards[j] = reversedShards[j], reversedShards[i] } reversedSlab := object.Slab{ - Key: slab.Key, - Shards: reversedShards, + EncryptionKey: slab.EncryptionKey, + Shards: reversedShards, } if err := ss.UpdateSlab(context.Background(), reversedSlab, testContractSet); !errors.Is(err, isql.ErrShardRootChanged) { t.Fatal(err) @@ -4035,7 +4035,7 @@ func TestSlabHealthInvalidation(t *testing.T) { _, err = ss.addTestObject("o1", object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: s1, + EncryptionKey: s1, Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{0}), newTestShard(hks[1], fcids[1], types.Hash256{1}), @@ -4051,7 +4051,7 @@ func TestSlabHealthInvalidation(t *testing.T) { err = ss.UpdateObject(context.Background(), api.DefaultBucketName, "o2", testContractSet, testETag, testMimeType, testMetadata, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: s2, + EncryptionKey: s2, Shards: []object.Sector{ newTestShard(hks[2], fcids[2], types.Hash256{2}), newTestShard(hks[3], fcids[3], types.Hash256{3}), @@ -4181,8 +4181,8 @@ func TestRefreshHealth(t *testing.T) { if added, err := ss.addTestObject(o1, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - MinShards: 2, - Key: object.GenerateEncryptionKey(), + MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{0}), newTestShard(hks[1], fcids[1], types.Hash256{1}), @@ -4200,8 +4200,8 @@ func TestRefreshHealth(t *testing.T) { 
if added, err := ss.addTestObject(o2, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - MinShards: 2, - Key: object.GenerateEncryptionKey(), + MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), Shards: []object.Sector{ newTestShard(hks[4], fcids[4], types.Hash256{4}), newTestShard(hks[5], fcids[5], types.Hash256{5}), @@ -4409,8 +4409,8 @@ func TestUpdateObjectReuseSlab(t *testing.T) { Offset: 0, Length: uint32(minShards) * rhpv2.SectorSize, Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: uint8(minShards), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: uint8(minShards), }, }) } @@ -4561,7 +4561,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) { t.Fatal("invalid minShards", slab.MinShards) } else if slab.TotalShards != uint8(totalShards) { t.Fatal("invalid totalShards", slab.TotalShards) - } else if slab.Key.String() != obj.Slabs[i].Key.String() { + } else if slab.Key.String() != obj.Slabs[i].EncryptionKey.String() { t.Fatal("wrong key") } @@ -4591,8 +4591,8 @@ func TestUpdateObjectReuseSlab(t *testing.T) { Offset: 0, Length: uint32(minShards) * rhpv2.SectorSize, Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: uint8(minShards), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: uint8(minShards), }, }) // 30 shards each @@ -4664,7 +4664,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) { t.Fatal("invalid minShards", slab2.MinShards) } else if slab2.TotalShards != uint8(totalShards) { t.Fatal("invalid totalShards", slab2.TotalShards) - } else if slab2.Key.String() != obj2.Slabs[0].Key.String() { + } else if slab2.Key.String() != obj2.Slabs[0].EncryptionKey.String() { t.Fatal("wrong key") } @@ -4766,20 +4766,20 @@ func TestUpdateObjectParallel(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, frand.Entropy256()), + 
Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, frand.Entropy256()), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, frand.Entropy256()), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, frand.Entropy256()), }, Offset: 20, Length: 200, diff --git a/stores/sql/main.go b/stores/sql/main.go index 9a94d6c47..94c0ec932 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -1992,7 +1992,7 @@ func Settings(ctx context.Context, tx sql.Tx) ([]string, error) { func Slab(ctx context.Context, tx sql.Tx, key object.EncryptionKey) (object.Slab, error) { // fetch slab var slabID int64 - slab := object.Slab{Key: key} + slab := object.Slab{EncryptionKey: key} err := tx.QueryRow(ctx, ` SELECT id, health, min_shards FROM slabs sla @@ -2579,7 +2579,7 @@ func Object(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) var hk types.PublicKey if err := rows.Scan(&bufferedSlab, // whether the slab is buffered &objectIndex, &ss.Offset, &ss.Length, // slice info - &ss.Health, (*EncryptionKey)(&ss.Key), &ss.MinShards, // slab info + &ss.Health, (*EncryptionKey)(&ss.EncryptionKey), &ss.MinShards, // slab info &slabIndex, (*Hash256)(§or.Root), (*PublicKey)(§or.LatestHost), // sector info (*PublicKey)(&fcid), // contract info (*PublicKey)(&hk), // host info diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 3da866d5b..a883b771f 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -1083,7 +1083,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac health_valid_until = ?, health = ? WHERE `+"`key`"+` = ? 
- `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.Key)) + `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.EncryptionKey)) if err != nil { return err } else if n, err := res.RowsAffected(); err != nil { @@ -1094,7 +1094,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac // fetch slab id and total shards var slabID, totalShards int64 - err = tx.QueryRow(ctx, "SELECT id, total_shards FROM slabs WHERE `key` = ?", ssql.EncryptionKey(s.Key)). + err = tx.QueryRow(ctx, "SELECT id, total_shards FROM slabs WHERE `key` = ?", ssql.EncryptionKey(s.EncryptionKey)). Scan(&slabID, &totalShards) if err != nil { return err @@ -1263,7 +1263,7 @@ func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, res, err := insertSlabStmt.Exec(ctx, time.Now(), contractSetID, - ssql.EncryptionKey(slices[i].Key), + ssql.EncryptionKey(slices[i].EncryptionKey), slices[i].MinShards, uint8(len(slices[i].Shards)), ) diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 18565e41b..d2679eb29 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -1096,7 +1096,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac health = ? WHERE key = ? RETURNING id, total_shards - `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.Key)). + `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.EncryptionKey)). 
Scan(&slabID, &totalShards) if errors.Is(err, dsql.ErrNoRows) { return api.ErrSlabNotFound @@ -1263,12 +1263,12 @@ func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, err = insertSlabStmt.QueryRow(ctx, time.Now(), contractSetID, - ssql.EncryptionKey(slices[i].Key), + ssql.EncryptionKey(slices[i].EncryptionKey), slices[i].MinShards, uint8(len(slices[i].Shards)), ).Scan(&slabIDs[i]) if errors.Is(err, dsql.ErrNoRows) { - if err := querySlabIDStmt.QueryRow(ctx, ssql.EncryptionKey(slices[i].Key)).Scan(&slabIDs[i]); err != nil { + if err := querySlabIDStmt.QueryRow(ctx, ssql.EncryptionKey(slices[i].EncryptionKey)).Scan(&slabIDs[i]); err != nil { return fmt.Errorf("failed to fetch slab id: %w", err) } } else if err != nil { diff --git a/worker/download.go b/worker/download.go index e1c54771d..cedcd7a82 100644 --- a/worker/download.go +++ b/worker/download.go @@ -173,7 +173,7 @@ func (mgr *downloadManager) DownloadObject(ctx context.Context, w io.Writer, o o if !slabs[i].PartialSlab { continue } - data, slab, err := mgr.fetchPartialSlab(ctx, slabs[i].SlabSlice.Key, slabs[i].SlabSlice.Offset, slabs[i].SlabSlice.Length) + data, slab, err := mgr.fetchPartialSlab(ctx, slabs[i].SlabSlice.EncryptionKey, slabs[i].SlabSlice.Offset, slabs[i].SlabSlice.Length) if err != nil { return fmt.Errorf("failed to fetch partial slab data: %w", err) } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 20657940f..682f8178e 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -480,7 +480,7 @@ func (os *objectStoreMock) Slab(ctx context.Context, key object.EncryptionKey) ( os.forEachObject(func(bucket, objKey string, o object.Object) { for _, s := range o.Slabs { - if s.Slab.Key.String() == key.String() { + if s.Slab.EncryptionKey.String() == key.String() { slab = s.Slab return } @@ -496,7 +496,7 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra os.forEachObject(func(bucket, objKey string, o object.Object) { for 
i, slab := range o.Slabs { - if slab.Key.String() != s.Key.String() { + if slab.EncryptionKey.String() != s.EncryptionKey.String() { continue } // update slab @@ -566,7 +566,7 @@ func (os *objectStoreMock) MarkPackedSlabsUploaded(ctx context.Context, slabs [] slabKeyToSlab := make(map[string]*object.Slab) os.forEachObject(func(bucket, objKey string, o object.Object) { for i, slab := range o.Slabs { - slabKeyToSlab[slab.Slab.Key.String()] = &os.objects[bucket][objKey].Slabs[i].Slab + slabKeyToSlab[slab.Slab.EncryptionKey.String()] = &os.objects[bucket][objKey].Slabs[i].Slab } }) diff --git a/worker/upload_utils.go b/worker/upload_utils.go index 306e1774f..6dfc9b729 100644 --- a/worker/upload_utils.go +++ b/worker/upload_utils.go @@ -10,9 +10,9 @@ import ( func encryptPartialSlab(data []byte, key object.EncryptionKey, minShards, totalShards uint8) [][]byte { slab := object.Slab{ - Key: key, - MinShards: minShards, - Shards: make([]object.Sector, totalShards), + EncryptionKey: key, + MinShards: minShards, + Shards: make([]object.Sector, totalShards), } encodedShards := make([][]byte, totalShards) slab.Encode(data, encodedShards) From bcbeda8b2f5090f61ca85e6d5bd14fb9543113bd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 10 Sep 2024 15:20:18 +0200 Subject: [PATCH 77/98] bus: fill in Relevant field of Event response --- bus/routes.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bus/routes.go b/bus/routes.go index c1c5d9d70..170c84ab6 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -288,6 +288,15 @@ func (b *Bus) walletEventsHandler(jc jape.Context) { if jc.Check("couldn't load events", err) != nil { return } + addr := b.w.Address() + for i := range events { + // NOTE: add the wallet's address to every event. Theoretically, + // this information should be persisted next to the event but + // using a SingleAddress the address should always be set because + // only relevant events are persisted and because the wallet only + // has one address. 
+ events[i].Relevant = []types.Address{addr} + } jc.Encode(events) } From 525a29860e77194cdd1c8dd5e9261ddfa37499ec Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 10 Sep 2024 15:25:42 +0200 Subject: [PATCH 78/98] e2e: extend TestWalletEvents --- bus/routes.go | 4 ++-- internal/test/e2e/cluster_test.go | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/bus/routes.go b/bus/routes.go index 170c84ab6..778034bb7 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -288,14 +288,14 @@ func (b *Bus) walletEventsHandler(jc jape.Context) { if jc.Check("couldn't load events", err) != nil { return } - addr := b.w.Address() + relevant := []types.Address{b.w.Address()} for i := range events { // NOTE: add the wallet's address to every event. Theoretically, // this information should be persisted next to the event but // using a SingleAddress the address should always be set because // only relevant events are persisted and because the wallet only // has one address. - events[i].Relevant = []types.Address{addr} + events[i].Relevant = relevant } jc.Encode(events) } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index d41edffac..331b905fa 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1572,6 +1572,15 @@ func TestWalletEvents(t *testing.T) { if len(txns) != 5 { t.Fatalf("expected exactly 5 events, got %v", len(txns)) } + + // Events should have 'Relevant' field set. 
+ resp, err := b.Wallet(context.Background()) + tt.OK(err) + for _, txn := range txns { + if len(txn.Relevant) != 1 || txn.Relevant[0] != resp.Address { + t.Fatal("invalid 'Relevant' field in wallet event", txn.Relevant, resp.Address) + } + } } func TestUploadPacking(t *testing.T) { From e082fdfd92cc32e02dfc5aad7f2346ec94da1ebd Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 10 Sep 2024 23:17:38 +0200 Subject: [PATCH 79/98] stores: use raw SQL in the migration --- internal/sql/migrations.go | 150 +----------------- .../main/migration_00018_settings.sql | 84 +++++++++- .../main/migration_00018_settings.sql | 46 +++++- 3 files changed, 129 insertions(+), 151 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 12e21cb4b..029aba4ea 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -3,12 +3,10 @@ package sql import ( "context" "embed" - "encoding/json" "fmt" "strings" "unicode/utf8" - "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -223,153 +221,7 @@ var ( { ID: "00018_settings", Migrate: func(tx Tx) error { - log.Infof("performing %s migration '00018_settings'", dbIdentifier) - - // fetch all settings - rows, err := tx.Query(ctx, "SELECT `key`, value FROM settings") - if err != nil { - return fmt.Errorf("failed to fetch settings: %v", err) - } - defer rows.Close() - - settings := make(map[string]string) - for rows.Next() { - var k, v string - if err := rows.Scan(&k, &v); err != nil { - return fmt.Errorf("failed to scan setting: %v", err) - } - settings[k] = v - } - - // migrate gouging settings - if v, ok := settings["gouging"]; ok { - var gs api.GougingSettings - err := json.Unmarshal([]byte(v), &gs) - if err == nil { - err = gs.Validate() - } - if err != nil { - log.Warnf("gouging settings are not being migrated, err: %v", err) - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "gouging"); err != nil { - return fmt.Errorf("failed to delete gouging 
settings: %v", err) - } - } - } else { - log.Warn("no gouging settings found") - } - - // migrate pinning settings - if v, ok := settings["pricepinning"]; ok { - var ps api.PinnedSettings - err := json.Unmarshal([]byte(v), &ps) - if err == nil { - err = ps.Validate() - } - if err == nil { - updated, _ := json.Marshal(ps) - if err := m.UpdateSetting(ctx, tx, "pinned", string(updated)); err != nil { - return fmt.Errorf("failed to insert pinned settings: %v", err) - } - } else { - log.Warnf("pricepinning settings are not being migrated, err: %v", err) - } - - // always delete because it got renamed - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "pricepinning"); err != nil { - log.Warnf("failed to delete pricepinning settings: %v", err) - } - } else { - log.Warn("no pricepinning settings found") - } - - // migrate S3 authentication settings - if v, ok := settings["s3authentication"]; ok { - var s3s api.S3Settings - err := json.Unmarshal([]byte(v), &s3s.Authentication) - if err == nil { - err = s3s.Validate() - } - if err == nil { - updated, _ := json.Marshal(s3s) - if err := m.UpdateSetting(ctx, tx, "s3", string(updated)); err != nil { - return fmt.Errorf("failed to insert s3 settings: %v", err) - } - } else { - log.Warnf("s3authentication settings are not being migrated, err: %v", err) - } - - // always delete because it got renamed - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "s3authentication"); err != nil { - log.Warnf("failed to delete s3authentication settings: %v", err) - } - } else { - log.Warn("no s3authentication setting found") - } - - // migrate upload settings - us := api.DefaultUploadSettings("mainnet") - - if v, ok := settings["contractset"]; ok { - var css struct { - Default string `json:"default"` - } - if err := json.Unmarshal([]byte(v), &css); err != nil { - log.Warnf("contractset settings are not being migrated, err: %v", err) - } else { - us.DefaultContractSet = css.Default - } - - // always delete 
because it got replaced - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "contractset"); err != nil { - return err - } - } - - if v, ok := settings["uploadpacking"]; ok { - var ups api.UploadPackingSettings - if err := json.Unmarshal([]byte(v), &ups); err != nil { - log.Warnf("uploadpacking settings are not being migrated, err: %v", err) - } else { - us.Packing = ups - } - - // always delete because it got replaced - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "uploadpacking"); err != nil { - return err - } - } - - if v, ok := settings["redundancy"]; ok { - var rs api.RedundancySettings - err := json.Unmarshal([]byte(v), &rs) - if err == nil { - err = rs.Validate() - } - if err != nil { - log.Warnf("redundancy settings are not being migrated, err: %v", err) - } else { - us.Redundancy = rs - } - - // always delete because it got replaced - if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", "redundancy"); err != nil { - return err - } - } - - // update upload settings - if err := us.Validate(); err != nil { - log.Warnf("upload settings are not being migrated, err: %v", err) - return err // developer error - } else { - updated, _ := json.Marshal(us) - if err := m.UpdateSetting(ctx, tx, "upload", string(updated)); err != nil { - return fmt.Errorf("failed to insert upload settings: %v", err) - } - } - - log.Info("migration '00018_settings' complete") - return nil + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00018_settings", log) }, }, } diff --git a/stores/sql/mysql/migrations/main/migration_00018_settings.sql b/stores/sql/mysql/migrations/main/migration_00018_settings.sql index 09a7b0592..0ac46b266 100644 --- a/stores/sql/mysql/migrations/main/migration_00018_settings.sql +++ b/stores/sql/mysql/migrations/main/migration_00018_settings.sql @@ -1 +1,83 @@ --- placeholder +-- avoid duplicate key errors +DELETE FROM settings WHERE `key` IN ("s3", "upload", "pinned"); + +-- migrate settings +INSERT 
INTO settings (created_at, `key`, value) +SELECT NOW(), k, v +FROM ( + -- upload is a combination of uploadpacking, redundancy, and contractset + SELECT + "upload" as k, + JSON_MERGE_PATCH( + JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), + JSON_MERGE_PATCH( + JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), + JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ) as v + WHERE JSON_EXTRACT( + JSON_MERGE_PATCH( + JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), + JSON_MERGE_PATCH( + JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), + JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ), "$.packing" + ) IS NOT NULL + AND JSON_EXTRACT( + JSON_MERGE_PATCH( + JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), + JSON_MERGE_PATCH( + JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), + JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ), "$.redundancy" + ) IS NOT NULL + + UNION ALL + + -- s3 wraps the s3authentication setting + SELECT + "s3" as k, + JSON_OBJECT("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "s3authentication")) as v + WHERE JSON_EXTRACT( + JSON_OBJECT("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "s3authentication")), + "$.authentication" + ) IS NOT NULL + + UNION ALL + + -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' fields + SELECT + "pinned" as k, + JSON_REMOVE( + JSON_REMOVE( + (SELECT JSON_EXTRACT(value, "$") FROM settings 
WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ) as v + WHERE JSON_EXTRACT( + JSON_REMOVE( + JSON_REMOVE( + (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ), + "$.currency" + ) IS NOT NULL + AND JSON_EXTRACT( + JSON_REMOVE( + JSON_REMOVE( + (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ), + "$.threshold" + ) IS NOT NULL +) as migration; + +-- delete old settings (TODO: should we?) +DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning"); diff --git a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql index 09a7b0592..aef6d775a 100644 --- a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql +++ b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql @@ -1 +1,45 @@ --- placeholder +-- avoid duplicate key errors +DELETE FROM settings WHERE `key` IN ("s3", "upload", "pinned"); + +-- migrate settings +INSERT INTO settings (created_at, `key`, value) +SELECT DATETIME('now'), k, v +FROM ( + -- upload is a combination of uploadpacking, redundancy, and contractset + SELECT + "upload" as k, + json_patch( + json_object("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "uploadpacking")), + json_patch( + json_object("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "redundancy")), + json_object("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE key = "contractset")) + ) + ) as v + WHERE json_extract(v, "$.packing") IS NOT NULL + AND json_extract(v, "$.redundancy") IS NOT NULL + + UNION ALL + + -- s3 wraps the s3authentication setting + SELECT + "s3" as k, + json_object("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "s3authentication")) as v + 
WHERE json_extract(v, "$.authentication") IS NOT NULL + + UNION ALL + + -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' fields + SELECT + "pinned" as k, + json_remove( + json_remove( + (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ) as v + WHERE json_extract(v, "$.currency") IS NOT NULL AND json_extract(v, "$.threshold") IS NOT NULL +) + +-- delete old settings +DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning"); \ No newline at end of file From dafe545d655b5b7dd5fce781b8492e96cd90d416 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 10 Sep 2024 23:19:40 +0200 Subject: [PATCH 80/98] stores: cleanup migration --- .../main/migration_00018_settings.sql | 62 +++++++++---------- .../main/migration_00018_settings.sql | 10 +-- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/stores/sql/mysql/migrations/main/migration_00018_settings.sql b/stores/sql/mysql/migrations/main/migration_00018_settings.sql index 0ac46b266..006e97784 100644 --- a/stores/sql/mysql/migrations/main/migration_00018_settings.sql +++ b/stores/sql/mysql/migrations/main/migration_00018_settings.sql @@ -8,28 +8,28 @@ FROM ( -- upload is a combination of uploadpacking, redundancy, and contractset SELECT "upload" as k, - JSON_MERGE_PATCH( - JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), - JSON_MERGE_PATCH( - JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), - JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE `key` = 
"redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) ) ) as v - WHERE JSON_EXTRACT( - JSON_MERGE_PATCH( - JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), - JSON_MERGE_PATCH( - JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), - JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + WHERE json_extract( + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) ) ), "$.packing" ) IS NOT NULL - AND JSON_EXTRACT( - JSON_MERGE_PATCH( - JSON_OBJECT("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "uploadpacking")), - JSON_MERGE_PATCH( - JSON_OBJECT("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "redundancy")), - JSON_OBJECT("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE `key` = "contractset")) + AND json_extract( + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) ) ), "$.redundancy" ) IS NOT NULL @@ -39,9 +39,9 @@ FROM ( -- s3 wraps the s3authentication setting SELECT "s3" as k, - JSON_OBJECT("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "s3authentication")) as v - WHERE 
JSON_EXTRACT( - JSON_OBJECT("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "s3authentication")), + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "s3authentication")) as v + WHERE json_extract( + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "s3authentication")), "$.authentication" ) IS NOT NULL @@ -50,27 +50,27 @@ FROM ( -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' fields SELECT "pinned" as k, - JSON_REMOVE( - JSON_REMOVE( - (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "pricepinning"), + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), "$.enabled" ), "$.forexEndpointURL" ) as v - WHERE JSON_EXTRACT( - JSON_REMOVE( - JSON_REMOVE( - (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "pricepinning"), + WHERE json_extract( + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), "$.enabled" ), "$.forexEndpointURL" ), "$.currency" ) IS NOT NULL - AND JSON_EXTRACT( - JSON_REMOVE( - JSON_REMOVE( - (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE `key` = "pricepinning"), + AND json_extract( + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), "$.enabled" ), "$.forexEndpointURL" diff --git a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql index aef6d775a..91d3b2aa0 100644 --- a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql +++ b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql @@ -9,10 +9,10 @@ FROM ( SELECT "upload" as k, json_patch( - json_object("packing", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "uploadpacking")), + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE 
key = "uploadpacking")), json_patch( - json_object("redundancy", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "redundancy")), - json_object("defaultContractSet", (SELECT JSON_EXTRACT(value, "$.default") FROM settings WHERE key = "contractset")) + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE key = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE key = "contractset")) ) ) as v WHERE json_extract(v, "$.packing") IS NOT NULL @@ -23,7 +23,7 @@ FROM ( -- s3 wraps the s3authentication setting SELECT "s3" as k, - json_object("authentication", (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "s3authentication")) as v + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE key = "s3authentication")) as v WHERE json_extract(v, "$.authentication") IS NOT NULL UNION ALL @@ -33,7 +33,7 @@ FROM ( "pinned" as k, json_remove( json_remove( - (SELECT JSON_EXTRACT(value, "$") FROM settings WHERE key = "pricepinning"), + (SELECT json_extract(value, "$") FROM settings WHERE key = "pricepinning"), "$.enabled" ), "$.forexEndpointURL" From ac8a663ad22f3c029ecb866924ce95f248c7b5a2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 11 Sep 2024 10:25:50 +0200 Subject: [PATCH 81/98] stores: fix SQLite migration --- bus/bus.go | 7 ++----- .../migrations/main/migration_00018_gouging_units.sql | 4 ++-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index fa9e02cf8..79623615c 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -340,6 +340,7 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa masterKey: masterKey, accounts: store, + explorer: ibus.NewExplorer(explorerURL), s: s, cm: cm, w: w, @@ -366,15 +367,11 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa // create contract locker b.contractLocker = ibus.NewContractLocker() - // create
explorer - e := ibus.NewExplorer(explorerURL) - b.explorer = e - // create sectors cache b.sectors = ibus.NewSectorsCache() // create pin manager - b.pinMgr = ibus.NewPinManager(b.alerts, wm, e, store, defaultPinUpdateInterval, defaultPinRateWindow, l) + b.pinMgr = ibus.NewPinManager(b.alerts, wm, b.explorer, store, defaultPinUpdateInterval, defaultPinRateWindow, l) // create chain subscriber b.cs = ibus.NewChainSubscriber(wm, cm, store, w, announcementMaxAge, l) diff --git a/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql b/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql index 77982c509..c5be99f11 100644 --- a/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql +++ b/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql @@ -10,8 +10,8 @@ SET value = ( FROM ( -- SELECT previous settings SELECT - JSON_UNQUOTE(JSON_EXTRACT(value, '$.maxDownloadPrice')) AS maxDownloadPrice, - JSON_UNQUOTE(JSON_EXTRACT(value, '$.maxUploadPrice')) AS maxUploadPrice + JSON_EXTRACT(value, '$.maxDownloadPrice') AS maxDownloadPrice, + JSON_EXTRACT(value, '$.maxUploadPrice') AS maxUploadPrice ) AS _ ) AS _ ) From a60225bf0c4924e55396aedba665054a70daff4a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 11 Sep 2024 11:20:54 +0200 Subject: [PATCH 82/98] stores: address comments --- api/host.go | 4 + stores/hostdb_test.go | 8 +- stores/sql/main.go | 167 +----------------------------------------- 3 files changed, 12 insertions(+), 167 deletions(-) diff --git a/api/host.go b/api/host.go index 0422c2bee..7a9e3904b 100644 --- a/api/host.go +++ b/api/host.go @@ -98,6 +98,10 @@ var ( // ErrInvalidHostSortBy is returned when the SortBy parameter used // when querying hosts is invalid. ErrInvalidHostSortBy = errors.New("invalid SortBy parameter") + + // ErrInvalidHostSortDir is returned when the SortDir parameter used + // when querying hosts is invalid. 
+ ErrInvalidHostSortDir = errors.New("invalid SortDir parameter") ) var ( diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index bada7249e..90de32196 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -142,10 +142,10 @@ func TestHosts(t *testing.T) { { HostKey: hk1, PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(2), + InitBaseCost: types.NewCurrency64(230), }, Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(2), + BaseRPCPrice: types.NewCurrency64(230), }, Success: true, Timestamp: time.Now(), @@ -160,10 +160,10 @@ func TestHosts(t *testing.T) { { HostKey: hk3, PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(1), + InitBaseCost: types.NewCurrency64(15), }, Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(1), + BaseRPCPrice: types.NewCurrency64(15), }, Success: true, Timestamp: time.Now(), diff --git a/stores/sql/main.go b/stores/sql/main.go index 59188b17b..4fe847391 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -792,9 +792,11 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er var orderByExpr string if opts.SortBy != "" { switch opts.SortDir { - case "", api.SortDirAsc, api.SortDirDesc: + case "": + opts.SortDir = api.SortDirAsc + case api.SortDirAsc, api.SortDirDesc: default: - return nil, fmt.Errorf("invalid sort order: %v", opts.SortDir) + return nil, fmt.Errorf("%w: %v", api.ErrInvalidHostSortDir, opts.SortDir) } if !api.IsValidHostSortBy(opts.SortBy) { return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) @@ -1526,167 +1528,6 @@ func dirID(ctx context.Context, tx sql.Tx, dirPath string) (int64, error) { return id, nil } -func ObjectEntries(ctx context.Context, tx Tx, bucket, key, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - // sanity check we are passing a directory - if !strings.HasSuffix(key, "/") { - panic("path must end in /") - } - - // 
sanity check we are passing sane paging parameters - usingMarker := marker != "" - usingOffset := offset > 0 - if usingMarker && usingOffset { - return nil, false, errors.New("fetching entries using a marker and an offset is not supported at the same time") - } - - // fetch one more to see if there are more entries - if limit <= -1 { - limit = math.MaxInt - } else if limit != math.MaxInt { - limit++ - } - - // establish sane defaults for sorting - if sortBy == "" { - sortBy = api.ObjectSortByName - } - if sortDir == "" { - sortDir = api.SortDirAsc - } - - // fetch directory id - dirID, err := dirID(ctx, tx, key) - if errors.Is(err, dsql.ErrNoRows) { - return []api.ObjectMetadata{}, false, nil - } else if err != nil { - return nil, false, fmt.Errorf("failed to fetch directory id: %w", err) - } - - args := []any{ - key, - dirID, bucket, - } - - // apply prefix - var prefixExpr string - if prefix != "" { - prefixExpr = "AND SUBSTR(o.object_id, 1, ?) = ?" - args = append(args, - utf8.RuneCountInString(key+prefix), key+prefix, - utf8.RuneCountInString(key+prefix), key+prefix, - ) - } - - args = append(args, - bucket, - key+"%", - utf8.RuneCountInString(key), key, - dirID, - ) - - // apply marker - var whereExpr string - markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { - var groupFn string - switch col { - case "size": - groupFn = "SUM" - case "health": - groupFn = "MIN" - default: - return fmt.Errorf("unknown column: %v", col) - } - err := tx.QueryRow(ctx, fmt.Sprintf(` - SELECT o.%s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE b.name = ? AND o.object_id = ? - UNION ALL - SELECT %s(o.%s) - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name - WHERE b.name = ? AND d.name = ? 
- GROUP BY d.id - `, col, groupFn, col, tx.CharLengthExpr()), bucket, marker, bucket, marker).Scan(dst) - if errors.Is(err, dsql.ErrNoRows) { - return api.ErrMarkerNotFound - } else { - return err - } - }) - if err != nil { - return nil, false, fmt.Errorf("failed to query marker: %w", err) - } else if len(markerExprs) > 0 { - whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") - } - args = append(args, markerArgs...) - - // apply sorting - orderByExprs, err := orderByObject(sortBy, sortDir) - if err != nil { - return nil, false, fmt.Errorf("failed to apply sorting: %w", err) - } - - // apply offset and limit - args = append(args, limit, offset) - - // objectsQuery consists of 2 parts - // 1. fetch all objects in requested directory - // 2. fetch all sub-directories - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM ( - SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag - FROM objects o - LEFT JOIN directories d ON d.name = o.object_id - WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) %s - AND d.id IS NULL - UNION ALL - SELECT d.name as object_id, SUM(o.size), MIN(o.health), '' as mime_type, MAX(o.created_at) as created_at, '' as etag - FROM objects o - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name %s - WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) - AND o.object_id LIKE ? - AND SUBSTR(o.object_id, 1, ?) = ? - AND d.db_parent_id = ? - GROUP BY d.id - ) AS o - %s - ORDER BY %s - LIMIT ? OFFSET ? - `, - tx.SelectObjectMetadataExpr(), - prefixExpr, - tx.CharLengthExpr(), - prefixExpr, - whereExpr, - strings.Join(orderByExprs, ", "), - ), args...) 
- if err != nil { - return nil, false, fmt.Errorf("failed to fetch objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return nil, false, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - - // trim last element if we have more - var hasMore bool - if len(objects) == limit { - hasMore = true - objects = objects[:len(objects)-1] - } - - return objects, hasMore, nil -} - func ObjectMetadata(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) { // fetch object id var objID int64 From be0102e38f9021416c0e12ca257c036077d3b9fc Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 11 Sep 2024 11:31:31 +0200 Subject: [PATCH 83/98] stores: remove some sorting options on pricetables --- api/host.go | 50 +++++++++++++++++--------------------------------- 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/api/host.go b/api/host.go index 7a9e3904b..bab76ce21 100644 --- a/api/host.go +++ b/api/host.go @@ -24,39 +24,23 @@ const ( var validHostSortBy = map[string]any{ // price table - "price_table.uid": nil, - "price_table.validity": nil, - "price_table.hostblockheight": nil, - "price_table.updatepricetablecost": nil, - "price_table.accountbalancecost": nil, - "price_table.fundaccountcost": nil, - "price_table.latestrevisioncost": nil, - "price_table.subscriptionmemorycost": nil, - "price_table.subscriptionnotificationcost": nil, - "price_table.initbasecost": nil, - "price_table.memorytimecost": nil, - "price_table.downloadbandwidthcost": nil, - "price_table.uploadbandwidthcost": nil, - "price_table.dropsectorsbasecost": nil, - "price_table.dropsectorsunitcost": nil, - "price_table.hassectorbasecost": nil, - "price_table.readbasecost": nil, - "price_table.readlengthcost": nil, - "price_table.renewcontractcost": nil, - "price_table.revisionbasecost": nil, - "price_table.swapsectorcost": nil, - 
"price_table.writebasecost": nil, - "price_table.writelengthcost": nil, - "price_table.writestorecost": nil, - "price_table.txnfeeminrecommended": nil, - "price_table.txnfeemaxrecommended": nil, - "price_table.contractprice": nil, - "price_table.collateralcost": nil, - "price_table.maxcollateral": nil, - "price_table.maxduration": nil, - "price_table.windowsize": nil, - "price_table.registryentriesleft": nil, - "price_table.registryentriestotal": nil, + "price_table.uid": nil, + "price_table.validity": nil, + "price_table.hostblockheight": nil, + "price_table.latestrevisioncost": nil, + "price_table.initbasecost": nil, + "price_table.downloadbandwidthcost": nil, + "price_table.uploadbandwidthcost": nil, + "price_table.readbasecost": nil, + "price_table.writebasecost": nil, + "price_table.writestorecost": nil, + "price_table.txnfeeminrecommended": nil, + "price_table.txnfeemaxrecommended": nil, + "price_table.contractprice": nil, + "price_table.collateralcost": nil, + "price_table.maxcollateral": nil, + "price_table.maxduration": nil, + "price_table.windowsize": nil, // settings "settings.acceptingcontracts": nil, From 7dc4831313603428362cd19fb3224337492d1970 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 11 Sep 2024 12:16:13 +0200 Subject: [PATCH 84/98] stores: add decimal cast --- stores/sql/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/sql/main.go b/stores/sql/main.go index 4fe847391..ffc2449c6 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -812,7 +812,7 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } else { return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) } - orderByExpr = fmt.Sprintf("ORDER BY %s %s", fieldExpr, opts.SortDir) + orderByExpr = fmt.Sprintf("ORDER BY CAST(%s AS DECIMAL(65, 0)) %s", fieldExpr, opts.SortDir) } var blockedExpr string From 360d6657ca518fd43814e17fe027cd036d830670 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 
11 Sep 2024 12:27:25 +0200 Subject: [PATCH 85/98] stores: remove sorting --- api/host.go | 64 ------------------------------- bus/client/hosts.go | 2 - bus/routes.go | 16 -------- stores/hostdb_test.go | 88 ------------------------------------------- stores/sql/main.go | 25 ------------ 5 files changed, 195 deletions(-) diff --git a/api/host.go b/api/host.go index bab76ce21..a8d7a40e4 100644 --- a/api/host.go +++ b/api/host.go @@ -22,70 +22,10 @@ const ( UsabilityFilterModeUnusable = "unusable" ) -var validHostSortBy = map[string]any{ - // price table - "price_table.uid": nil, - "price_table.validity": nil, - "price_table.hostblockheight": nil, - "price_table.latestrevisioncost": nil, - "price_table.initbasecost": nil, - "price_table.downloadbandwidthcost": nil, - "price_table.uploadbandwidthcost": nil, - "price_table.readbasecost": nil, - "price_table.writebasecost": nil, - "price_table.writestorecost": nil, - "price_table.txnfeeminrecommended": nil, - "price_table.txnfeemaxrecommended": nil, - "price_table.contractprice": nil, - "price_table.collateralcost": nil, - "price_table.maxcollateral": nil, - "price_table.maxduration": nil, - "price_table.windowsize": nil, - - // settings - "settings.acceptingcontracts": nil, - "settings.maxdownloadbatchsize": nil, - "settings.maxduration": nil, - "settings.maxrevisebatchsize": nil, - "settings.netaddress": nil, - "settings.remainingstorage": nil, - "settings.sectorsize": nil, - "settings.totalstorage": nil, - "settings.unlockhash": nil, - "settings.windowsize": nil, - "settings.collateral": nil, - "settings.maxcollateral": nil, - "settings.baserpcprice": nil, - "settings.contractprice": nil, - "settings.downloadbandwidthprice": nil, - "settings.sectoraccessprice": nil, - "settings.storageprice": nil, - "settings.uploadbandwidthprice": nil, - "settings.ephemeralaccountexpiry": nil, - "settings.maxephemeralaccountbalance": nil, - "settings.revisionnumber": nil, - "settings.version": nil, - "settings.release": nil, - 
"settings.siamuxport": nil, -} - -func IsValidHostSortBy(sortBy string) bool { - _, ok := validHostSortBy[sortBy] - return ok -} - var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") - - // ErrInvalidHostSortBy is returned when the SortBy parameter used - // when querying hosts is invalid. - ErrInvalidHostSortBy = errors.New("invalid SortBy parameter") - - // ErrInvalidHostSortDir is returned when the SortDir parameter used - // when querying hosts is invalid. - ErrInvalidHostSortDir = errors.New("invalid SortDir parameter") ) var ( @@ -126,8 +66,6 @@ type ( UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` - SortBy string `json:"sortBy"` - SortDir string `json:"sortDir"` } // HostResponse is the response type for the GET @@ -179,8 +117,6 @@ type ( KeyIn []types.PublicKey Limit int Offset int - SortBy string - SortDir string } ) diff --git a/bus/client/hosts.go b/bus/client/hosts.go index f9480b32b..d7aa5f6db 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -26,8 +26,6 @@ func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.H UsabilityMode: opts.UsabilityMode, AddressContains: opts.AddressContains, KeyIn: opts.KeyIn, - SortBy: opts.SortBy, - SortDir: opts.SortDir, }, &hosts) return } diff --git a/bus/routes.go b/bus/routes.go index 80f8fb175..96c18eacb 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -486,20 +486,6 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { return } - // validate sorting params - if req.SortBy != "" { - if !api.IsValidHostSortBy(req.SortBy) { - jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) - return - } - } - switch req.SortDir { - case "", api.SortDirAsc, api.SortDirDesc: - default: - jc.Error(errors.New("invalid value for SortDir param, options are 'asc' and 'desc'"), 
http.StatusBadRequest) - return - } - // validate the offset and limit if req.Offset < 0 { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) @@ -520,8 +506,6 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { KeyIn: req.KeyIn, Offset: req.Offset, Limit: req.Limit, - SortBy: req.SortBy, - SortDir: req.SortDir, }) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 90de32196..555fc2ac8 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -138,41 +138,6 @@ func TestHosts(t *testing.T) { } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - err := ss.RecordHostScans(context.Background(), []api.HostScan{ - { - HostKey: hk1, - PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(230), - }, - Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(230), - }, - Success: true, - Timestamp: time.Now(), - }, - { - HostKey: hk2, - PriceTable: rhpv3.HostPriceTable{}, // empty price table - Settings: rhpv2.HostSettings{}, // empty settings - Success: true, - Timestamp: time.Now(), - }, - { - HostKey: hk3, - PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(15), - }, - Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(15), - }, - Success: true, - Timestamp: time.Now(), - }, - }) - if err != nil { - t.Fatal(err) - } - // search all hosts his, err := ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", @@ -189,59 +154,6 @@ func TestHosts(t *testing.T) { t.Fatal("unexpected") } - // search all hosts sorted by initbasecost - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.initbasecost", - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { - 
t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // reverse order - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.initbasecost", - SortDir: api.SortDirDesc, - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 || his[2].PublicKey != hk2 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // search all hosts sorted by baserpcprice - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "settings.baserpcprice", - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // search by invalid key - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.invalid", - Limit: -1, - }) - if !errors.Is(err, api.ErrInvalidHostSortBy) { - t.Fatal(err) - } - // assert offset & limit are taken into account his, err = ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", diff --git a/stores/sql/main.go b/stores/sql/main.go index ffc2449c6..b8c5c8759 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -790,31 +790,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var orderByExpr string - if opts.SortBy != "" { - switch opts.SortDir { - case "": - opts.SortDir = api.SortDirAsc - case api.SortDirAsc, api.SortDirDesc: - default: - return nil, fmt.Errorf("%w: %v", api.ErrInvalidHostSortDir, opts.SortDir) - } - if !api.IsValidHostSortBy(opts.SortBy) { - return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) - } 
- - var fieldExpr string - if strings.HasPrefix(opts.SortBy, "settings.") { - field := strings.TrimPrefix(opts.SortBy, "settings.") - fieldExpr = fmt.Sprintf("h.settings ->> '$.%s'", field) - } else if strings.HasPrefix(opts.SortBy, "price_table.") { - field := strings.TrimPrefix(opts.SortBy, "price_table.") - fieldExpr = fmt.Sprintf("h.price_table ->> '$.%s'", field) - } else { - return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) - } - orderByExpr = fmt.Sprintf("ORDER BY CAST(%s AS DECIMAL(65, 0)) %s", fieldExpr, opts.SortDir) - } - var blockedExpr string if len(blockedExprs) > 0 { blockedExpr = strings.Join(blockedExprs, " OR ") From b1ab76cd9b634c26d98943605dab477b6cffa46f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 11 Sep 2024 12:37:57 +0200 Subject: [PATCH 86/98] Revert "stores: remove sorting" This reverts commit 360d6657ca518fd43814e17fe027cd036d830670. --- api/host.go | 64 +++++++++++++++++++++++++++++++ bus/client/hosts.go | 2 + bus/routes.go | 16 ++++++++ stores/hostdb_test.go | 88 +++++++++++++++++++++++++++++++++++++++++++ stores/sql/main.go | 25 ++++++++++++ 5 files changed, 195 insertions(+) diff --git a/api/host.go b/api/host.go index a8d7a40e4..bab76ce21 100644 --- a/api/host.go +++ b/api/host.go @@ -22,10 +22,70 @@ const ( UsabilityFilterModeUnusable = "unusable" ) +var validHostSortBy = map[string]any{ + // price table + "price_table.uid": nil, + "price_table.validity": nil, + "price_table.hostblockheight": nil, + "price_table.latestrevisioncost": nil, + "price_table.initbasecost": nil, + "price_table.downloadbandwidthcost": nil, + "price_table.uploadbandwidthcost": nil, + "price_table.readbasecost": nil, + "price_table.writebasecost": nil, + "price_table.writestorecost": nil, + "price_table.txnfeeminrecommended": nil, + "price_table.txnfeemaxrecommended": nil, + "price_table.contractprice": nil, + "price_table.collateralcost": nil, + "price_table.maxcollateral": nil, + "price_table.maxduration": nil, + 
"price_table.windowsize": nil, + + // settings + "settings.acceptingcontracts": nil, + "settings.maxdownloadbatchsize": nil, + "settings.maxduration": nil, + "settings.maxrevisebatchsize": nil, + "settings.netaddress": nil, + "settings.remainingstorage": nil, + "settings.sectorsize": nil, + "settings.totalstorage": nil, + "settings.unlockhash": nil, + "settings.windowsize": nil, + "settings.collateral": nil, + "settings.maxcollateral": nil, + "settings.baserpcprice": nil, + "settings.contractprice": nil, + "settings.downloadbandwidthprice": nil, + "settings.sectoraccessprice": nil, + "settings.storageprice": nil, + "settings.uploadbandwidthprice": nil, + "settings.ephemeralaccountexpiry": nil, + "settings.maxephemeralaccountbalance": nil, + "settings.revisionnumber": nil, + "settings.version": nil, + "settings.release": nil, + "settings.siamuxport": nil, +} + +func IsValidHostSortBy(sortBy string) bool { + _, ok := validHostSortBy[sortBy] + return ok +} + var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") + + // ErrInvalidHostSortBy is returned when the SortBy parameter used + // when querying hosts is invalid. + ErrInvalidHostSortBy = errors.New("invalid SortBy parameter") + + // ErrInvalidHostSortDir is returned when the SortDir parameter used + // when querying hosts is invalid. 
+ ErrInvalidHostSortDir = errors.New("invalid SortDir parameter") ) var ( @@ -66,6 +126,8 @@ type ( UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` + SortBy string `json:"sortBy"` + SortDir string `json:"sortDir"` } // HostResponse is the response type for the GET @@ -117,6 +179,8 @@ type ( KeyIn []types.PublicKey Limit int Offset int + SortBy string + SortDir string } ) diff --git a/bus/client/hosts.go b/bus/client/hosts.go index d7aa5f6db..f9480b32b 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -26,6 +26,8 @@ func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.H UsabilityMode: opts.UsabilityMode, AddressContains: opts.AddressContains, KeyIn: opts.KeyIn, + SortBy: opts.SortBy, + SortDir: opts.SortDir, }, &hosts) return } diff --git a/bus/routes.go b/bus/routes.go index 96c18eacb..80f8fb175 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -486,6 +486,20 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { return } + // validate sorting params + if req.SortBy != "" { + if !api.IsValidHostSortBy(req.SortBy) { + jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) + return + } + } + switch req.SortDir { + case "", api.SortDirAsc, api.SortDirDesc: + default: + jc.Error(errors.New("invalid value for SortDir param, options are 'asc' and 'desc'"), http.StatusBadRequest) + return + } + // validate the offset and limit if req.Offset < 0 { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) @@ -506,6 +520,8 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { KeyIn: req.KeyIn, Offset: req.Offset, Limit: req.Limit, + SortBy: req.SortBy, + SortDir: req.SortDir, }) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 555fc2ac8..90de32196 100644 --- a/stores/hostdb_test.go 
+++ b/stores/hostdb_test.go @@ -138,6 +138,41 @@ func TestHosts(t *testing.T) { } hk1, hk2, hk3 := hks[0], hks[1], hks[2] + err := ss.RecordHostScans(context.Background(), []api.HostScan{ + { + HostKey: hk1, + PriceTable: rhpv3.HostPriceTable{ + InitBaseCost: types.NewCurrency64(230), + }, + Settings: rhpv2.HostSettings{ + BaseRPCPrice: types.NewCurrency64(230), + }, + Success: true, + Timestamp: time.Now(), + }, + { + HostKey: hk2, + PriceTable: rhpv3.HostPriceTable{}, // empty price table + Settings: rhpv2.HostSettings{}, // empty settings + Success: true, + Timestamp: time.Now(), + }, + { + HostKey: hk3, + PriceTable: rhpv3.HostPriceTable{ + InitBaseCost: types.NewCurrency64(15), + }, + Settings: rhpv2.HostSettings{ + BaseRPCPrice: types.NewCurrency64(15), + }, + Success: true, + Timestamp: time.Now(), + }, + }) + if err != nil { + t.Fatal(err) + } + // search all hosts his, err := ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", @@ -154,6 +189,59 @@ func TestHosts(t *testing.T) { t.Fatal("unexpected") } + // search all hosts sorted by initbasecost + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.initbasecost", + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // reverse order + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.initbasecost", + SortDir: api.SortDirDesc, + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 || his[2].PublicKey != hk2 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // search all 
hosts sorted by baserpcprice + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "settings.baserpcprice", + Limit: -1, + }) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) + } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) + } + + // search by invalid key + his, err = ss.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + SortBy: "price_table.invalid", + Limit: -1, + }) + if !errors.Is(err, api.ErrInvalidHostSortBy) { + t.Fatal(err) + } + // assert offset & limit are taken into account his, err = ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", diff --git a/stores/sql/main.go b/stores/sql/main.go index b8c5c8759..ffc2449c6 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -790,6 +790,31 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var orderByExpr string + if opts.SortBy != "" { + switch opts.SortDir { + case "": + opts.SortDir = api.SortDirAsc + case api.SortDirAsc, api.SortDirDesc: + default: + return nil, fmt.Errorf("%w: %v", api.ErrInvalidHostSortDir, opts.SortDir) + } + if !api.IsValidHostSortBy(opts.SortBy) { + return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) + } + + var fieldExpr string + if strings.HasPrefix(opts.SortBy, "settings.") { + field := strings.TrimPrefix(opts.SortBy, "settings.") + fieldExpr = fmt.Sprintf("h.settings ->> '$.%s'", field) + } else if strings.HasPrefix(opts.SortBy, "price_table.") { + field := strings.TrimPrefix(opts.SortBy, "price_table.") + fieldExpr = fmt.Sprintf("h.price_table ->> '$.%s'", field) + } else { + return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) + } + orderByExpr = fmt.Sprintf("ORDER BY CAST(%s AS DECIMAL(65, 0)) %s", fieldExpr, opts.SortDir) + } + var 
blockedExpr string if len(blockedExprs) > 0 { blockedExpr = strings.Join(blockedExprs, " OR ") From 1f4d42647b2912e744195436527db2b1a9dd2952 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 11 Sep 2024 14:55:41 +0200 Subject: [PATCH 87/98] stores: remove default settings --- bus/bus.go | 4 ++- bus/routes.go | 33 ++++++++++++++---- cmd/renterd/node.go | 4 +-- internal/bus/pinmanager.go | 14 ++++++-- internal/test/e2e/cluster.go | 6 ++-- stores/settings.go | 34 +++---------------- stores/sql.go | 5 +-- .../main/migration_00018_settings.sql | 25 ++++++++------ stores/sql_test.go | 3 +- 9 files changed, 66 insertions(+), 62 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 148baae39..56fa946e7 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -309,6 +309,7 @@ type ( type Bus struct { startTime time.Time masterKey utils.MasterKey + network *consensus.Network alerts alerts.Alerter alertMgr AlertManager @@ -338,12 +339,13 @@ type Bus struct { } // New returns a new Bus -func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, l *zap.Logger) (_ *Bus, err error) { +func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, network *consensus.Network, l *zap.Logger) (_ *Bus, err error) { l = l.Named("bus") b := &Bus{ startTime: time.Now(), masterKey: masterKey, + network: network, accounts: store, s: s, diff --git a/bus/routes.go b/bus/routes.go index deee1410e..efcfbbfeb 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -16,6 +16,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhp3 "go.sia.tech/renterd/internal/rhp/v3" + "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/internal/gouging" rhp2 "go.sia.tech/renterd/internal/rhp/v2" @@ -1341,7 +1342,11 @@ func (b *Bus) packedSlabsHandlerDonePOST(jc jape.Context) { 
func (b *Bus) settingsGougingHandlerGET(jc jape.Context) { gs, err := b.ss.GougingSettings(jc.Request.Context()) - if jc.Check("failed to get gouging settings", err) == nil { + if errors.Is(err, sql.ErrSettingNotFound) { + b.logger.Warn("gouging settings not found, returning defaults") + jc.Encode(api.DefaultGougingSettings) + return + } else if jc.Check("failed to get gouging settings", err) == nil { jc.Encode(gs) } } @@ -1368,7 +1373,11 @@ func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) { func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) { ps, err := b.ss.PinnedSettings(jc.Request.Context()) - if jc.Check("failed to get pinned settings", err) == nil { + if errors.Is(err, sql.ErrSettingNotFound) { + b.logger.Warn("pinned settings not found, returning defaults") + jc.Encode(api.DefaultPinnedSettings) + return + } else if jc.Check("failed to get pinned settings", err) == nil { // populate the Autopilots map with the current autopilots aps, err := b.as.Autopilots(jc.Request.Context()) if jc.Check("failed to fetch autopilots", err) != nil { @@ -1413,7 +1422,11 @@ func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) { func (b *Bus) settingsUploadHandlerGET(jc jape.Context) { us, err := b.ss.UploadSettings(jc.Request.Context()) - if jc.Check("failed to get upload settings", err) == nil { + if errors.Is(err, sql.ErrSettingNotFound) { + b.logger.Warn("upload settings not found, returning defaults") + jc.Encode(api.DefaultUploadSettings(b.network.Name)) + return + } else if jc.Check("failed to get upload settings", err) == nil { jc.Encode(us) } } @@ -1439,7 +1452,11 @@ func (b *Bus) settingsUploadHandlerPUT(jc jape.Context) { func (b *Bus) settingsS3HandlerGET(jc jape.Context) { s3s, err := b.ss.S3Settings(jc.Request.Context()) - if jc.Check("failed to get S3 settings", err) == nil { + if errors.Is(err, sql.ErrSettingNotFound) { + b.logger.Warn("S3 settings not found, returning defaults") + jc.Encode(api.DefaultS3Settings) + return + } else if 
jc.Check("failed to get S3 settings", err) == nil { jc.Encode(s3s) } } @@ -1686,12 +1703,16 @@ func (b *Bus) paramsHandlerGougingGET(jc jape.Context) { func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { gs, err := b.ss.GougingSettings(ctx) - if err != nil { + if errors.Is(err, sql.ErrSettingNotFound) { + gs = api.DefaultGougingSettings + } else if err != nil { return api.GougingParams{}, err } us, err := b.ss.UploadSettings(ctx) - if err != nil { + if errors.Is(err, sql.ErrSettingNotFound) { + us = api.DefaultUploadSettings(b.network.Name) + } else if err != nil { return api.GougingParams{}, err } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 92c62ae9f..9fad6c05e 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -264,7 +264,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network if err != nil { return nil, nil, err } - sqlStore, err := stores.NewSQLStore(storeCfg, network) + sqlStore, err := stores.NewSQLStore(storeCfg) if err != nil { return nil, nil, err } @@ -386,7 +386,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, network, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 548e6b708..46372c8b9 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -12,6 +12,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" ) @@ -247,7 +248,9 @@ func (pm *pinManager) 
updateGougingSettings(ctx context.Context, pins api.Gougin // fetch gouging settings gs, err := pm.s.GougingSettings(ctx) - if err != nil { + if errors.Is(err, sql.ErrSettingNotFound) { + gs = api.DefaultGougingSettings + } else if err != nil { return err } @@ -323,9 +326,14 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { // fetch pinned settings settings, err := pm.s.PinnedSettings(ctx) - if err != nil { + if errors.Is(err, sql.ErrSettingNotFound) { + settings = api.DefaultPinnedSettings + } else if err != nil { return fmt.Errorf("failed to fetch pinned settings: %w", err) - } else if !settings.Enabled() { + } + + // check if pinning is enabled + if !settings.Enabled() { pm.logger.Debug("no pinned settings, skipping price update") return nil } diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index ec07a465a..4bbbecc80 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -553,8 +553,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } // create store - network, genesis := testNetwork() - sqlStore, err := stores.NewSQLStore(storeCfg, network) + sqlStore, err := stores.NewSQLStore(storeCfg) if err != nil { return nil, nil, nil, nil, err } @@ -582,6 +581,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } // create chain manager + network, genesis := testNetwork() store, state, err := chain.NewDBStore(bdb, network, genesis) if err != nil { return nil, nil, nil, nil, err @@ -639,7 +639,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create bus announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, "", logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, "", network, logger) if err != nil { return nil, nil, 
nil, nil, err } diff --git a/stores/settings.go b/stores/settings.go index 5801de1a9..471c013fb 100644 --- a/stores/settings.go +++ b/stores/settings.go @@ -3,7 +3,6 @@ package stores import ( "context" "encoding/json" - "errors" "fmt" "go.sia.tech/renterd/api" @@ -60,10 +59,7 @@ func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{} // fetch setting from cache value, ok := s.settings[key] if ok { - if err := json.Unmarshal([]byte(value), &out); err != nil { - s.logger.Warnf("failed to unmarshal %s setting '%s': %v, using default", key, value, err) - return json.Unmarshal([]byte(s.defaultSetting(key)), &out) - } + _ = json.Unmarshal([]byte(value), &out) // cached values are always valid json return nil } @@ -72,16 +68,13 @@ func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{} if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { value, err = tx.Setting(ctx, key) return err - }); err != nil && !errors.Is(err, sql.ErrSettingNotFound) { - return fmt.Errorf("failed to fetch setting from db: %w", err) - } else if err != nil { - value = s.defaultSetting(key) + }); err != nil { + return err } // unmarshal setting if err := json.Unmarshal([]byte(value), &out); err != nil { - s.logger.Warnf("failed to unmarshal %s setting '%s': %v, using default", key, value, err) - return json.Unmarshal([]byte(s.defaultSetting(key)), &out) + return fmt.Errorf("failed to unmarshal setting '%s', err: %v", key, err) } // update cache @@ -112,22 +105,3 @@ func (s *SQLStore) updateSetting(ctx context.Context, key string, value any) err s.settings[key] = string(b) return nil } - -func (s *SQLStore) defaultSetting(key string) string { - switch key { - case SettingGouging: - b, _ := json.Marshal(api.DefaultGougingSettings) - return string(b) - case SettingPinned: - b, _ := json.Marshal(api.DefaultPinnedSettings) - return string(b) - case SettingS3: - b, _ := json.Marshal(api.DefaultS3Settings) - return string(b) - case SettingUpload: 
- b, _ := json.Marshal(api.DefaultUploadSettings(s.network.Name)) - return string(b) - default: - panic("unknown setting") // developer error - } -} diff --git a/stores/sql.go b/stores/sql.go index 424016927..14216ce32 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -8,7 +8,6 @@ import ( "sync" "time" - "go.sia.tech/core/consensus" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/stores/sql" @@ -43,7 +42,6 @@ type ( dbMetrics sql.MetricsDatabase logger *zap.SugaredLogger - network *consensus.Network walletAddress types.Address // ObjectDB related fields @@ -70,7 +68,7 @@ type ( // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only // pass migrate=true for the first instance of SQLHostDB if you connect via the // same Dialector multiple times. -func NewSQLStore(cfg Config, network *consensus.Network) (*SQLStore, error) { +func NewSQLStore(cfg Config) (*SQLStore, error) { if err := os.MkdirAll(cfg.PartialSlabDir, 0700); err != nil { return nil, fmt.Errorf("failed to create partial slab dir '%s': %v", cfg.PartialSlabDir, err) } @@ -103,7 +101,6 @@ func NewSQLStore(cfg Config, network *consensus.Network) (*SQLStore, error) { settings: make(map[string]string), walletAddress: cfg.WalletAddress, - network: network, slabPruneSigChan: make(chan struct{}, 1), lastPrunedAt: time.Now(), diff --git a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql index 91d3b2aa0..5a8b24739 100644 --- a/stores/sql/sqlite/migrations/main/migration_00018_settings.sql +++ b/stores/sql/sqlite/migrations/main/migration_00018_settings.sql @@ -7,16 +7,17 @@ SELECT DATETIME('now'), k, v FROM ( -- upload is a combination of uploadpacking, redundancy, and contractset SELECT - "upload" as k, - json_patch( - json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE key = "uploadpacking")), - json_patch( - json_object("redundancy", (SELECT json_extract(value, 
"$") FROM settings WHERE key = "redundancy")), - json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE key = "contractset")) - ) - ) as v - WHERE json_extract(v, "$.packing") IS NOT NULL - AND json_extract(v, "$.redundancy") IS NOT NULL + "upload" as k, + json_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE key = "uploadpacking")), + json_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE key = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE key = "contractset")) + ) + ) as v + WHERE + json_extract(v, "$.packing") IS NOT NULL AND + json_extract(v, "$.redundancy") IS NOT NULL UNION ALL @@ -38,7 +39,9 @@ FROM ( ), "$.forexEndpointURL" ) as v - WHERE json_extract(v, "$.currency") IS NOT NULL AND json_extract(v, "$.threshold") IS NOT NULL + WHERE + json_extract(v, "$.currency") IS NOT NULL AND + json_extract(v, "$.threshold") IS NOT NULL ) -- delete old settings diff --git a/stores/sql_test.go b/stores/sql_test.go index 0fd280873..7eb33d07b 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - "go.sia.tech/core/consensus" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" @@ -180,7 +179,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { LongQueryDuration: 100 * time.Millisecond, LongTxDuration: 100 * time.Millisecond, RetryTransactionIntervals: []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 200 * time.Millisecond}, - }, &consensus.Network{}) + }) if err != nil { t.Fatal("failed to create SQLStore", err) } From 62dbcd69d1b6e1eb35c82e17dc3355fc0d6b10e6 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 11 Sep 2024 15:10:53 +0200 Subject: [PATCH 88/98] stores: fix indentation --- .../migrations/main/migration_00019_settings.sql | 10 +++++----- 1 file changed, 5 insertions(+), 5 
deletions(-) diff --git a/stores/sql/sqlite/migrations/main/migration_00019_settings.sql b/stores/sql/sqlite/migrations/main/migration_00019_settings.sql index 5a8b24739..98d4aa36b 100644 --- a/stores/sql/sqlite/migrations/main/migration_00019_settings.sql +++ b/stores/sql/sqlite/migrations/main/migration_00019_settings.sql @@ -30,19 +30,19 @@ FROM ( UNION ALL -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' fields - SELECT - "pinned" as k, - json_remove( + SELECT + "pinned" as k, + json_remove( json_remove( (SELECT json_extract(value, "$") FROM settings WHERE key = "pricepinning"), "$.enabled" ), "$.forexEndpointURL" ) as v - WHERE + WHERE json_extract(v, "$.currency") IS NOT NULL AND json_extract(v, "$.threshold") IS NOT NULL ) -- delete old settings -DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning"); \ No newline at end of file +DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning"); From b6224021fb857e6d469c4564bbf055894a9a3851 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 12 Sep 2024 11:49:32 +0200 Subject: [PATCH 89/98] api: add 'score' back to host check --- api/host.go | 12 ++++++++++++ api/host_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 api/host_test.go diff --git a/api/host.go b/api/host.go index a8d7a40e4..97f53829a 100644 --- a/api/host.go +++ b/api/host.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "errors" "fmt" "net/url" @@ -225,6 +226,17 @@ type ( } ) +func (hc HostCheck) MarshalJSON() ([]byte, error) { + type check HostCheck + return json.Marshal(struct { + check + Score float64 `json:"score"` + }{ + check: check(hc), + Score: hc.Score.Score(), + }) +} + // IsAnnounced returns whether the host has been announced. 
func (h Host) IsAnnounced() bool { return !h.LastAnnouncement.IsZero() diff --git a/api/host_test.go b/api/host_test.go new file mode 100644 index 000000000..94612f4f3 --- /dev/null +++ b/api/host_test.go @@ -0,0 +1,27 @@ +package api + +import ( + "encoding/json" + "strings" + "testing" +) + +func TestMarshalHostScoreBreakdownJSON(t *testing.T) { + hc := HostCheck{ + Score: HostScoreBreakdown{ + Age: 1.1, + Collateral: 1.1, + Interactions: 1.1, + StorageRemaining: 1.1, + Uptime: 1.1, + Version: 1.1, + Prices: 1.1, + }, + } + b, err := json.MarshalIndent(hc, " ", " ") + if err != nil { + t.Fatal(err) + } else if !strings.Contains(string(b), "\"score\": 1.9487171000000014") { + t.Fatal("expected a score field") + } +} From 4dd0b578c23b16cf9d2610e1c5ae3ec2c71b1050 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 12 Sep 2024 13:39:12 +0200 Subject: [PATCH 90/98] config: fix typo --- cmd/renterd/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 572b062a1..769875866 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -167,7 +167,7 @@ func loadConfig() (cfg config.Config, network *consensus.Network, genesis types. 
// check explorer if !cfg.Explorer.Disable && cfg.Explorer.URL == "" { - err = fmt.Errorf("explorer is disabled but no URL is set") + err = fmt.Errorf("explorer is enabled but no URL is set") return } From 87f0cc2316d7cc215c1496a8675b2c1d3d1e89d9 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 12 Sep 2024 13:51:27 +0200 Subject: [PATCH 91/98] bus: remove network --- bus/bus.go | 4 +--- bus/routes.go | 4 ++-- cmd/renterd/node.go | 2 +- internal/test/e2e/cluster.go | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index f180a7ee9..a1ff1034d 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -307,7 +307,6 @@ type ( type Bus struct { startTime time.Time masterKey utils.MasterKey - network *consensus.Network alerts alerts.Alerter alertMgr AlertManager @@ -337,13 +336,12 @@ type Bus struct { } // New returns a new Bus -func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, network *consensus.Network, l *zap.Logger) (_ *Bus, err error) { +func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, l *zap.Logger) (_ *Bus, err error) { l = l.Named("bus") b := &Bus{ startTime: time.Now(), masterKey: masterKey, - network: network, accounts: store, explorer: ibus.NewExplorer(explorerURL), diff --git a/bus/routes.go b/bus/routes.go index 646540bd8..7f10b03ac 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -1373,7 +1373,7 @@ func (b *Bus) settingsUploadHandlerGET(jc jape.Context) { us, err := b.ss.UploadSettings(jc.Request.Context()) if errors.Is(err, sql.ErrSettingNotFound) { b.logger.Warn("upload settings not found, returning defaults") - jc.Encode(api.DefaultUploadSettings(b.network.Name)) + jc.Encode(api.DefaultUploadSettings(b.cm.TipState().Network.Name)) return } else if 
jc.Check("failed to get upload settings", err) == nil { jc.Encode(us) @@ -1660,7 +1660,7 @@ func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { us, err := b.ss.UploadSettings(ctx) if errors.Is(err, sql.ErrSettingNotFound) { - us = api.DefaultUploadSettings(b.network.Name) + us = api.DefaultUploadSettings(b.cm.TipState().Network.Name) } else if err != nil { return api.GougingParams{}, err } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 9fad6c05e..aa36b9f24 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -386,7 +386,7 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, network, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 951f0f8d8..735085711 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -639,7 +639,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create bus announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, "", network, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, "", logger) if err != nil { return nil, nil, nil, nil, err } From ceed2906aac73ccebb8cdc64bf01b6d7ee0ae18a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 12 Sep 2024 14:05:30 +0200 Subject: [PATCH 92/98] api: remove unused types --- api/host.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git 
a/api/host.go b/api/host.go index bab76ce21..7b4d52eee 100644 --- a/api/host.go +++ b/api/host.go @@ -129,22 +129,6 @@ type ( SortBy string `json:"sortBy"` SortDir string `json:"sortDir"` } - - // HostResponse is the response type for the GET - // /api/autopilot/host/:hostkey endpoint. - HostResponse struct { - Host Host `json:"host"` - Checks *HostChecks `json:"checks,omitempty"` - } - - HostChecks struct { - Gouging bool `json:"gouging"` - GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` - Score float64 `json:"score"` - ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` - Usable bool `json:"usable"` - UnusableReasons []string `json:"unusableReasons,omitempty"` - } ) type ( From a4d3cc63fce9491a2316a4caa8f3da5b3f31c700 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 12 Sep 2024 14:10:18 +0200 Subject: [PATCH 93/98] api: fix json field conflict --- api/host.go | 14 ++++++++------ api/host_test.go | 2 +- autopilot/autopilot.go | 24 ++++++++++++------------ autopilot/contractor/contractor.go | 14 +++++++------- autopilot/contractor/evaluate.go | 20 ++++++++++---------- autopilot/contractor/hostfilter.go | 6 +++--- stores/hostdb_test.go | 14 +++++++------- stores/sql/main.go | 10 +++++----- stores/sql/mysql/main.go | 8 ++++---- stores/sql/sqlite/main.go | 8 ++++---- 10 files changed, 61 insertions(+), 59 deletions(-) diff --git a/api/host.go b/api/host.go index 97f53829a..b39bff850 100644 --- a/api/host.go +++ b/api/host.go @@ -191,9 +191,9 @@ type ( } HostCheck struct { - Gouging HostGougingBreakdown `json:"gouging"` - Score HostScoreBreakdown `json:"score"` - Usability HostUsabilityBreakdown `json:"usability"` + GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` + ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` + UsabilityBreakdown HostUsabilityBreakdown `json:"usabilityBreakdown"` } HostGougingBreakdown struct { @@ -230,10 +230,12 @@ func (hc HostCheck) MarshalJSON() ([]byte, error) { type check HostCheck 
return json.Marshal(struct { check - Score float64 `json:"score"` + Score float64 `json:"score"` + Usable bool `json:"usable"` }{ - check: check(hc), - Score: hc.Score.Score(), + check: check(hc), + Score: hc.ScoreBreakdown.Score(), + Usable: hc.UsabilityBreakdown.IsUsable(), }) } diff --git a/api/host_test.go b/api/host_test.go index 94612f4f3..e2711723e 100644 --- a/api/host_test.go +++ b/api/host_test.go @@ -8,7 +8,7 @@ import ( func TestMarshalHostScoreBreakdownJSON(t *testing.T) { hc := HostCheck{ - Score: HostScoreBreakdown{ + ScoreBreakdown: HostScoreBreakdown{ Age: 1.1, Collateral: 1.1, Interactions: 1.1, diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 2bf92f6f1..9493a1311 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -709,12 +709,12 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { jc.Encode(api.HostResponse{ Host: hi, Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), + Gouging: check.GougingBreakdown.Gouging(), + GougingBreakdown: check.GougingBreakdown, + Score: check.ScoreBreakdown.Score(), + ScoreBreakdown: check.ScoreBreakdown, + Usable: check.UsabilityBreakdown.IsUsable(), + UnusableReasons: check.UsabilityBreakdown.UnusableReasons(), }, }) return @@ -750,12 +750,12 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { resps[i] = api.HostResponse{ Host: host, Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), + Gouging: check.GougingBreakdown.Gouging(), + GougingBreakdown: check.GougingBreakdown, + Score: check.ScoreBreakdown.Score(), + ScoreBreakdown: check.ScoreBreakdown, + Usable: 
check.UsabilityBreakdown.IsUsable(), + UnusableReasons: check.UsabilityBreakdown.UnusableReasons(), }, } } else { diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 4a22f9d81..6f9e646f6 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -964,8 +964,8 @@ func performContractChecks(ctx *mCtx, alerter alerts.Alerter, bus Bus, w Worker, } // check usability - if !check.Usability.IsUsable() { - reasons := strings.Join(check.Usability.UnusableReasons(), ",") + if !check.UsabilityBreakdown.IsUsable() { + reasons := strings.Join(check.UsabilityBreakdown.UnusableReasons(), ",") logger.With("reasons", reasons).Info("unusable host") churnReasons[c.ID] = reasons continue @@ -1112,11 +1112,11 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, } else if _, used := usedHosts[host.PublicKey]; used { logger.Debug("host already used") continue - } else if score := hc.Score.Score(); score == 0 { + } else if score := hc.ScoreBreakdown.Score(); score == 0 { logger.Error("host has a score of 0") continue } - candidates = append(candidates, newScoredHost(host, hc.Score)) + candidates = append(candidates, newScoredHost(host, hc.ScoreBreakdown)) } logger = logger.With("candidates", len(candidates)) @@ -1225,11 +1225,11 @@ func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error { if err := bus.UpdateHostCheck(ctx, ctx.ApID(), h.host.PublicKey, *hc); err != nil { return fmt.Errorf("failed to update host check for host %v: %w", h.host.PublicKey, err) } - usabilityBreakdown.track(hc.Usability) + usabilityBreakdown.track(hc.UsabilityBreakdown) - if !hc.Usability.IsUsable() { + if !hc.UsabilityBreakdown.IsUsable() { logger.With("hostKey", h.host.PublicKey). - With("reasons", strings.Join(hc.Usability.UnusableReasons(), ",")). + With("reasons", strings.Join(hc.UsabilityBreakdown.UnusableReasons(), ",")). 
Debug("host is not usable") } } diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index e947009cb..7bacc44c9 100644 --- a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -14,7 +14,7 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. gc := gouging.NewChecker(gs, cs, fee, &period, &cfg.Contracts.RenewWindow) for _, host := range hosts { hc := checkHost(gc, scoreHost(host, cfg, rs.Redundancy()), minValidScore) - if hc.Usability.IsUsable() { + if hc.UsabilityBreakdown.IsUsable() { usables++ } } @@ -37,32 +37,32 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu for i, host := range hosts { hosts[i].PriceTable.HostBlockHeight = cs.BlockHeight // ignore block height hc := checkHost(gc, scoreHost(host, cfg, rs.Redundancy()), minValidScore) - if hc.Usability.IsUsable() { + if hc.UsabilityBreakdown.IsUsable() { resp.Usable++ continue } - if hc.Usability.Blocked { + if hc.UsabilityBreakdown.Blocked { resp.Unusable.Blocked++ } - if hc.Usability.NotAcceptingContracts { + if hc.UsabilityBreakdown.NotAcceptingContracts { resp.Unusable.NotAcceptingContracts++ } - if hc.Usability.NotCompletingScan { + if hc.UsabilityBreakdown.NotCompletingScan { resp.Unusable.NotScanned++ } - if hc.Gouging.ContractErr != "" { + if hc.GougingBreakdown.ContractErr != "" { resp.Unusable.Gouging.Contract++ } - if hc.Gouging.DownloadErr != "" { + if hc.GougingBreakdown.DownloadErr != "" { resp.Unusable.Gouging.Download++ } - if hc.Gouging.GougingErr != "" { + if hc.GougingBreakdown.GougingErr != "" { resp.Unusable.Gouging.Gouging++ } - if hc.Gouging.PruneErr != "" { + if hc.GougingBreakdown.PruneErr != "" { resp.Unusable.Gouging.Pruning++ } - if hc.Gouging.UploadErr != "" { + if hc.GougingBreakdown.UploadErr != "" { resp.Unusable.Gouging.Upload++ } } diff --git a/autopilot/contractor/hostfilter.go b/autopilot/contractor/hostfilter.go index 3083976ed..87f60c367 100644 --- 
a/autopilot/contractor/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -258,9 +258,9 @@ func checkHost(gc gouging.Checker, sh scoredHost, minScore float64) *api.HostChe } return &api.HostCheck{ - Usability: ub, - Gouging: gb, - Score: sh.sb, + UsabilityBreakdown: ub, + GougingBreakdown: gb, + ScoreBreakdown: sh.sb, } } diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index ae2525688..115661652 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -204,19 +204,19 @@ func TestHosts(t *testing.T) { // add host checks, h1 gets ap1 and h2 gets both h1c := newTestHostCheck() - h1c.Score.Age = .1 + h1c.ScoreBreakdown.Age = .1 err = ss.UpdateHostCheck(context.Background(), ap1, hk1, h1c) if err != nil { t.Fatal(err) } h2c1 := newTestHostCheck() - h2c1.Score.Age = .21 + h2c1.ScoreBreakdown.Age = .21 err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1) if err != nil { t.Fatal(err) } h2c2 := newTestHostCheck() - h2c2.Score.Age = .22 + h2c2.ScoreBreakdown.Age = .22 err = ss.UpdateHostCheck(context.Background(), ap2, hk2, h2c2) if err != nil { t.Fatal(err) @@ -266,7 +266,7 @@ func TestHosts(t *testing.T) { } // assert usability filter is taken into account - h2c1.Usability.RedundantIP = true + h2c1.UsabilityBreakdown.RedundantIP = true err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1) if err != nil { t.Fatal(err) @@ -1039,14 +1039,14 @@ func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSett func newTestHostCheck() api.HostCheck { return api.HostCheck{ - Gouging: api.HostGougingBreakdown{ + GougingBreakdown: api.HostGougingBreakdown{ ContractErr: "foo", DownloadErr: "bar", GougingErr: "baz", PruneErr: "qux", UploadErr: "quuz", }, - Score: api.HostScoreBreakdown{ + ScoreBreakdown: api.HostScoreBreakdown{ Age: .1, Collateral: .2, Interactions: .3, @@ -1055,7 +1055,7 @@ func newTestHostCheck() api.HostCheck { Version: .6, Prices: .7, }, - Usability: api.HostUsabilityBreakdown{ + UsabilityBreakdown: 
api.HostUsabilityBreakdown{ Blocked: false, Offline: false, LowScore: false, diff --git a/stores/sql/main.go b/stores/sql/main.go index 94c0ec932..544255c2c 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -867,11 +867,11 @@ func Hosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, var ap string var pk PublicKey var hc api.HostCheck - err := rows.Scan(&pk, &ap, &hc.Usability.Blocked, &hc.Usability.Offline, &hc.Usability.LowScore, &hc.Usability.RedundantIP, - &hc.Usability.Gouging, &hc.Usability.NotAcceptingContracts, &hc.Usability.NotAnnounced, &hc.Usability.NotCompletingScan, - &hc.Score.Age, &hc.Score.Collateral, &hc.Score.Interactions, &hc.Score.StorageRemaining, &hc.Score.Uptime, - &hc.Score.Version, &hc.Score.Prices, &hc.Gouging.ContractErr, &hc.Gouging.DownloadErr, &hc.Gouging.GougingErr, - &hc.Gouging.PruneErr, &hc.Gouging.UploadErr) + err := rows.Scan(&pk, &ap, &hc.UsabilityBreakdown.Blocked, &hc.UsabilityBreakdown.Offline, &hc.UsabilityBreakdown.LowScore, &hc.UsabilityBreakdown.RedundantIP, + &hc.UsabilityBreakdown.Gouging, &hc.UsabilityBreakdown.NotAcceptingContracts, &hc.UsabilityBreakdown.NotAnnounced, &hc.UsabilityBreakdown.NotCompletingScan, + &hc.ScoreBreakdown.Age, &hc.ScoreBreakdown.Collateral, &hc.ScoreBreakdown.Interactions, &hc.ScoreBreakdown.StorageRemaining, &hc.ScoreBreakdown.Uptime, + &hc.ScoreBreakdown.Version, &hc.ScoreBreakdown.Prices, &hc.GougingBreakdown.ContractErr, &hc.GougingBreakdown.DownloadErr, &hc.GougingBreakdown.GougingErr, + &hc.GougingBreakdown.PruneErr, &hc.GougingBreakdown.UploadErr) if err != nil { return nil, fmt.Errorf("failed to scan host: %w", err) } diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index a883b771f..f417b1a3a 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -1045,10 +1045,10 @@ func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, score_storage_remaining = VALUES(score_storage_remaining), score_uptime = 
VALUES(score_uptime), score_version = VALUES(score_version), score_prices = VALUES(score_prices), gouging_contract_err = VALUES(gouging_contract_err), gouging_download_err = VALUES(gouging_download_err), gouging_gouging_err = VALUES(gouging_gouging_err), gouging_prune_err = VALUES(gouging_prune_err), gouging_upload_err = VALUES(gouging_upload_err) - `, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore, - hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan, - hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices, - hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr, + `, time.Now(), autopilot, ssql.PublicKey(hk), hc.UsabilityBreakdown.Blocked, hc.UsabilityBreakdown.Offline, hc.UsabilityBreakdown.LowScore, + hc.UsabilityBreakdown.RedundantIP, hc.UsabilityBreakdown.Gouging, hc.UsabilityBreakdown.NotAcceptingContracts, hc.UsabilityBreakdown.NotAnnounced, hc.UsabilityBreakdown.NotCompletingScan, + hc.ScoreBreakdown.Age, hc.ScoreBreakdown.Collateral, hc.ScoreBreakdown.Interactions, hc.ScoreBreakdown.StorageRemaining, hc.ScoreBreakdown.Uptime, hc.ScoreBreakdown.Version, hc.ScoreBreakdown.Prices, + hc.GougingBreakdown.ContractErr, hc.GougingBreakdown.DownloadErr, hc.GougingBreakdown.GougingErr, hc.GougingBreakdown.PruneErr, hc.GougingBreakdown.UploadErr, ) if err != nil { return fmt.Errorf("failed to insert host check: %w", err) diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index d2679eb29..a4e0bc569 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -1056,10 +1056,10 @@ func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, score_storage_remaining = EXCLUDED.score_storage_remaining, score_uptime = 
EXCLUDED.score_uptime, score_version = EXCLUDED.score_version, score_prices = EXCLUDED.score_prices, gouging_contract_err = EXCLUDED.gouging_contract_err, gouging_download_err = EXCLUDED.gouging_download_err, gouging_gouging_err = EXCLUDED.gouging_gouging_err, gouging_prune_err = EXCLUDED.gouging_prune_err, gouging_upload_err = EXCLUDED.gouging_upload_err - `, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore, - hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan, - hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices, - hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr, + `, time.Now(), autopilot, ssql.PublicKey(hk), hc.UsabilityBreakdown.Blocked, hc.UsabilityBreakdown.Offline, hc.UsabilityBreakdown.LowScore, + hc.UsabilityBreakdown.RedundantIP, hc.UsabilityBreakdown.Gouging, hc.UsabilityBreakdown.NotAcceptingContracts, hc.UsabilityBreakdown.NotAnnounced, hc.UsabilityBreakdown.NotCompletingScan, + hc.ScoreBreakdown.Age, hc.ScoreBreakdown.Collateral, hc.ScoreBreakdown.Interactions, hc.ScoreBreakdown.StorageRemaining, hc.ScoreBreakdown.Uptime, hc.ScoreBreakdown.Version, hc.ScoreBreakdown.Prices, + hc.GougingBreakdown.ContractErr, hc.GougingBreakdown.DownloadErr, hc.GougingBreakdown.GougingErr, hc.GougingBreakdown.PruneErr, hc.GougingBreakdown.UploadErr, ) if err != nil { return fmt.Errorf("failed to insert host check: %w", err) From f4d8788890ddcc0d21396cd551f16dd214bfcd94 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 12 Sep 2024 14:26:11 +0200 Subject: [PATCH 94/98] wallet: add scanheight to wallet response --- api/wallet.go | 3 ++- bus/routes.go | 5 +++-- internal/test/e2e/cluster.go | 7 +++++++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git 
a/api/wallet.go b/api/wallet.go index 6e9feaea6..ec22076c3 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -55,7 +55,8 @@ type ( WalletResponse struct { wallet.Balance - Address types.Address `json:"address"` + Address types.Address `json:"address"` + ScanHeight uint64 `json:"scanHeight"` } WalletSendRequest struct { diff --git a/bus/routes.go b/bus/routes.go index 778034bb7..3e2b21494 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -271,8 +271,9 @@ func (b *Bus) walletHandler(jc jape.Context) { } jc.Encode(api.WalletResponse{ - Balance: balance, - Address: address, + Balance: balance, + Address: address, + ScanHeight: b.w.Tip().Height, }) } diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 2156c3244..c132cd65f 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -745,6 +745,13 @@ func (c *TestCluster) sync() { return fmt.Errorf("subscriber hasn't caught up, %d < %d", cs.BlockHeight, tip.Height) } + wallet, err := c.Bus.Wallet(context.Background()) + if err != nil { + return err + } else if wallet.ScanHeight < tip.Height { + return fmt.Errorf("wallet hasn't caught up, %d < %d", wallet.ScanHeight, tip.Height) + } + for _, h := range c.hosts { if hh := h.cm.Tip().Height; hh < tip.Height { return fmt.Errorf("host %v is not synced, %v < %v", h.PublicKey(), hh, cs.BlockHeight) From 0a2986653b518b5aca1af64d8b52e09d4f0ad722 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 12 Sep 2024 15:31:28 +0200 Subject: [PATCH 95/98] Revert "Revert "stores: remove sorting"" This reverts commit b1ab76cd9b634c26d98943605dab477b6cffa46f. 
--- api/host.go | 64 ------------------------------- bus/client/hosts.go | 2 - bus/routes.go | 16 -------- stores/hostdb_test.go | 88 ------------------------------------------- stores/sql/main.go | 25 ------------ 5 files changed, 195 deletions(-) diff --git a/api/host.go b/api/host.go index 7b4d52eee..19f28cb2f 100644 --- a/api/host.go +++ b/api/host.go @@ -22,70 +22,10 @@ const ( UsabilityFilterModeUnusable = "unusable" ) -var validHostSortBy = map[string]any{ - // price table - "price_table.uid": nil, - "price_table.validity": nil, - "price_table.hostblockheight": nil, - "price_table.latestrevisioncost": nil, - "price_table.initbasecost": nil, - "price_table.downloadbandwidthcost": nil, - "price_table.uploadbandwidthcost": nil, - "price_table.readbasecost": nil, - "price_table.writebasecost": nil, - "price_table.writestorecost": nil, - "price_table.txnfeeminrecommended": nil, - "price_table.txnfeemaxrecommended": nil, - "price_table.contractprice": nil, - "price_table.collateralcost": nil, - "price_table.maxcollateral": nil, - "price_table.maxduration": nil, - "price_table.windowsize": nil, - - // settings - "settings.acceptingcontracts": nil, - "settings.maxdownloadbatchsize": nil, - "settings.maxduration": nil, - "settings.maxrevisebatchsize": nil, - "settings.netaddress": nil, - "settings.remainingstorage": nil, - "settings.sectorsize": nil, - "settings.totalstorage": nil, - "settings.unlockhash": nil, - "settings.windowsize": nil, - "settings.collateral": nil, - "settings.maxcollateral": nil, - "settings.baserpcprice": nil, - "settings.contractprice": nil, - "settings.downloadbandwidthprice": nil, - "settings.sectoraccessprice": nil, - "settings.storageprice": nil, - "settings.uploadbandwidthprice": nil, - "settings.ephemeralaccountexpiry": nil, - "settings.maxephemeralaccountbalance": nil, - "settings.revisionnumber": nil, - "settings.version": nil, - "settings.release": nil, - "settings.siamuxport": nil, -} - -func IsValidHostSortBy(sortBy string) bool { 
- _, ok := validHostSortBy[sortBy] - return ok -} - var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") - - // ErrInvalidHostSortBy is returned when the SortBy parameter used - // when querying hosts is invalid. - ErrInvalidHostSortBy = errors.New("invalid SortBy parameter") - - // ErrInvalidHostSortDir is returned when the SortDir parameter used - // when querying hosts is invalid. - ErrInvalidHostSortDir = errors.New("invalid SortDir parameter") ) var ( @@ -126,8 +66,6 @@ type ( UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` - SortBy string `json:"sortBy"` - SortDir string `json:"sortDir"` } ) @@ -163,8 +101,6 @@ type ( KeyIn []types.PublicKey Limit int Offset int - SortBy string - SortDir string } ) diff --git a/bus/client/hosts.go b/bus/client/hosts.go index f9480b32b..d7aa5f6db 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -26,8 +26,6 @@ func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.H UsabilityMode: opts.UsabilityMode, AddressContains: opts.AddressContains, KeyIn: opts.KeyIn, - SortBy: opts.SortBy, - SortDir: opts.SortDir, }, &hosts) return } diff --git a/bus/routes.go b/bus/routes.go index 1c28747fe..7f7e0251d 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -487,20 +487,6 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { return } - // validate sorting params - if req.SortBy != "" { - if !api.IsValidHostSortBy(req.SortBy) { - jc.Error(fmt.Errorf("%w: %v", api.ErrInvalidHostSortBy, req.SortBy), http.StatusBadRequest) - return - } - } - switch req.SortDir { - case "", api.SortDirAsc, api.SortDirDesc: - default: - jc.Error(errors.New("invalid value for SortDir param, options are 'asc' and 'desc'"), http.StatusBadRequest) - return - } - // validate the offset and limit if req.Offset < 0 { jc.Error(errors.New("offset must be 
non-negative"), http.StatusBadRequest) @@ -521,8 +507,6 @@ func (b *Bus) hostsHandlerPOST(jc jape.Context) { KeyIn: req.KeyIn, Offset: req.Offset, Limit: req.Limit, - SortBy: req.SortBy, - SortDir: req.SortDir, }) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 90de32196..555fc2ac8 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -138,41 +138,6 @@ func TestHosts(t *testing.T) { } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - err := ss.RecordHostScans(context.Background(), []api.HostScan{ - { - HostKey: hk1, - PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(230), - }, - Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(230), - }, - Success: true, - Timestamp: time.Now(), - }, - { - HostKey: hk2, - PriceTable: rhpv3.HostPriceTable{}, // empty price table - Settings: rhpv2.HostSettings{}, // empty settings - Success: true, - Timestamp: time.Now(), - }, - { - HostKey: hk3, - PriceTable: rhpv3.HostPriceTable{ - InitBaseCost: types.NewCurrency64(15), - }, - Settings: rhpv2.HostSettings{ - BaseRPCPrice: types.NewCurrency64(15), - }, - Success: true, - Timestamp: time.Now(), - }, - }) - if err != nil { - t.Fatal(err) - } - // search all hosts his, err := ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", @@ -189,59 +154,6 @@ func TestHosts(t *testing.T) { t.Fatal("unexpected") } - // search all hosts sorted by initbasecost - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.initbasecost", - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // reverse order - his, err = 
ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.initbasecost", - SortDir: api.SortDirDesc, - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk1 || his[1].PublicKey != hk3 || his[2].PublicKey != hk2 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // search all hosts sorted by baserpcprice - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "settings.baserpcprice", - Limit: -1, - }) - if err != nil { - t.Fatal(err) - } else if len(his) != 3 { - t.Fatal("unexpected", len(his)) - } else if his[0].PublicKey != hk2 || his[1].PublicKey != hk3 || his[2].PublicKey != hk1 { - t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey, his[2].PublicKey) - } - - // search by invalid key - his, err = ss.Hosts(context.Background(), api.HostOptions{ - FilterMode: api.HostFilterModeAll, - SortBy: "price_table.invalid", - Limit: -1, - }) - if !errors.Is(err, api.ErrInvalidHostSortBy) { - t.Fatal(err) - } - // assert offset & limit are taken into account his, err = ss.Hosts(context.Background(), api.HostOptions{ AutopilotID: "", diff --git a/stores/sql/main.go b/stores/sql/main.go index bf4779ff9..12b265587 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -791,31 +791,6 @@ func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, er } var orderByExpr string - if opts.SortBy != "" { - switch opts.SortDir { - case "": - opts.SortDir = api.SortDirAsc - case api.SortDirAsc, api.SortDirDesc: - default: - return nil, fmt.Errorf("%w: %v", api.ErrInvalidHostSortDir, opts.SortDir) - } - if !api.IsValidHostSortBy(opts.SortBy) { - return nil, fmt.Errorf("%w: %s", api.ErrInvalidHostSortBy, opts.SortBy) - } - - var fieldExpr string - if strings.HasPrefix(opts.SortBy, "settings.") { - field := 
strings.TrimPrefix(opts.SortBy, "settings.") - fieldExpr = fmt.Sprintf("h.settings ->> '$.%s'", field) - } else if strings.HasPrefix(opts.SortBy, "price_table.") { - field := strings.TrimPrefix(opts.SortBy, "price_table.") - fieldExpr = fmt.Sprintf("h.price_table ->> '$.%s'", field) - } else { - return nil, fmt.Errorf("invalid sortBy parameter: %v", opts.SortBy) - } - orderByExpr = fmt.Sprintf("ORDER BY CAST(%s AS DECIMAL(65, 0)) %s", fieldExpr, opts.SortDir) - } - var blockedExpr string if len(blockedExprs) > 0 { blockedExpr = strings.Join(blockedExprs, " OR ") From e48cbef2df26e2056b672d8a317e05bdd5a8e4cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 01:57:01 +0000 Subject: [PATCH 96/98] build(deps): bump the all-dependencies group with 2 updates Bumps the all-dependencies group with 2 updates: [github.com/klauspost/reedsolomon](https://github.com/klauspost/reedsolomon) and [go.sia.tech/gofakes3](https://github.com/SiaFoundation/gofakes3). Updates `github.com/klauspost/reedsolomon` from 1.12.3 to 1.12.4 - [Release notes](https://github.com/klauspost/reedsolomon/releases) - [Commits](https://github.com/klauspost/reedsolomon/compare/v1.12.3...v1.12.4) Updates `go.sia.tech/gofakes3` from 0.0.4 to 0.0.5 - [Commits](https://github.com/SiaFoundation/gofakes3/compare/v0.0.4...v0.0.5) --- updated-dependencies: - dependency-name: github.com/klauspost/reedsolomon dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-dependencies - dependency-name: go.sia.tech/gofakes3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-dependencies ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 908c20ad3..0f17e8970 100644 --- a/go.mod +++ b/go.mod @@ -8,14 +8,14 @@ require ( github.com/go-sql-driver/mysql v1.8.1 github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.20.0 - github.com/klauspost/reedsolomon v1.12.3 + github.com/klauspost/reedsolomon v1.12.4 github.com/mattn/go-sqlite3 v1.14.23 github.com/minio/minio-go/v7 v7.0.76 github.com/montanaflynn/stats v0.7.1 github.com/shopspring/decimal v1.4.0 go.sia.tech/core v0.4.6 go.sia.tech/coreutils v0.3.2 - go.sia.tech/gofakes3 v0.0.4 + go.sia.tech/gofakes3 v0.0.5 go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 go.sia.tech/jape v0.12.1 go.sia.tech/mux v1.2.0 @@ -52,6 +52,6 @@ require ( golang.org/x/net v0.28.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.23.0 // indirect nhooyr.io/websocket v1.8.17 // indirect ) diff --git a/go.sum b/go.sum index f32b39629..7e34bf8ed 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ib github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/klauspost/reedsolomon v1.12.3 h1:tzUznbfc3OFwJaTebv/QdhnFf2Xvb7gZ24XaHLBPmdc= -github.com/klauspost/reedsolomon v1.12.3/go.mod h1:3K5rXwABAvzGeR01r6pWZieUALXO/Tq7bFKGIb4m4WI= +github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= +github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty 
v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -76,8 +76,8 @@ go.sia.tech/core v0.4.6 h1:QLm97a7GWBonfnMEOokqWRAqsWCUPL7kzo6k3Adwx8E= go.sia.tech/core v0.4.6/go.mod h1:Zuq0Tn2aIXJyO0bjGu8cMeVWe+vwQnUfZhG1LCmjD5c= go.sia.tech/coreutils v0.3.2 h1:3gJqvs18n1FVZmcrnfIYyzS+rBu06OtIscDDAfUAYQI= go.sia.tech/coreutils v0.3.2/go.mod h1:woPVmN6GUpIKHdi71Hkb9goIbl7b45TquCsAyEzyxnI= -go.sia.tech/gofakes3 v0.0.4 h1:Kvo8j5cVdJRBXvV1KBJ69bocY23twG8ao/HCdwuPMeI= -go.sia.tech/gofakes3 v0.0.4/go.mod h1:6hh4lETCMbyFFNWp3FRE838geY6vh1Aeas7LtYDpQdc= +go.sia.tech/gofakes3 v0.0.5 h1:vFhVBUFbKE9ZplvLE2w4TQxFMQyF8qvgxV4TaTph+Vw= +go.sia.tech/gofakes3 v0.0.5/go.mod h1:LXEzwGw+OHysWLmagleCttX93cJZlT9rBu/icOZjQ54= go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 h1:DP9o+TnNeS34EmxZ/zqZ4px3DgL8en/2RL4EsiSd4GU= go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238/go.mod h1:InmB5LdO6EP+ZW9uolUCO+zh+zVdbJF3iCgU7xokJxQ= go.sia.tech/jape v0.12.1 h1:xr+o9V8FO8ScRqbSaqYf9bjj1UJ2eipZuNcI1nYousU= @@ -97,8 +97,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod 
h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= @@ -118,8 +118,8 @@ golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From d71ad6dc85c30fc84b7353189b9ee3be6f4d8048 Mon Sep 17 00:00:00 2001 From: Nate Maninger Date: Sun, 15 Sep 2024 18:37:10 -0700 Subject: [PATCH 97/98] sqlite: fix Hash256 scanning --- stores/sql/sqlite/chain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/sql/sqlite/chain.go b/stores/sql/sqlite/chain.go index 0a80e1a98..c2188733c 100644 --- a/stores/sql/sqlite/chain.go +++ b/stores/sql/sqlite/chain.go @@ -132,7 +132,7 @@ func (c chainUpdateTx) WalletRevertIndex(index types.ChainIndex, removed, unspen // delete removed outputs for _, e := range removed { c.l.Debugw(fmt.Sprintf("remove output %v", e.ID), "height", index.Height, "block_id", index.ID) - if res, err := deleteRemovedStmt.Exec(c.ctx, e.ID); err != nil { + if res, err := deleteRemovedStmt.Exec(c.ctx, ssql.Hash256(e.ID)); err != nil { return fmt.Errorf("failed to delete removed output: %w", err) } else if n, err := res.RowsAffected(); err 
!= nil { return fmt.Errorf("failed to get rows affected: %w", err) From b87198325751fdcb85674f9159cbf56bb0e7aa9f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 16 Sep 2024 09:18:16 +0200 Subject: [PATCH 98/98] autopilot: add id to autopilot state response --- api/autopilot.go | 1 + autopilot/autopilot.go | 1 + internal/test/e2e/cluster_test.go | 16 +++++++--------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index 7f2ee7b08..1dca769a1 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -86,6 +86,7 @@ type ( // AutopilotStateResponse is the response type for the /autopilot/state // endpoint. AutopilotStateResponse struct { + ID string `json:"id"` Configured bool `json:"configured"` Migrating bool `json:"migrating"` MigratingLastStart TimeRFC3339 `json:"migratingLastStart"` diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 12f4c809e..7dbfdd2da 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -703,6 +703,7 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { } jc.Encode(api.AutopilotStateResponse{ + ID: ap.id, Configured: err == nil, Migrating: migrating, MigratingLastStart: api.TimeRFC3339(mLastStart), diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 1b51b2fc1..b17dd7609 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -358,19 +358,17 @@ func TestNewTestCluster(t *testing.T) { // Fetch the autopilot state state, err := cluster.Autopilot.State() tt.OK(err) - if time.Time(state.StartTime).IsZero() { + if state.ID != api.DefaultAutopilotID { + t.Fatal("autopilot should have default id", state.ID) + } else if time.Time(state.StartTime).IsZero() { t.Fatal("autopilot should have start time") - } - if time.Time(state.MigratingLastStart).IsZero() { + } else if time.Time(state.MigratingLastStart).IsZero() { t.Fatal("autopilot should have completed a migration") - } - if 
time.Time(state.ScanningLastStart).IsZero() { + } else if time.Time(state.ScanningLastStart).IsZero() { t.Fatal("autopilot should have completed a scan") - } - if state.UptimeMS == 0 { + } else if state.UptimeMS == 0 { t.Fatal("uptime should be set") - } - if !state.Configured { + } else if !state.Configured { t.Fatal("autopilot should be configured") } }