diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 04504c086..a5ccf759f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -66,10 +66,10 @@ body: description: | The configuration of your bus ```bash - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/contractset - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/gouging - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/redundancy - curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/setting/uploadpacking + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/gouging + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/pinned + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/s3 + curl -u ":[YOUR_PASSWORD]" http://localhost:9980/api/bus/settings/uploads ``` placeholder: Paste the output of the above commands here validations: diff --git a/README.md b/README.md index c20749935..e631c84ec 100644 --- a/README.md +++ b/README.md @@ -54,10 +54,6 @@ overview of all settings configurable through the CLI. | `Log.Database.Level` | Logger level for database queries (info\|warn\|error). Defaults to 'warn' | `warn` | `--log.database.level` | `RENTERD_LOG_DATABASE_LEVEL`, `RENTERD_LOG_LEVEL` | `log.database.level` | | `Log.Database.IgnoreRecordNotFoundError` | Enable ignoring 'not found' errors resulting from database queries. Defaults to 'true' | `true` | `--log.database.ignoreRecordNotFoundError` | `RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR` | `log.database.ignoreRecordNotFoundError` | | `Log.Database.SlowThreshold` | Threshold for slow queries in logger. Defaults to 100ms | `100ms` | `--log.database.slowThreshold` | `RENTERD_LOG_DATABASE_SLOW_THRESHOLD` | `log.database.slowThreshold` | -| `Log.Database.Level (DEPRECATED)` | Logger level | `warn` | `--db.logger.logLevel` | `RENTERD_DB_LOGGER_LOG_LEVEL` | `log.database.level` | -| `Log.Database.IgnoreRecordNotFoundError (DEPRECATED)` | Ignores 'not found' errors in logger | `true` | `--db.logger.ignoreNotFoundError`| `RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR` | `log.ignoreRecordNotFoundError` | -| `Log.Database.SlowThreshold (DEPRECATED)` | Threshold for slow queries in logger | `100ms` | `--db.logger.slowThreshold` | `RENTERD_DB_LOGGER_SLOW_THRESHOLD` | `log.slowThreshold` | -| `Log.Path (DEPRECATED)` | Path to directory for logs | - | `--log-path` | `RENTERD_LOG_PATH` | `log.path` | | `Database.MySQL.URI` | Database URI for the bus | - | `--db.uri` | `RENTERD_DB_URI` | `database.mysql.uri` | | `Database.MySQL.User` | Database username for the bus | `renterd` | `--db.user` | `RENTERD_DB_USER` | `database.mysql.user` | | `Database.MySQL.Password` | Database password for the bus | - | - | `RENTERD_DB_PASSWORD` | `database.mysql.password` | @@ -70,7 +66,6 @@ overview of all settings configurable through the CLI. 
| `Bus.GatewayAddr` | Address for Sia peer connections | `:9981` | `--bus.gatewayAddr` | `RENTERD_BUS_GATEWAY_ADDR` | `bus.gatewayAddr` | | `Bus.RemoteAddr` | Remote address for the bus | - | - | `RENTERD_BUS_REMOTE_ADDR` | `bus.remoteAddr` | | `Bus.RemotePassword` | Remote password for the bus | - | - | `RENTERD_BUS_API_PASSWORD` | `bus.remotePassword` | -| `Bus.PersistInterval` | Interval for persisting consensus updates | `1m` | `--bus.persistInterval` | - | `bus.persistInterval` | | `Bus.UsedUTXOExpiry` | Expiry for used UTXOs in transactions | `24h` | `--bus.usedUTXOExpiry` | - | `bus.usedUtxoExpiry` | | `Bus.SlabBufferCompletionThreshold` | Threshold for slab buffer upload | `4096` | `--bus.slabBufferCompletionThreshold` | `RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD` | `bus.slabBufferCompletionThreshold` | | `Worker.AllowPrivateIPs` | Allows hosts with private IPs | - | `--worker.allowPrivateIPs` | - | `worker.allowPrivateIPs` | @@ -102,7 +97,8 @@ overview of all settings configurable through the CLI. | `S3.Enabled` | Enables/disables S3 API | `true` | `--s3.enabled` | `RENTERD_S3_ENABLED` | `s3.enabled` | | `S3.HostBucketBases` | Enables bucket rewriting in the router for the provided bases | - | `--s3.hostBucketBases` | `RENTERD_S3_HOST_BUCKET_BASES` | `s3.hostBucketBases` | | `S3.HostBucketEnabled` | Enables bucket rewriting in the router | - | `--s3.hostBucketEnabled` | `RENTERD_S3_HOST_BUCKET_ENABLED` | `s3.hostBucketEnabled` | -| `S3.KeypairsV4 (DEPRECATED)` | V4 keypairs for S3 | - | - | - | `s3.keypairsV4` | +| `Explorer.Disable` | Disables explorer service | `false` | `--explorer.disable` | `RENTERD_EXPLORER_DISABLE` | `explorer.disable` | +| `Explorer.URL` | URL of service to retrieve data about the Sia network | `https://api.siascan.com` | `--explorer.url` | `RENTERD_EXPLORER_URL` | `explorer.url` | ### Single-Node Setup @@ -558,65 +554,6 @@ formed. } ``` -### Contract Set - -The contract set settings on the bus allow specifying a default contract set. -This contract set will be returned by the `bus` through the upload parameters, -and decides what contracts data is upload or migrated to by default. This -setting does not have a default value, it can be updated using the settings API: - -- `GET /api/bus/setting/contractset` -- `PUT /api/bus/setting/contractset` - -```json -{ - "default": "autopilot" -} -``` - -In most cases the default set should match the set from your autopilot -configuration in order for migrations to work properly. The contract set can be -overridden by passing it as a query string parameter to the worker's upload and -migrate endpoints. - -- `PUT /api/worker/objects/foo?contractset=foo` - -### Redundancy - -The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redundancy -can be updated using the settings API: - -- `GET /api/bus/setting/redundancy` -- `PUT /api/bus/setting/redundancy` - -The redundancy can also be passed through query string parameters on the upload -endpoint in the worker API: - -- `PUT /api/worker/objects/foo?minshards=2&totalshards=5` - -### Gouging - -The default gouging settings are listed below. 
The gouging settings can be -updated using the settings API: - -- `GET /api/bus/setting/gouging` -- `PUT /api/bus/setting/gouging` - -```json -{ - "hostBlockHeightLeeway": 6, // 6 blocks - "maxContractPrice": "15000000000000000000000000", // 15 SC per contract - "maxDownloadPrice": "3000000000000000000000000000", // 3000 SC per 1 TB - "maxRPCPrice": "1000000000000000000000", // 1mS per RPC - "maxStoragePrice": "631593542824", // 3000 SC per TB per month - "maxUploadPrice": "3000000000000000000000000000", // 3000 SC per 1 TB - "migrationSurchargeMultiplier": 10, // overpay up to 10x for sectors migrations on critical slabs - "minAccountExpiry": 86400000000000, // 1 day - "minMaxEphemeralAccountBalance": "1000000000000000000000000", // 1 SC - "minPriceTableValidity": 300000000000 // 5 minutes -} -``` - ### Blocklist Unfortunately the Sia blockchain is subject to hosts that announced themselves diff --git a/api/autopilot.go b/api/autopilot.go index 7f2ee7b08..1dca769a1 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -86,6 +86,7 @@ type ( // AutopilotStateResponse is the response type for the /autopilot/state // endpoint. AutopilotStateResponse struct { + ID string `json:"id"` Configured bool `json:"configured"` Migrating bool `json:"migrating"` MigratingLastStart TimeRFC3339 `json:"migratingLastStart"` diff --git a/api/bus.go b/api/bus.go index 3b73469e3..bf417d3b4 100644 --- a/api/bus.go +++ b/api/bus.go @@ -10,6 +10,7 @@ import ( var ( ErrMarkerNotFound = errors.New("marker not found") ErrMaxFundAmountExceeded = errors.New("renewal exceeds max fund amount") + ErrExplorerDisabled = errors.New("explorer is disabled") ) type ( @@ -65,6 +66,13 @@ type ( StartTime TimeRFC3339 `json:"startTime"` Network string `json:"network"` BuildState + Explorer ExplorerState `json:"explorer"` + } + + // ExplorerState contains static information about explorer data sources. 
+ ExplorerState struct { + Enabled bool `json:"enabled"` + URL string `json:"url,omitempty"` } ContractSetUpdateRequest struct { diff --git a/api/events.go b/api/events.go index 7f14ee4c5..85fe857d9 100644 --- a/api/events.go +++ b/api/events.go @@ -19,7 +19,6 @@ const ( EventAdd = "add" EventUpdate = "update" - EventDelete = "delete" EventArchive = "archive" EventRenew = "renew" ) @@ -51,12 +50,6 @@ type ( Timestamp time.Time `json:"timestamp"` } - EventHostUpdate struct { - HostKey types.PublicKey `json:"hostKey"` - NetAddr string `json:"netAddr"` - Timestamp time.Time `json:"timestamp"` - } - EventContractSetUpdate struct { Name string `json:"name"` ToAdd []types.FileContractID `json:"toAdd"` @@ -64,15 +57,18 @@ type ( Timestamp time.Time `json:"timestamp"` } - EventSettingUpdate struct { - Key string `json:"key"` - Update interface{} `json:"update"` - Timestamp time.Time `json:"timestamp"` + EventHostUpdate struct { + HostKey types.PublicKey `json:"hostKey"` + NetAddr string `json:"netAddr"` + Timestamp time.Time `json:"timestamp"` } - EventSettingDelete struct { - Key string `json:"key"` - Timestamp time.Time `json:"timestamp"` + EventSettingUpdate struct { + GougingSettings *GougingSettings `json:"gougingSettings,omitempty"` + PinnedSettings *PinnedSettings `json:"pinnedSettings,omitempty"` + S3Settings *S3Settings `json:"s3Settings,omitempty"` + UploadSettings *UploadSettings `json:"uploadSettings,omitempty"` + Timestamp time.Time `json:"timestamp"` } ) @@ -139,15 +135,6 @@ var ( URL: url, } } - - WebhookSettingDelete = func(url string, headers map[string]string) webhooks.Webhook { - return webhooks.Webhook{ - Event: EventDelete, - Headers: headers, - Module: ModuleSetting, - URL: url, - } - } ) func ParseEventWebhook(event webhooks.Event) (interface{}, error) { @@ -202,19 +189,12 @@ func ParseEventWebhook(event webhooks.Event) (interface{}, error) { return e, nil } case ModuleSetting: - switch event.Event { - case EventUpdate: + if event.Event == EventUpdate { var e EventSettingUpdate if err := json.Unmarshal(bytes, &e); err != nil { return nil, err } return e, nil - case EventDelete: - var e EventSettingDelete - if err := json.Unmarshal(bytes, &e); err != nil { - return nil, err - } - return e, nil } } return nil, fmt.Errorf("%w: module %s event %s", ErrUnknownEvent, event.Module, event.Event) diff --git a/api/host.go b/api/host.go index fda7cc581..b09384a7a 100644 --- a/api/host.go +++ b/api/host.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "errors" "fmt" "net/url" @@ -61,9 +62,8 @@ type ( MaxConsecutiveScanFailures uint64 `json:"maxConsecutiveScanFailures"` } - // SearchHostsRequest is the request type for the /api/bus/search/hosts - // endpoint. - SearchHostsRequest struct { + // HostsRequest is the request type for the /api/bus/hosts endpoint. + HostsRequest struct { Offset int `json:"offset"` Limit int `json:"limit"` AutopilotID string `json:"autopilotID"` @@ -72,22 +72,6 @@ type ( AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` } - - // HostResponse is the response type for the GET - // /api/autopilot/host/:hostkey endpoint. 
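With the explorer state embedded above, `GET /api/bus/state` reports whether an explorer is configured, and endpoints that need explorer data can return the new `ErrExplorerDisabled` otherwise. An abridged, illustrative response fragment (default URL taken from the README table in this diff):

```json
{
  "network": "mainnet",
  "explorer": {
    "enabled": true,
    "url": "https://api.siascan.com"
  }
}
```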
- HostResponse struct { - Host Host `json:"host"` - Checks *HostChecks `json:"checks,omitempty"` - } - - HostChecks struct { - Gouging bool `json:"gouging"` - GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` - Score float64 `json:"score"` - ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` - Usable bool `json:"usable"` - UnusableReasons []string `json:"unusableReasons,omitempty"` - } ) type ( @@ -108,17 +92,13 @@ type ( // Option types. type ( - GetHostsOptions struct { - Offset int - Limit int - } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 Limit int Offset int } - SearchHostOptions struct { + HostOptions struct { AutopilotID string AddressContains string FilterMode string @@ -129,15 +109,6 @@ type ( } ) -func (opts GetHostsOptions) Apply(values url.Values) { - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) - } - if opts.Limit != 0 { - values.Set("limit", fmt.Sprint(opts.Limit)) - } -} - func (opts HostsForScanningOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) @@ -208,9 +179,9 @@ type ( } HostCheck struct { - Gouging HostGougingBreakdown `json:"gouging"` - Score HostScoreBreakdown `json:"score"` - Usability HostUsabilityBreakdown `json:"usability"` + GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` + ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` + UsabilityBreakdown HostUsabilityBreakdown `json:"usabilityBreakdown"` } HostGougingBreakdown struct { @@ -243,6 +214,19 @@ type ( } ) +func (hc HostCheck) MarshalJSON() ([]byte, error) { + type check HostCheck + return json.Marshal(struct { + check + Score float64 `json:"score"` + Usable bool `json:"usable"` + }{ + check: check(hc), + Score: hc.ScoreBreakdown.Score(), + Usable: hc.UsabilityBreakdown.IsUsable(), + }) +} + // IsAnnounced returns whether the host has been announced. 
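The `MarshalJSON` override above uses the local-alias trick: the embedded value marshals with the default rules while the derived `score` and `usable` fields are layered on top, without infinite recursion. A self-contained sketch of the same pattern (names here are illustrative, not part of the API):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Stats struct {
	Hits   int `json:"hits"`
	Misses int `json:"misses"`
}

// MarshalJSON adds a derived "ratio" field. The local alias `stats` has no
// MarshalJSON method of its own, so marshaling the embedded value cannot
// re-enter this override.
func (s Stats) MarshalJSON() ([]byte, error) {
	type stats Stats
	return json.Marshal(struct {
		stats
		Ratio float64 `json:"ratio"`
	}{
		stats: stats(s),
		Ratio: float64(s.Hits) / float64(s.Hits+s.Misses),
	})
}

func main() {
	b, _ := json.Marshal(Stats{Hits: 3, Misses: 1})
	fmt.Println(string(b)) // {"hits":3,"misses":1,"ratio":0.75}
}
```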
func (h Host) IsAnnounced() bool { return !h.LastAnnouncement.IsZero() diff --git a/api/host_test.go b/api/host_test.go new file mode 100644 index 000000000..e2711723e --- /dev/null +++ b/api/host_test.go @@ -0,0 +1,27 @@ +package api + +import ( + "encoding/json" + "strings" + "testing" +) + +func TestMarshalHostScoreBreakdownJSON(t *testing.T) { + hc := HostCheck{ + ScoreBreakdown: HostScoreBreakdown{ + Age: 1.1, + Collateral: 1.1, + Interactions: 1.1, + StorageRemaining: 1.1, + Uptime: 1.1, + Version: 1.1, + Prices: 1.1, + }, + } + b, err := json.MarshalIndent(hc, " ", " ") + if err != nil { + t.Fatal(err) + } else if !strings.Contains(string(b), "\"score\": 1.9487171000000014") { + t.Fatal("expected a score field") + } +} diff --git a/api/multipart.go b/api/multipart.go index ecd19789f..f9b449600 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -31,11 +31,11 @@ var ( type ( MultipartUpload struct { - Bucket string `json:"bucket"` - Key object.EncryptionKey `json:"key"` - Path string `json:"path"` - UploadID string `json:"uploadID"` - CreatedAt TimeRFC3339 `json:"createdAt"` + Bucket string `json:"bucket"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` + Key string `json:"key"` + UploadID string `json:"uploadID"` + CreatedAt TimeRFC3339 `json:"createdAt"` } MultipartListPartItem struct { @@ -51,10 +51,9 @@ type ( } CreateMultipartOptions struct { - GenerateKey bool - Key *object.EncryptionKey - MimeType string - Metadata ObjectUserMetadata + DisableClientSideEncryption bool + MimeType string + Metadata ObjectUserMetadata } CompleteMultipartOptions struct { @@ -65,14 +64,14 @@ type ( type ( MultipartAbortRequest struct { Bucket string `json:"bucket"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` } MultipartAddPartRequest struct { Bucket string `json:"bucket"` ETag string `json:"eTag"` - Path string `json:"path"` + Key string `json:"key"` ContractSet string `json:"contractSet"` UploadID string `json:"uploadID"` PartNumber int `json:"partNumber"` @@ -86,21 +85,17 @@ type ( MultipartCompleteRequest struct { Bucket string `json:"bucket"` Metadata ObjectUserMetadata `json:"metadata"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` Parts []MultipartCompletedPart `json:"parts"` } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key *object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` - - // TODO: The next major version change should invert this to create a - // key by default - GenerateKey bool `json:"generateKey"` + Bucket string `json:"bucket"` + Key string `json:"key"` + MimeType string `json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` + DisableClientSideEncryption bool `json:"disableClientSideEncryption"` } MultipartCreateResponse struct { @@ -109,7 +104,7 @@ type ( MultipartListPartsRequest struct { Bucket string `json:"bucket"` - Path string `json:"path"` + Key string `json:"key"` UploadID string `json:"uploadID"` PartNumberMarker int `json:"partNumberMarker"` Limit int64 `json:"limit"` @@ -124,7 +119,7 @@ type ( MultipartListUploadsRequest struct { Bucket string `json:"bucket"` Prefix string `json:"prefix"` - PathMarker string `json:"pathMarker"` + KeyMarker string `json:"keyMarker"` UploadIDMarker string `json:"uploadIDMarker"` Limit int `json:"limit"` } diff --git a/api/object.go b/api/object.go index a5cef0422..09e33dba1 100644 --- a/api/object.go +++ 
b/api/object.go @@ -23,8 +23,8 @@ const ( ObjectSortByName = "name" ObjectSortBySize = "size" - ObjectSortDirAsc = "asc" - ObjectSortDirDesc = "desc" + SortDirAsc = "asc" + SortDirDesc = "desc" ) var ( @@ -47,6 +47,10 @@ var ( // ErrSlabNotFound is returned when a slab can't be retrieved from the // database. ErrSlabNotFound = errors.New("slab not found") + + // ErrUnsupportedDelimiter is returned when an unsupported delimiter is + // provided. + ErrUnsupportedDelimiter = errors.New("unsupported delimiter") ) type ( @@ -62,7 +66,7 @@ type ( ETag string `json:"eTag,omitempty"` Health float64 `json:"health"` ModTime TimeRFC3339 `json:"modTime"` - Name string `json:"name"` + Key string `json:"key"` Size int64 `json:"size"` MimeType string `json:"mimeType,omitempty"` } @@ -76,13 +80,6 @@ type ( // well ObjectUserMetadata map[string]string - // ObjectsResponse is the response type for the /bus/objects endpoint. - ObjectsResponse struct { - HasMore bool `json:"hasMore"` - Entries []ObjectMetadata `json:"entries,omitempty"` - Object *Object `json:"object,omitempty"` - } - // GetObjectResponse is the response type for the GET /worker/object endpoint. GetObjectResponse struct { Content io.ReadCloser `json:"content"` @@ -99,16 +96,6 @@ type ( Metadata ObjectUserMetadata } - // ObjectsDeleteRequest is the request type for the /bus/objects/list endpoint. - ObjectsListRequest struct { - Bucket string `json:"bucket"` - Limit int `json:"limit"` - SortBy string `json:"sortBy"` - SortDir string `json:"sortDir"` - Prefix string `json:"prefix"` - Marker string `json:"marker"` - } - // ObjectsListResponse is the response type for the /bus/objects/list endpoint. ObjectsListResponse struct { HasMore bool `json:"hasMore"` @@ -159,7 +146,7 @@ func (o ObjectMetadata) ContentType() string { return o.MimeType } - if ext := filepath.Ext(o.Name); ext != "" { + if ext := filepath.Ext(o.Key); ext != "" { return mime.TypeByExtension(ext) } @@ -193,10 +180,10 @@ type ( // CopyObjectsRequest is the request type for the /bus/objects/copy endpoint. 
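The reworked `ListObjectOptions` above maps one-to-one onto query parameters through its `Apply` method further down in this file. A hedged sketch of the resulting query string:

```go
package main

import (
	"fmt"
	"net/url"

	"go.sia.tech/renterd/api"
)

func main() {
	opts := api.ListObjectOptions{
		Delimiter: "/",
		Limit:     100,
		SortBy:    api.ObjectSortByName,
		SortDir:   api.SortDirAsc,
	}
	values := url.Values{}
	opts.Apply(values)

	// unset fields are omitted; set fields become query parameters
	fmt.Println(values.Encode())
	// delimiter=%2F&limit=100&sortBy=name&sortDir=asc
}
```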
CopyObjectsRequest struct { SourceBucket string `json:"sourceBucket"` - SourcePath string `json:"sourcePath"` + SourceKey string `json:"sourcePath"` DestinationBucket string `json:"destinationBucket"` - DestinationPath string `json:"destinationPath"` + DestinationKey string `json:"destinationPath"` MimeType string `json:"mimeType"` Metadata ObjectUserMetadata `json:"metadata"` @@ -207,32 +194,24 @@ type ( } HeadObjectOptions struct { - IgnoreDelim bool - Range *DownloadRange + Range *DownloadRange } DownloadObjectOptions struct { - GetObjectOptions Range *DownloadRange } GetObjectOptions struct { - Prefix string - Offset int - Limit int - IgnoreDelim bool - Marker string OnlyMetadata bool - SortBy string - SortDir string } ListObjectOptions struct { - Prefix string - Marker string - Limit int - SortBy string - SortDir string + Delimiter string + Limit int + Marker string + SortBy string + SortDir string + Substring string } SearchObjectOptions struct { @@ -295,11 +274,6 @@ func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { values.Set("contractset", opts.ContractSet) } } - -func (opts DownloadObjectOptions) ApplyValues(values url.Values) { - opts.GetObjectOptions.Apply(values) -} - func (opts DownloadObjectOptions) ApplyHeaders(h http.Header) { if opts.Range != nil { if opts.Range.Length == -1 { @@ -317,9 +291,6 @@ func (opts DeleteObjectOptions) Apply(values url.Values) { } func (opts HeadObjectOptions) Apply(values url.Values) { - if opts.IgnoreDelim { - values.Set("ignoreDelim", "true") - } } func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { @@ -333,30 +304,30 @@ func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { } func (opts GetObjectOptions) Apply(values url.Values) { - if opts.Prefix != "" { - values.Set("prefix", opts.Prefix) + if opts.OnlyMetadata { + values.Set("onlyMetadata", "true") } - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) +} + +func (opts ListObjectOptions) Apply(values url.Values) { + if opts.Delimiter != "" { + values.Set("delimiter", opts.Delimiter) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - if opts.IgnoreDelim { - values.Set("ignoreDelim", "true") - } if opts.Marker != "" { values.Set("marker", opts.Marker) } - if opts.OnlyMetadata { - values.Set("onlymetadata", "true") - } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) } if opts.SortDir != "" { values.Set("sortDir", opts.SortDir) } + if opts.Substring != "" { + values.Set("substring", opts.Substring) + } } func (opts SearchObjectOptions) Apply(values url.Values) { @@ -375,6 +346,6 @@ func FormatETag(eTag string) string { return fmt.Sprintf("%q", eTag) } -func ObjectPathEscape(path string) string { - return url.PathEscape(strings.TrimPrefix(path, "/")) +func ObjectKeyEscape(key string) string { + return url.PathEscape(strings.TrimPrefix(key, "/")) } diff --git a/api/setting.go b/api/setting.go index 5976b00b2..c0b93e46a 100644 --- a/api/setting.go +++ b/api/setting.go @@ -9,15 +9,6 @@ import ( "go.sia.tech/core/types" ) -const ( - SettingContractSet = "contractset" - SettingGouging = "gouging" - SettingPricePinning = "pricepinning" - SettingRedundancy = "redundancy" - SettingS3Authentication = "s3authentication" - SettingUploadPacking = "uploadpacking" -) - const ( S3MinAccessKeyLen = 16 S3MaxAccessKeyLen = 128 @@ -28,20 +19,16 @@ var ( // ErrInvalidRedundancySettings is returned if the redundancy settings are // not valid ErrInvalidRedundancySettings = errors.New("invalid redundancy settings") +) - // 
ErrSettingNotFound is returned if a requested setting is not present in the - database. - ErrSettingNotFound = errors.New("setting not found") - +var ( // DefaultGougingSettings define the default gouging settings the bus is - // configured with on startup. These values can be adjusted using the - // settings API. - // + // configured with on startup. DefaultGougingSettings = GougingSettings{ MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(15), // 15 SC per contract - MaxDownloadPrice: types.Siacoins(3000), // 3000 SC per 1 TB - MaxUploadPrice: types.Siacoins(3000), // 3000 SC per 1 TB + MaxDownloadPrice: types.Siacoins(3000).Div64(1e12), // 3000 SC per 1 TB + MaxUploadPrice: types.Siacoins(3000).Div64(1e12), // 3000 SC per 1 TB MaxStoragePrice: types.Siacoins(3000).Div64(1e12).Div64(144 * 30), // 3000 SC per TB per month HostBlockHeightLeeway: 6, // 6 blocks MinPriceTableValidity: 5 * time.Minute, // 5 minutes @@ -50,48 +37,51 @@ var ( MigrationSurchargeMultiplier: 10, // 10x }

 - // DefaultPricePinSettings define the default price pin settings the bus is + // DefaultPinnedSettings define the default price pin settings the bus is // configured with on startup. These values can be adjusted using the // settings API. - DefaultPricePinSettings = PricePinSettings{ - Enabled: false, - Currency: "usd", - ForexEndpointURL: "https://api.siascan.com/exchange-rate/siacoin", - Threshold: 0.05, - } - - // DefaultUploadPackingSettings define the default upload packing settings - // the bus is configured with on startup. - DefaultUploadPackingSettings = UploadPackingSettings{ - Enabled: true, - SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB - } - - // DefaultRedundancySettings define the default redundancy settings the bus - // is configured with on startup. These values can be adjusted using the - // settings API. - // - // NOTE: default redundancy settings for testnet are different from mainnet. - DefaultRedundancySettings = RedundancySettings{ - MinShards: 10, - TotalShards: 30, + DefaultPinnedSettings = PinnedSettings{ + Currency: "usd", + Threshold: 0.05, }

 - // Same as DefaultRedundancySettings but for running on testnet networks due - // to their reduced number of hosts. + // DefaultRedundancySettingsTestnet defines redundancy settings for the + // testnet; these are lower due to the reduced number of hosts on the + // testnet. DefaultRedundancySettingsTestnet = RedundancySettings{ MinShards: 2, TotalShards: 6, }

 + + // DefaultS3Settings defines the default S3 settings the bus is configured + // with on startup. + DefaultS3Settings = S3Settings{ + Authentication: S3AuthenticationSettings{ + V4Keypairs: map[string]string{}, + }, + } )

 -type ( - // ContractSetSetting contains the default contract set used by the worker for - // uploads and migrations. - ContractSetSetting struct { - Default string `json:"default"` +// DefaultUploadSettings define the default upload settings the bus is +// configured with on startup. +func DefaultUploadSettings(network string) UploadSettings { + rs := RedundancySettings{ + MinShards: 10, + TotalShards: 30, + } + if network != "mainnet" { + rs = DefaultRedundancySettingsTestnet } + return UploadSettings{ + Packing: UploadPackingSettings{ + Enabled: true, + SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB + }, + Redundancy: rs, + } +}

 +type ( // GougingSettings contain some price settings used in price gouging. 
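One behavioral note on `DefaultUploadSettings` above: any network name other than `mainnet` falls back to the testnet redundancy. A minimal sketch (`zen` is just an example of a non-mainnet network name):

```go
package main

import (
	"fmt"

	"go.sia.tech/renterd/api"
)

func main() {
	mainnet := api.DefaultUploadSettings("mainnet")
	testnet := api.DefaultUploadSettings("zen") // any non-mainnet network

	fmt.Println(mainnet.Redundancy.MinShards, mainnet.Redundancy.TotalShards) // 10 30
	fmt.Println(testnet.Redundancy.MinShards, testnet.Redundancy.TotalShards) // 2 6
}
```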
GougingSettings struct { // MaxRPCPrice is the maximum allowed base price for RPCs @@ -132,22 +122,14 @@ type ( MigrationSurchargeMultiplier uint64 `json:"migrationSurchargeMultiplier"` } - // PricePinSettings holds the configuration for pinning certain settings to - // a specific currency (e.g., USD). It uses a Forex API to fetch the current - // exchange rate, allowing users to set prices in USD instead of SC. - PricePinSettings struct { - // Enabled can be used to either enable or temporarily disable price - // pinning. If enabled, both the currency and the Forex endpoint URL - // must be valid. - Enabled bool `json:"enabled"` - + // PinnedSettings holds the configuration for pinning certain settings to a + // specific currency (e.g., USD). It uses the configured explorer to fetch + // the current exchange rate, allowing users to set prices in USD instead of + // SC. + PinnedSettings struct { // Currency is the external three-letter currency code. Currency string `json:"currency"` - // ForexEndpointURL is the endpoint that returns the exchange rate for - // Siacoin against the underlying currency. - ForexEndpointURL string `json:"forexEndpointURL"` - // Threshold is a percentage between 0 and 1 that determines when the // pinned settings are updated based on the exchange rate at the time. Threshold float64 `json:"threshold"` @@ -160,6 +142,23 @@ type ( GougingSettingsPins GougingSettingsPins `json:"gougingSettingsPins"` } + // UploadSettings contains various settings related to uploads. + UploadSettings struct { + DefaultContractSet string `json:"defaultContractSet"` + Packing UploadPackingSettings `json:"packing"` + Redundancy RedundancySettings `json:"redundancy"` + } + + UploadPackingSettings struct { + Enabled bool `json:"enabled"` + SlabBufferMaxSizeSoft int64 `json:"slabBufferMaxSizeSoft"` + } + + RedundancySettings struct { + MinShards int `json:"minShards"` + TotalShards int `json:"totalShards"` + } + // AutopilotPins contains the available autopilot settings that can be // pinned. AutopilotPins struct { @@ -180,22 +179,15 @@ type ( Value float64 `json:"value"` } - // RedundancySettings contain settings that dictate an object's redundancy. - RedundancySettings struct { - MinShards int `json:"minShards"` - TotalShards int `json:"totalShards"` + // S3Settings contains various settings related to the S3 API. + S3Settings struct { + Authentication S3AuthenticationSettings `json:"authentication"` } // S3AuthenticationSettings contains S3 auth settings. S3AuthenticationSettings struct { V4Keypairs map[string]string `json:"v4Keypairs"` } - - // UploadPackingSettings contains upload packing settings. - UploadPackingSettings struct { - Enabled bool `json:"enabled"` - SlabBufferMaxSizeSoft int64 `json:"slabBufferMaxSizeSoft"` - } ) // IsPinned returns true if the pin is enabled and the value is greater than 0. @@ -203,15 +195,32 @@ func (p Pin) IsPinned() bool { return p.Pinned && p.Value > 0 } +// Enabled returns true if any pins are enabled. +func (ps PinnedSettings) Enabled() bool { + if ps.GougingSettingsPins.MaxDownload.Pinned || + ps.GougingSettingsPins.MaxStorage.Pinned || + ps.GougingSettingsPins.MaxUpload.Pinned { + return true + } + + for _, pin := range ps.Autopilots { + if pin.Allowance.Pinned { + return true + } + } + + return false +} + // Validate returns an error if the price pin settings are not considered valid. 
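Since `Validate` below short-circuits when `Enabled()` reports no active pins, a zero-value `PinnedSettings` is valid, while pinning a single value makes the currency and threshold mandatory. A sketch:

```go
package main

import (
	"fmt"

	"go.sia.tech/renterd/api"
)

func main() {
	var ps api.PinnedSettings
	fmt.Println(ps.Enabled(), ps.Validate()) // false <nil>

	// pin one gouging value: currency and threshold are now required
	ps.GougingSettingsPins.MaxStorage = api.Pin{Pinned: true, Value: 1.5}
	fmt.Println(ps.Enabled(), ps.Validate()) // true, non-nil error (missing currency)

	ps.Currency = "usd"
	ps.Threshold = 0.05
	fmt.Println(ps.Validate()) // <nil>
}
```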
-func (pps PricePinSettings) Validate() error { - if pps.ForexEndpointURL == "" { - return fmt.Errorf("price pin settings must have a forex endpoint URL") +func (ps PinnedSettings) Validate() error { + if !ps.Enabled() { + return nil } - if pps.Currency == "" { + if ps.Currency == "" { return fmt.Errorf("price pin settings must have a currency") } - if pps.Threshold <= 0 || pps.Threshold >= 1 { + if ps.Threshold <= 0 || ps.Threshold >= 1 { return fmt.Errorf("price pin settings must have a threshold between 0 and 1") } return nil @@ -239,6 +248,14 @@ func (gs GougingSettings) Validate() error { return nil } +// Validate returns an error if the upload settings are not considered valid. +func (us UploadSettings) Validate() error { + if us.Packing.Enabled && us.Packing.SlabBufferMaxSizeSoft <= 0 { + return errors.New("SlabBufferMaxSizeSoft must be greater than zero when upload packing is enabled") + } + return us.Redundancy.Validate() +} + // Redundancy returns the effective storage redundancy of the // RedundancySettings. func (rs RedundancySettings) Redundancy() float64 { @@ -272,8 +289,8 @@ func (rs RedundancySettings) Validate() error { // Validate returns an error if the authentication settings are not considered // valid. -func (s3as S3AuthenticationSettings) Validate() error { - for accessKeyID, secretAccessKey := range s3as.V4Keypairs { +func (s3s S3Settings) Validate() error { + for accessKeyID, secretAccessKey := range s3s.Authentication.V4Keypairs { if accessKeyID == "" { return fmt.Errorf("AccessKeyID cannot be empty") } else if len(accessKeyID) < S3MinAccessKeyLen || len(accessKeyID) > S3MaxAccessKeyLen { diff --git a/api/slab.go b/api/slab.go index 65d19788d..f93ebe532 100644 --- a/api/slab.go +++ b/api/slab.go @@ -7,9 +7,9 @@ import ( type ( PackedSlab struct { - BufferID uint `json:"bufferID"` - Data []byte `json:"data"` - Key object.EncryptionKey `json:"key"` + BufferID uint `json:"bufferID"` + Data []byte `json:"data"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` } SlabBuffer struct { @@ -22,8 +22,8 @@ type ( } UnhealthySlab struct { - Key object.EncryptionKey `json:"key"` - Health float64 `json:"health"` + EncryptionKey object.EncryptionKey `json:"encryptionKey"` + Health float64 `json:"health"` } UploadedPackedSlab struct { diff --git a/api/wallet.go b/api/wallet.go index ad8acb56d..ec22076c3 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -6,6 +6,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" ) type ( @@ -52,12 +53,10 @@ type ( // WalletResponse is the response type for the /wallet endpoint. WalletResponse struct { - ScanHeight uint64 `json:"scanHeight"` - Address types.Address `json:"address"` - Spendable types.Currency `json:"spendable"` - Confirmed types.Currency `json:"confirmed"` - Unconfirmed types.Currency `json:"unconfirmed"` - Immature types.Currency `json:"immature"` + wallet.Balance + + Address types.Address `json:"address"` + ScanHeight uint64 `json:"scanHeight"` } WalletSendRequest struct { @@ -78,18 +77,6 @@ type ( // WalletTransactionsOption is an option for the WalletTransactions method. 
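Embedding `wallet.Balance` above keeps the wallet response flat on the wire. Assuming coreutils' `Balance` exposes the same spendable/confirmed/unconfirmed/immature fields the explicit struct had, the JSON shape is unchanged apart from field ordering (values elided):

```json
{
  "spendable": "...",
  "confirmed": "...",
  "unconfirmed": "...",
  "immature": "...",
  "address": "...",
  "scanHeight": 123456
}
```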
type WalletTransactionsOption func(url.Values) -func WalletTransactionsWithBefore(before time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("before", before.Format(time.RFC3339)) - } -} - -func WalletTransactionsWithSince(since time.Time) WalletTransactionsOption { - return func(q url.Values) { - q.Set("since", since.Format(time.RFC3339)) - } -} - func WalletTransactionsWithLimit(limit int) WalletTransactionsOption { return func(q url.Values) { q.Set("limit", fmt.Sprint(limit)) diff --git a/api/worker.go b/api/worker.go index d1c18b61b..ee72ca73e 100644 --- a/api/worker.go +++ b/api/worker.go @@ -58,9 +58,6 @@ type ( ContractsResponse struct { Contracts []Contract `json:"contracts"` Errors map[types.PublicKey]string `json:"errors,omitempty"` - - // deprecated - Error string `json:"error,omitempty"` } MemoryResponse struct { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7b287e586..1143d885e 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -11,6 +11,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" @@ -53,9 +54,9 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error // metrics @@ -72,9 +73,8 @@ type Bus interface { SlabsForMigration(ctx context.Context, healthCutoff float64, set string, limit int) ([]api.UnhealthySlab, error) // settings - UpdateSetting(ctx context.Context, key string, value interface{}) error GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) - RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) + UploadSettings(ctx context.Context) (us api.UploadSettings, err error) // syncer SyncerPeers(ctx context.Context) (resp []string, err error) @@ -85,9 +85,7 @@ type Bus interface { // wallet Wallet(ctx context.Context) (api.WalletResponse, error) - WalletDiscard(ctx context.Context, txn types.Transaction) error - WalletOutputs(ctx context.Context) (resp []api.SiacoinElement, err error) - WalletPending(ctx context.Context) (resp []types.Transaction, err error) + WalletPending(ctx context.Context) (resp []wallet.Event, err error) WalletRedistribute(ctx context.Context, outputs int, amount types.Currency) (ids []types.TransactionID, err error) } @@ -158,13 +156,11 @@ func (ap *Autopilot) Config(ctx context.Context) (api.Autopilot, error) { // Handler returns an HTTP handler that serves the autopilot api. 
func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ - "GET /config": ap.configHandlerGET, - "PUT /config": ap.configHandlerPUT, - "POST /config": ap.configHandlerPOST, - "POST /hosts": ap.hostsHandlerPOST, - "GET /host/:hostKey": ap.hostHandlerGET, - "GET /state": ap.stateHandlerGET, - "POST /trigger": ap.triggerHandlerPOST, + "GET /config": ap.configHandlerGET, + "PUT /config": ap.configHandlerPUT, + "POST /config": ap.configHandlerPOST, + "GET /state": ap.stateHandlerGET, + "POST /trigger": ap.triggerHandlerPOST, }) } @@ -191,7 +187,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := ap.bus.Hosts(ctx, api.HostOptions{}) if jc.Check("failed to get hosts", err) != nil { return } @@ -612,27 +608,15 @@ func (ap *Autopilot) performWalletMaintenance(ctx context.Context) error { } for _, txn := range pending { for _, mTxnID := range ap.maintenanceTxnIDs { - if mTxnID == txn.ID() { + if mTxnID == types.TransactionID(txn.ID) { l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID) return nil } } } - wantedNumOutputs := 10 - - // enough outputs - nothing to do - available, err := b.WalletOutputs(ctx) - if err != nil { - return err - } - if uint64(len(available)) >= uint64(wantedNumOutputs) { - l.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs)) - return nil - } - wantedNumOutputs -= len(available) - // figure out the amount per output + wantedNumOutputs := 10 amount := cfg.Contracts.Allowance.Div64(uint64(wantedNumOutputs)) // redistribute outputs @@ -706,93 +690,6 @@ func (ap *Autopilot) triggerHandlerPOST(jc jape.Context) { }) } -func (ap *Autopilot) hostHandlerGET(jc jape.Context) { - var hk types.PublicKey - if jc.DecodeParam("hostKey", &hk) != nil { - return - } - - state, err := ap.buildState(jc.Request.Context()) - if jc.Check("failed to build state", err) != nil { - return - } - - // TODO: remove on next major release - if jc.Check("failed to get host", compatV105Host(jc.Request.Context(), state.ContractsConfig(), ap.bus, hk)) != nil { - return - } - - hi, err := ap.bus.Host(jc.Request.Context(), hk) - if jc.Check("failed to get host info", err) != nil { - return - } - - check, ok := hi.Checks[ap.id] - if ok { - jc.Encode(api.HostResponse{ - Host: hi, - Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), - }, - }) - return - } - - jc.Encode(api.HostResponse{Host: hi}) -} - -func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.SearchHostsRequest - if jc.Decode(&req) != nil { - return - } else if req.AutopilotID != "" && req.AutopilotID != ap.id { - jc.Error(errors.New("invalid autopilot id"), http.StatusBadRequest) - return - } - - // TODO: remove on next major release - if jc.Check("failed to get host info", compatV105UsabilityFilterModeCheck(req.UsabilityMode)) != nil { - return - } - - hosts, err := ap.bus.SearchHosts(jc.Request.Context(), api.SearchHostOptions{ - AutopilotID: ap.id, - Offset: req.Offset, - Limit: req.Limit, - FilterMode: req.FilterMode, - UsabilityMode: req.UsabilityMode, - AddressContains: req.AddressContains, - KeyIn: req.KeyIn, - }) - if jc.Check("failed to get host 
info", err) != nil { - return - } - resps := make([]api.HostResponse, len(hosts)) - for i, host := range hosts { - if check, ok := host.Checks[ap.id]; ok { - resps[i] = api.HostResponse{ - Host: host, - Checks: &api.HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), - }, - } - } else { - resps[i] = api.HostResponse{Host: host} - } - } - jc.Encode(resps) -} - func (ap *Autopilot) stateHandlerGET(jc jape.Context) { ap.mu.Lock() pruning, pLastStart := ap.pruning, ap.pruningLastStart // TODO: move to a 'pruner' type @@ -806,6 +703,7 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { } jc.Encode(api.AutopilotStateResponse{ + ID: ap.id, Configured: err == nil, Migrating: migrating, MigratingLastStart: api.TimeRFC3339(mLastStart), @@ -838,10 +736,10 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta return nil, fmt.Errorf("could not fetch consensus state, err: %v", err) } - // fetch redundancy settings - rs, err := ap.bus.RedundancySettings(ctx) + // fetch upload settings + us, err := ap.bus.UploadSettings(ctx) if err != nil { - return nil, fmt.Errorf("could not fetch redundancy settings, err: %v", err) + return nil, fmt.Errorf("could not fetch upload settings, err: %v", err) } // fetch gouging settings @@ -891,7 +789,7 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta return &contractor.MaintenanceState{ GS: gs, - RS: rs, + RS: us.Redundancy, AP: autopilot, Address: address, @@ -900,59 +798,6 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta }, nil } -// compatV105Host performs some state checks and bus calls we no longer need but -// are necessary checks to make sure our API is consistent. This should be -// removed in the next major release. 
-func compatV105Host(ctx context.Context, cfg api.ContractsConfig, b Bus, hk types.PublicKey) error { - // state checks - if cfg.Allowance.IsZero() { - return fmt.Errorf("can not score hosts because contracts allowance is zero") - } - if cfg.Amount == 0 { - return fmt.Errorf("can not score hosts because contracts amount is zero") - } - if cfg.Period == 0 { - return fmt.Errorf("can not score hosts because contract period is zero") - } - - // fetch host - _, err := b.Host(ctx, hk) - if err != nil { - return fmt.Errorf("failed to fetch requested host from bus: %w", err) - } - - // other checks - _, err = b.GougingSettings(ctx) - if err != nil { - return fmt.Errorf("failed to fetch gouging settings from bus: %w", err) - } - _, err = b.RedundancySettings(ctx) - if err != nil { - return fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) - } - _, err = b.ConsensusState(ctx) - if err != nil { - return fmt.Errorf("failed to fetch consensus state from bus: %w", err) - } - _, err = b.RecommendedFee(ctx) - if err != nil { - return fmt.Errorf("failed to fetch recommended fee from bus: %w", err) - } - return nil -} - -func compatV105UsabilityFilterModeCheck(usabilityMode string) error { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - case api.UsabilityFilterModeUnusable: - case api.UsabilityFilterModeAll: - case "": - default: - return fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) - } - return nil -} - func computeNextPeriod(bh, currentPeriod, period uint64) uint64 { prevPeriod := currentPeriod nextPeriod := prevPeriod diff --git a/autopilot/client.go b/autopilot/client.go index 010c1f037..05592662b 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -2,9 +2,7 @@ package autopilot import ( "context" - "fmt" - "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/api" ) @@ -33,25 +31,6 @@ func (c *Client) UpdateConfig(cfg api.AutopilotConfig) error { return c.c.PUT("/config", cfg) } -// HostInfo returns information about the host with given host key. -func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err error) { - err = c.c.GET(fmt.Sprintf("/host/%s", hostKey), &resp) - return -} - -// HostInfo returns information about all hosts. -func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostsRequest{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - UsabilityMode: usabilityMode, - AddressContains: addressContains, - KeyIn: keyIn, - }, &resp) - return -} - // State returns the current state of the autopilot. 
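With `HostInfo`/`HostInfos` removed, `State` is the remaining read path on this client, and its response now carries the autopilot ID added in api/autopilot.go above. A hedged usage sketch (the constructor and address are assumptions based on the surrounding client code):

```go
package main

import (
	"fmt"
	"log"

	"go.sia.tech/renterd/autopilot"
)

func main() {
	// placeholder address/password for a running autopilot API
	c := autopilot.NewClient("http://localhost:9980/api/autopilot", "test")

	state, err := c.State()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state.ID, state.Configured, state.Migrating)
}
```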
func (c *Client) State() (state api.AutopilotStateResponse, err error) { err = c.c.GET("/state", &state) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 70230f6ca..dfd675f50 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -91,8 +91,8 @@ type Bus interface { FormContract(ctx context.Context, renterAddress types.Address, renterFunds types.Currency, hostKey types.PublicKey, hostIP string, hostCollateral types.Currency, endHeight uint64) (api.ContractMetadata, error) RenewContract(ctx context.Context, fcid types.FileContractID, endHeight uint64, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedNewStorage uint64) (api.ContractMetadata, error) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) UpdateContractSet(ctx context.Context, set string, toAdd, toRemove []types.FileContractID) error UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error } @@ -964,8 +964,8 @@ func performContractChecks(ctx *mCtx, alerter alerts.Alerter, bus Bus, w Worker, } // check usability - if !check.Usability.IsUsable() { - reasons := strings.Join(check.Usability.UnusableReasons(), ",") + if !check.UsabilityBreakdown.IsUsable() { + reasons := strings.Join(check.UsabilityBreakdown.UnusableReasons(), ",") logger.With("reasons", reasons).Info("unusable host") churnReasons[c.ID] = reasons continue @@ -1096,11 +1096,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, for _, c := range contracts { usedHosts[c.HostKey] = struct{}{} } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{ - Limit: -1, - FilterMode: api.HostFilterModeAllowed, - UsabilityMode: api.UsabilityFilterModeAll, - }) + allHosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return nil, fmt.Errorf("failed to fetch usable hosts: %w", err) } @@ -1116,11 +1112,11 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, } else if _, used := usedHosts[host.PublicKey]; used { logger.Debug("host already used") continue - } else if score := hc.Score.Score(); score == 0 { + } else if score := hc.ScoreBreakdown.Score(); score == 0 { logger.Error("host has a score of 0") continue } - candidates = append(candidates, newScoredHost(host, hc.Score)) + candidates = append(candidates, newScoredHost(host, hc.ScoreBreakdown)) } logger = logger.With("candidates", len(candidates)) @@ -1199,7 +1195,7 @@ func performContractFormations(ctx *mCtx, bus Bus, w Worker, cr contractReviser, func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error { var usabilityBreakdown unusableHostsBreakdown // fetch all hosts that are not blocked - hosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } @@ -1229,11 +1225,11 @@ func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error { if err := bus.UpdateHostCheck(ctx, ctx.ApID(), h.host.PublicKey, *hc); err != nil { return fmt.Errorf("failed to update host check for host %v: %w", h.host.PublicKey, err) } - 
usabilityBreakdown.track(hc.Usability) + usabilityBreakdown.track(hc.UsabilityBreakdown) - if !hc.Usability.IsUsable() { + if !hc.UsabilityBreakdown.IsUsable() { logger.With("hostKey", h.host.PublicKey). - With("reasons", strings.Join(hc.Usability.UnusableReasons(), ",")). + With("reasons", strings.Join(hc.UsabilityBreakdown.UnusableReasons(), ",")). Debug("host is not usable") } } @@ -1252,11 +1248,7 @@ func performPostMaintenanceTasks(ctx *mCtx, bus Bus, w Worker, alerter alerts.Al if err != nil { return fmt.Errorf("failed to fetch contracts: %w", err) } - allHosts, err := bus.SearchHosts(ctx, api.SearchHostOptions{ - Limit: -1, - FilterMode: api.HostFilterModeAllowed, - UsabilityMode: api.UsabilityFilterModeAll, - }) + allHosts, err := bus.Hosts(ctx, api.HostOptions{}) if err != nil { return fmt.Errorf("failed to fetch all hosts: %w", err) } diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index e947009cb..7bacc44c9 100644 --- a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -14,7 +14,7 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. gc := gouging.NewChecker(gs, cs, fee, &period, &cfg.Contracts.RenewWindow) for _, host := range hosts { hc := checkHost(gc, scoreHost(host, cfg, rs.Redundancy()), minValidScore) - if hc.Usability.IsUsable() { + if hc.UsabilityBreakdown.IsUsable() { usables++ } } @@ -37,32 +37,32 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu for i, host := range hosts { hosts[i].PriceTable.HostBlockHeight = cs.BlockHeight // ignore block height hc := checkHost(gc, scoreHost(host, cfg, rs.Redundancy()), minValidScore) - if hc.Usability.IsUsable() { + if hc.UsabilityBreakdown.IsUsable() { resp.Usable++ continue } - if hc.Usability.Blocked { + if hc.UsabilityBreakdown.Blocked { resp.Unusable.Blocked++ } - if hc.Usability.NotAcceptingContracts { + if hc.UsabilityBreakdown.NotAcceptingContracts { resp.Unusable.NotAcceptingContracts++ } - if hc.Usability.NotCompletingScan { + if hc.UsabilityBreakdown.NotCompletingScan { resp.Unusable.NotScanned++ } - if hc.Gouging.ContractErr != "" { + if hc.GougingBreakdown.ContractErr != "" { resp.Unusable.Gouging.Contract++ } - if hc.Gouging.DownloadErr != "" { + if hc.GougingBreakdown.DownloadErr != "" { resp.Unusable.Gouging.Download++ } - if hc.Gouging.GougingErr != "" { + if hc.GougingBreakdown.GougingErr != "" { resp.Unusable.Gouging.Gouging++ } - if hc.Gouging.PruneErr != "" { + if hc.GougingBreakdown.PruneErr != "" { resp.Unusable.Gouging.Pruning++ } - if hc.Gouging.UploadErr != "" { + if hc.GougingBreakdown.UploadErr != "" { resp.Unusable.Gouging.Upload++ } } diff --git a/autopilot/contractor/hostfilter.go b/autopilot/contractor/hostfilter.go index 100128ff7..a1e370fc7 100644 --- a/autopilot/contractor/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -258,9 +258,9 @@ func checkHost(gc gouging.Checker, sh scoredHost, minScore float64) *api.HostChe } return &api.HostCheck{ - Usability: ub, - Gouging: gb, - Score: sh.sb, + UsabilityBreakdown: ub, + GougingBreakdown: gb, + ScoreBreakdown: sh.sb, } } diff --git a/autopilot/migrator.go b/autopilot/migrator.go index fd935cabb..251c5e893 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -50,7 +50,7 @@ type ( ) func (j *job) execute(ctx context.Context, w Worker) (_ api.MigrateSlabResponse, err error) { - slab, err := j.b.Slab(ctx, j.Key) + slab, err := j.b.Slab(ctx, j.EncryptionKey) if err != nil { return api.MigrateSlabResponse{}, 
fmt.Errorf("failed to fetch slab; %w", err) } @@ -161,7 +161,7 @@ func (m *migrator) performMigrations(p *workerPool) { res, err := j.execute(ctx, w) m.statsSlabMigrationSpeedMS.Track(float64(time.Since(start).Milliseconds())) if err != nil { - m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err) + m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.EncryptionKey, j.Health, res.SurchargeApplied, err) if utils.IsErr(err, api.ErrConsensusNotSynced) { // interrupt migrations if consensus is not synced select { @@ -172,7 +172,7 @@ func (m *migrator) performMigrations(p *workerPool) { } else if !utils.IsErr(err, api.ErrSlabNotFound) { // fetch all object IDs for the slab we failed to migrate var objectIds map[string][]string - if res, err := m.objectIDsForSlabKey(ctx, j.Key); err != nil { + if res, err := m.objectIDsForSlabKey(ctx, j.EncryptionKey); err != nil { m.logger.Errorf("failed to fetch object ids for slab key; %w", err) } else { objectIds = res @@ -180,20 +180,20 @@ func (m *migrator) performMigrations(p *workerPool) { // register the alert if res.SurchargeApplied { - m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, objectIds, err)) + m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.EncryptionKey, j.Health, objectIds, err)) } else { - m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, objectIds, err)) + m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.EncryptionKey, j.Health, objectIds, err)) } } } else { - m.logger.Infof("%v: migration %d/%d succeeded, key: %v, health: %v, overpaid: %v, shards migrated: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, res.NumShardsMigrated) - m.ap.DismissAlert(ctx, alerts.IDForSlab(alertMigrationID, j.Key)) + m.logger.Infof("%v: migration %d/%d succeeded, key: %v, health: %v, overpaid: %v, shards migrated: %v", id, j.slabIdx+1, j.batchSize, j.EncryptionKey, j.Health, res.SurchargeApplied, res.NumShardsMigrated) + m.ap.DismissAlert(ctx, alerts.IDForSlab(alertMigrationID, j.EncryptionKey)) if res.SurchargeApplied { // this alert confirms the user his gouging // settings are working, it will be dismissed // automatically the next time this slab is // successfully migrated - m.ap.RegisterAlert(ctx, newCriticalMigrationSucceededAlert(j.Key)) + m.ap.RegisterAlert(ctx, newCriticalMigrationSucceededAlert(j.EncryptionKey)) } } } @@ -238,13 +238,13 @@ func (m *migrator) performMigrations(p *workerPool) { // starvation. 
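The swap-remove loop that follows is easy to misread, so here is the same dedup logic as a standalone sketch: keep entries of the old batch that reappear in the new one, delete matches from the map so only genuinely new keys remain in it, and compact the slice in place without reallocating:

```go
package main

import "fmt"

// dedup keeps the entries of old that are also in fresh, using the same
// swap-remove loop as the migrator; matched keys are deleted from fresh so
// that only genuinely new entries remain in the map afterwards.
func dedup(old []string, fresh map[string]struct{}) []string {
	removed := 0
	for i := 0; i < len(old)-removed; {
		if _, ok := fresh[old[i]]; ok {
			delete(fresh, old[i])
			i++
		} else {
			old[i] = old[len(old)-1-removed]
			removed++
		}
	}
	return old[:len(old)-removed]
}

func main() {
	fresh := map[string]struct{}{"a": {}, "c": {}}
	fmt.Println(dedup([]string{"a", "b", "c", "d"}, fresh)) // [a c]
}
```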
migrateNewMap := make(map[object.EncryptionKey]*api.UnhealthySlab) for i, slab := range toMigrateNew { - migrateNewMap[slab.Key] = &toMigrateNew[i] + migrateNewMap[slab.EncryptionKey] = &toMigrateNew[i] } removed := 0 for i := 0; i < len(toMigrate)-removed; { slab := toMigrate[i] - if _, exists := migrateNewMap[slab.Key]; exists { - delete(migrateNewMap, slab.Key) // delete from map to leave only new slabs + if _, exists := migrateNewMap[slab.EncryptionKey]; exists { + delete(migrateNewMap, slab.EncryptionKey) // delete from map to leave only new slabs i++ } else { toMigrate[i] = toMigrate[len(toMigrate)-1-removed] @@ -337,7 +337,7 @@ func (m *migrator) objectIDsForSlabKey(ctx context.Context, key object.Encryptio idsPerBucket[bucket.Name] = make([]string, len(objects)) for i, object := range objects { - idsPerBucket[bucket.Name][i] = object.Name + idsPerBucket[bucket.Name][i] = object.Key } } diff --git a/bus/bus.go b/bus/bus.go index 6c4b3dc0c..4c3230e36 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -5,13 +5,11 @@ package bus import ( "context" - "encoding/json" "errors" "fmt" "math" "net" "net/http" - "strings" "time" "go.sia.tech/core/consensus" @@ -36,7 +34,6 @@ import ( "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" - "golang.org/x/crypto/blake2b" ) const ( @@ -197,12 +194,12 @@ type ( Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) + Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) RecordHostScans(ctx context.Context, scans []api.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, check api.HostCheck) error @@ -218,7 +215,6 @@ type ( Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) ContractSets(ctx context.Context) ([]string, error) - InsertContract(ctx context.Context, c api.ContractMetadata) error RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error RemoveContractSet(ctx context.Context, name string) error PutContract(ctx context.Context, c api.ContractMetadata) error @@ -238,24 +234,22 @@ type ( ListBuckets(_ context.Context) ([]api.Bucket, error) UpdateBucketPolicy(ctx context.Context, bucketName string, policy api.BucketPolicy) error - CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) - ListObjects(ctx context.Context, bucketName, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) - Object(ctx context.Context, bucketName, path string) (api.Object, error) 
- ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) - ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) + CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) + ListObjects(ctx context.Context, bucketName, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) + Object(ctx context.Context, bucketName, key string) (api.Object, error) + ObjectMetadata(ctx context.Context, bucketName, key string) (api.Object, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) - RemoveObject(ctx context.Context, bucketName, path string) error + RemoveObject(ctx context.Context, bucketName, key string) error RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error RenameObjects(ctx context.Context, bucketName, from, to string, force bool) error - SearchObjects(ctx context.Context, bucketName, substring string, offset, limit int) ([]api.ObjectMetadata, error) - UpdateObject(ctx context.Context, bucketName, path, contractSet, ETag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error + UpdateObject(ctx context.Context, bucketName, key, contractSet, ETag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error - AbortMultipartUpload(ctx context.Context, bucketName, path string, uploadID string) (err error) - AddMultipartPart(ctx context.Context, bucketName, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) - CompleteMultipartUpload(ctx context.Context, bucketName, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) - CreateMultipartUpload(ctx context.Context, bucketName, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) + AbortMultipartUpload(ctx context.Context, bucketName, key string, uploadID string) (err error) + AddMultipartPart(ctx context.Context, bucketName, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) + CompleteMultipartUpload(ctx context.Context, bucketName, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) + CreateMultipartUpload(ctx context.Context, bucketName, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, _ error) MultipartUploads(ctx context.Context, bucketName, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucketName, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) @@ -292,10 +286,17 @@ type ( // A SettingStore stores settings. 
	SettingStore interface {
-		DeleteSetting(ctx context.Context, key string) error
-		Setting(ctx context.Context, key string) (string, error)
-		Settings(ctx context.Context) ([]string, error)
-		UpdateSetting(ctx context.Context, key, value string) error
+		GougingSettings(ctx context.Context) (api.GougingSettings, error)
+		UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error
+
+		PinnedSettings(ctx context.Context) (api.PinnedSettings, error)
+		UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error
+
+		UploadSettings(ctx context.Context) (api.UploadSettings, error)
+		UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error
+
+		S3Settings(ctx context.Context) (api.S3Settings, error)
+		UpdateS3Settings(ctx context.Context, s3as api.S3Settings) error
	}

	WalletMetricsRecorder interface {
@@ -327,6 +328,7 @@ type Bus struct {
	rhp3 *rhp3.Client

	contractLocker ContractLocker
+	explorer       *ibus.Explorer
	sectors        UploadingSectorsCache

	walletMetricsRecorder WalletMetricsRecorder
@@ -334,7 +336,7 @@
}

// New returns a new Bus
-func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) {
+func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, explorerURL string, l *zap.Logger) (_ *Bus, err error) {
	l = l.Named("bus")

	b := &Bus{
@@ -342,6 +344,7 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa
		masterKey: masterKey,

		accounts: store,
+		explorer: ibus.NewExplorer(explorerURL),
		s:        s,
		cm:       cm,
		w:        w,
@@ -360,11 +363,6 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa
		rhp3: rhp3.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l),
	}

-	// init settings
-	if err := b.initSettings(ctx); err != nil {
-		return nil, err
-	}
-
	// create contract locker
	b.contractLocker = ibus.NewContractLocker()

@@ -372,7 +370,7 @@ func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksMa
	b.sectors = ibus.NewSectorsCache()

	// create pin manager
-	b.pinMgr = ibus.NewPinManager(b.alerts, wm, store, defaultPinUpdateInterval, defaultPinRateWindow, l)
+	b.pinMgr = ibus.NewPinManager(b.alerts, wm, b.explorer, store, defaultPinUpdateInterval, defaultPinRateWindow, l)

	// create chain subscriber
	b.cs = ibus.NewChainSubscriber(wm, cm, store, w, announcementMaxAge, l)
@@ -434,7 +432,7 @@ func (b *Bus) Handler() http.Handler {
		"GET /contract/:id/roots": b.contractIDRootsHandlerGET,
		"GET /contract/:id/size": b.contractSizeHandlerGET,

-		"GET /hosts": b.hostsHandlerGETDeprecated,
+		"POST /hosts": b.hostsHandlerPOST,
		"GET /hosts/allowlist": b.hostsAllowlistHandlerGET,
		"PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT,
		"GET /hosts/blocklist": b.hostsBlocklistHandlerGET,
@@ -458,12 +456,12 @@ func (b *Bus) Handler() http.Handler {
		"POST /multipart/listuploads": b.multipartHandlerListUploadsPOST,
		"POST /multipart/listparts": b.multipartHandlerListPartsPOST,

-		"GET /objects/*path": b.objectsHandlerGET,
-		"PUT /objects/*path": b.objectsHandlerPUT,
-		"DELETE /objects/*path": b.objectsHandlerDELETE,
-		"POST /objects/copy": b.objectsCopyHandlerPOST,
-		"POST /objects/rename": b.objectsRenameHandlerPOST,
-		"POST /objects/list": b.objectsListHandlerPOST,
+		"GET /listobjects/*prefix": b.objectsHandlerGET,
+		"GET /objects/*key": b.objectHandlerGET,
"PUT /objects/*key": b.objectsHandlerPUT, + "DELETE /objects/*key": b.objectsHandlerDELETE, + "POST /objects/copy": b.objectsCopyHandlerPOST, + "POST /objects/rename": b.objectsRenameHandlerPOST, "GET /params/gouging": b.paramsHandlerGougingGET, "GET /params/upload": b.paramsHandlerUploadGET, @@ -472,15 +470,16 @@ func (b *Bus) Handler() http.Handler { "POST /slabbuffer/done": b.packedSlabsHandlerDonePOST, "POST /slabbuffer/fetch": b.packedSlabsHandlerFetchPOST, - "POST /search/hosts": b.searchHostsHandlerPOST, - "GET /search/objects": b.searchObjectsHandlerGET, - "DELETE /sectors/:hk/:root": b.sectorsHostRootHandlerDELETE, - "GET /settings": b.settingsHandlerGET, - "GET /setting/:key": b.settingKeyHandlerGET, - "PUT /setting/:key": b.settingKeyHandlerPUT, - "DELETE /setting/:key": b.settingKeyHandlerDELETE, + "GET /settings/gouging": b.settingsGougingHandlerGET, + "PUT /settings/gouging": b.settingsGougingHandlerPUT, + "GET /settings/pinned": b.settingsPinnedHandlerGET, + "PUT /settings/pinned": b.settingsPinnedHandlerPUT, + "GET /settings/s3": b.settingsS3HandlerGET, + "PUT /settings/s3": b.settingsS3HandlerPUT, + "GET /settings/upload": b.settingsUploadHandlerGET, + "PUT /settings/upload": b.settingsUploadHandlerPUT, "POST /slabs/migration": b.slabsMigrationHandlerPOST, "GET /slabs/partial/:key": b.slabsPartialHandlerGET, @@ -505,15 +504,11 @@ func (b *Bus) Handler() http.Handler { "DELETE /upload/:id": b.uploadFinishedHandlerDELETE, "POST /upload/:id/sector": b.uploadAddSectorHandlerPOST, - "GET /wallet": b.walletHandler, - "POST /wallet/discard": b.walletDiscardHandler, - "POST /wallet/fund": b.walletFundHandler, - "GET /wallet/outputs": b.walletOutputsHandler, - "GET /wallet/pending": b.walletPendingHandler, - "POST /wallet/redistribute": b.walletRedistributeHandler, - "POST /wallet/send": b.walletSendSiacoinsHandler, - "POST /wallet/sign": b.walletSignHandler, - "GET /wallet/transactions": b.walletTransactionsHandler, + "GET /wallet": b.walletHandler, + "GET /wallet/events": b.walletEventsHandler, + "GET /wallet/pending": b.walletPendingHandler, + "POST /wallet/redistribute": b.walletRedistributeHandler, + "POST /wallet/send": b.walletSendSiacoinsHandler, "GET /webhooks": b.webhookHandlerGet, "POST /webhooks": b.webhookHandlerPost, @@ -533,7 +528,7 @@ func (b *Bus) Shutdown(ctx context.Context) error { } func (b *Bus) addContract(ctx context.Context, rev rhpv2.ContractRevision, contractPrice, initialRenterFunds types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) { - if err := b.ms.InsertContract(ctx, api.ContractMetadata{ + if err := b.ms.PutContract(ctx, api.ContractMetadata{ ID: rev.ID(), HostKey: rev.HostKey(), StartHeight: startHeight, @@ -614,9 +609,11 @@ func (b *Bus) broadcastContract(ctx context.Context, fcid types.FileContractID) return types.TransactionID{}, fmt.Errorf("couldn't fetch contract; %w", err) } + // derive the renter key + renterKey := b.masterKey.DeriveContractKey(c.HostKey) + // fetch revision - rk := b.deriveRenterKey(c.HostKey) - rev, err := b.rhp2.SignedRevision(ctx, c.HostIP, c.HostKey, rk, fcid, time.Minute) + rev, err := b.rhp2.SignedRevision(ctx, c.HostIP, c.HostKey, renterKey, fcid, time.Minute) if err != nil { return types.TransactionID{}, fmt.Errorf("couldn't fetch revision; %w", err) } @@ -657,7 +654,7 @@ func (b *Bus) broadcastContract(ctx context.Context, fcid types.FileContractID) func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings, renterAddress types.Address, renterFunds, hostCollateral 
func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings, renterAddress types.Address, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostIP string, endHeight uint64) (rhpv2.ContractRevision, error) {
	// derive the renter key
-	renterKey := b.deriveRenterKey(hostKey)
+	renterKey := b.masterKey.DeriveContractKey(hostKey)

	// prepare the transaction
	cs := b.cm.TipState()
@@ -698,143 +695,11 @@ func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings,
	return contract, nil
}

-// initSettings loads the default settings if the setting is not already set and
-// ensures the settings are valid
-func (b *Bus) initSettings(ctx context.Context) error {
-	// testnets have different redundancy settings
-	defaultRedundancySettings := api.DefaultRedundancySettings
-	if mn, _ := chain.Mainnet(); mn.Name != b.cm.TipState().Network.Name {
-		defaultRedundancySettings = api.DefaultRedundancySettingsTestnet
-	}
-
-	// load default settings if the setting is not already set
-	for key, value := range map[string]interface{}{
-		api.SettingGouging:       api.DefaultGougingSettings,
-		api.SettingPricePinning:  api.DefaultPricePinSettings,
-		api.SettingRedundancy:    defaultRedundancySettings,
-		api.SettingUploadPacking: api.DefaultUploadPackingSettings,
-	} {
-		if _, err := b.ss.Setting(ctx, key); errors.Is(err, api.ErrSettingNotFound) {
-			if bytes, err := json.Marshal(value); err != nil {
-				panic("failed to marshal default settings") // should never happen
-			} else if err := b.ss.UpdateSetting(ctx, key, string(bytes)); err != nil {
-				return err
-			}
-		}
-	}
-
-	// check redundancy settings for validity
-	var rs api.RedundancySettings
-	if rss, err := b.ss.Setting(ctx, api.SettingRedundancy); err != nil {
-		return err
-	} else if err := json.Unmarshal([]byte(rss), &rs); err != nil {
-		return err
-	} else if err := rs.Validate(); err != nil {
-		b.logger.Warn(fmt.Sprintf("invalid redundancy setting found '%v', overwriting the redundancy settings with the default settings", rss))
-		bytes, _ := json.Marshal(defaultRedundancySettings)
-		if err := b.ss.UpdateSetting(ctx, api.SettingRedundancy, string(bytes)); err != nil {
-			return err
-		}
-	}
-
-	// check gouging settings for validity
-	var gs api.GougingSettings
-	if gss, err := b.ss.Setting(ctx, api.SettingGouging); err != nil {
-		return err
-	} else if err := json.Unmarshal([]byte(gss), &gs); err != nil {
-		return err
-	} else if err := gs.Validate(); err != nil {
-		// compat: apply default EA gouging settings
-		gs.MinMaxEphemeralAccountBalance = api.DefaultGougingSettings.MinMaxEphemeralAccountBalance
-		gs.MinPriceTableValidity = api.DefaultGougingSettings.MinPriceTableValidity
-		gs.MinAccountExpiry = api.DefaultGougingSettings.MinAccountExpiry
-		if err := gs.Validate(); err == nil {
-			b.logger.Info(fmt.Sprintf("updating gouging settings with default EA settings: %+v", gs))
-			bytes, _ := json.Marshal(gs)
-			if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil {
-				return err
-			}
-		} else {
-			// compat: apply default host block leeway settings
-			gs.HostBlockHeightLeeway = api.DefaultGougingSettings.HostBlockHeightLeeway
-			if err := gs.Validate(); err == nil {
-				b.logger.Info(fmt.Sprintf("updating gouging settings with default HostBlockHeightLeeway settings: %v", gs))
-				bytes, _ := json.Marshal(gs)
-				if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil {
-					return err
-				}
-			} else {
-				b.logger.Warn(fmt.Sprintf("invalid gouging setting found '%v', overwriting the gouging settings with the default settings", gss))
-				bytes, _ := json.Marshal(api.DefaultGougingSettings)
-				if err := b.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	// compat: default price pin settings
-	var pps api.PricePinSettings
-	if pss, err := b.ss.Setting(ctx, api.SettingPricePinning); err != nil {
-		return err
-	} else if err := json.Unmarshal([]byte(pss), &pps); err != nil {
-		return err
-	} else if err := pps.Validate(); err != nil {
-		// overwrite values with defaults
-		var updates []string
-		if pps.ForexEndpointURL == "" {
-			pps.ForexEndpointURL = api.DefaultPricePinSettings.ForexEndpointURL
-			updates = append(updates, fmt.Sprintf("set PricePinSettings.ForexEndpointURL to %v", pps.ForexEndpointURL))
-		}
-		if pps.Currency == "" {
-			pps.Currency = api.DefaultPricePinSettings.Currency
-			updates = append(updates, fmt.Sprintf("set PricePinSettings.Currency to %v", pps.Currency))
-		}
-		if pps.Threshold == 0 {
-			pps.Threshold = api.DefaultPricePinSettings.Threshold
-			updates = append(updates, fmt.Sprintf("set PricePinSettings.Threshold to %v", pps.Threshold))
-		}
-
-		var updated []byte
-		if err := pps.Validate(); err == nil {
-			b.logger.Info(fmt.Sprintf("updating price pinning settings with default values: %v", strings.Join(updates, ", ")))
-			updated, _ = json.Marshal(pps)
-		} else {
-			b.logger.Warn(fmt.Sprintf("updated price pinning settings are invalid (%v), they have been overwritten with the default settings", err))
-			updated, _ = json.Marshal(api.DefaultPricePinSettings)
-		}
-
-		if err := b.ss.UpdateSetting(ctx, api.SettingPricePinning, string(updated)); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
func (b *Bus) isPassedV2AllowHeight() bool {
	cs := b.cm.TipState()
	return cs.Index.Height >= cs.Network.HardforkV2.AllowHeight
}

-func (b *Bus) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey {
-	seed := blake2b.Sum256(append(b.deriveSubKey("renterkey"), hostKey[:]...))
-	pk := types.NewPrivateKeyFromSeed(seed[:])
-	for i := range seed {
-		seed[i] = 0
-	}
-	return pk
-}
-
-func (b *Bus) deriveSubKey(purpose string) types.PrivateKey {
-	seed := blake2b.Sum256(append(b.masterKey[:], []byte(purpose)...))
-	pk := types.NewPrivateKeyFromSeed(seed[:])
-	for i := range seed {
-		seed[i] = 0
-	}
-	return pk
-}
-
func (b *Bus) prepareRenew(cs consensus.State, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, endHeight, expectedStorage uint64) rhp3.PrepareRenewFn {
	return func(pt rhpv3.HostPriceTable) ([]types.Hash256, []types.Transaction, types.Currency, rhp3.DiscardTxnFn, error) {
		// create the final revision from the provided revision
@@ -883,6 +748,9 @@ func (b *Bus) prepareRenew(cs consensus.State, revision types.FileContractRevisi
}

func (b *Bus) renewContract(ctx context.Context, cs consensus.State, gp api.GougingParams, c api.ContractMetadata, hs rhpv2.HostSettings, renterFunds, minNewCollateral, maxFundAmount types.Currency, endHeight, expectedNewStorage uint64) (rhpv2.ContractRevision, types.Currency, types.Currency, error) {
+	// derive the renter key
+	renterKey := b.masterKey.DeriveContractKey(c.HostKey)
+
	// acquire contract lock indefinitely and defer the release
	lockID, err := b.contractLocker.Acquire(ctx, lockingPriorityRenew, c.ID, time.Duration(math.MaxInt64))
	if err != nil {
@@ -902,7 +770,6 @@ func (b *Bus) renewContract(ctx context.Context, cs consensus.State, gp api.Goug

	// renew contract
	gc := gouging.NewChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, nil, nil)
-	renterKey := b.deriveRenterKey(c.HostKey)
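Editor's note: the bus no longer derives renter keys ad hoc; the deleted `deriveRenterKey`/`deriveSubKey` helpers above are replaced by a single `masterKey.DeriveContractKey(hostKey)` call. Below is a minimal sketch of an equivalent derivation, mirroring the removed code; the actual `MasterKey` type lives elsewhere in the repo and may differ in detail.

```go
package example

import (
	"go.sia.tech/core/types"
	"golang.org/x/crypto/blake2b"
)

// MasterKey is a hypothetical stand-in for renterd's master key type.
type MasterKey [32]byte

// DeriveContractKey mirrors the removed deriveSubKey("renterkey") +
// deriveRenterKey(hostKey) logic in one method: hash the master key with a
// purpose string, then hash the resulting sub-key with the host's public key.
func (k MasterKey) DeriveContractKey(hostKey types.PublicKey) types.PrivateKey {
	// derive the "renterkey" sub-key from the master key
	sub := blake2b.Sum256(append(k[:], []byte("renterkey")...))
	subKey := types.NewPrivateKeyFromSeed(sub[:])

	// derive the per-host renter key from the sub-key and host key
	seed := blake2b.Sum256(append(subKey, hostKey[:]...))
	pk := types.NewPrivateKeyFromSeed(seed[:])
	for i := range seed {
		seed[i] = 0 // wipe the seed, as the removed code did
	}
	return pk
}
```

Centralizing the derivation means every call site (forming, renewing, pruning, broadcasting) provably uses the same key for a given host.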
	prepareRenew := b.prepareRenew(cs, rev, hs.Address, b.w.Address(), renterFunds, minNewCollateral, maxFundAmount, endHeight, expectedNewStorage)
	newRevision, txnSet, contractPrice, fundAmount, err := b.rhp3.Renew(ctx, gc, rev, renterKey, c.HostKey, c.SiamuxAddr, prepareRenew, b.w.SignTransaction)
	if err != nil {
diff --git a/bus/client/client.go b/bus/client/client.go
index c31b6d4a2..8ca1bf81e 100644
--- a/bus/client/client.go
+++ b/bus/client/client.go
@@ -1,11 +1,8 @@
package client

import (
-	"net/http"
-
	"go.sia.tech/jape"
	"go.sia.tech/renterd/api"
-	"go.sia.tech/renterd/internal/utils"
)

// A Client provides methods for interacting with a bus.
@@ -26,12 +23,3 @@ func (c *Client) State() (state api.BusStateResponse, err error) {
	err = c.c.GET("/state", &state)
	return
}
-
-func (c *Client) do(req *http.Request, resp interface{}) error {
-	req.Header.Set("Content-Type", "application/json")
-	if c.c.Password != "" {
-		req.SetBasicAuth("", c.c.Password)
-	}
-	_, _, err := utils.DoRequest(req, &resp)
-	return err
-}
diff --git a/bus/client/hosts.go b/bus/client/hosts.go
index 1e09ab3ea..d7aa5f6db 100644
--- a/bus/client/hosts.go
+++ b/bus/client/hosts.go
@@ -16,6 +16,20 @@ func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h api.Host,
	return
}

+// Hosts returns all hosts that match certain search criteria.
+func (c *Client) Hosts(ctx context.Context, opts api.HostOptions) (hosts []api.Host, err error) {
+	err = c.c.WithContext(ctx).POST("/hosts", api.HostsRequest{
+		AutopilotID:     opts.AutopilotID,
+		Offset:          opts.Offset,
+		Limit:           opts.Limit,
+		FilterMode:      opts.FilterMode,
+		UsabilityMode:   opts.UsabilityMode,
+		AddressContains: opts.AddressContains,
+		KeyIn:           opts.KeyIn,
+	}, &hosts)
+	return
+}
+
// HostAllowlist returns the allowlist.
func (c *Client) HostAllowlist(ctx context.Context) (allowlist []types.PublicKey, err error) {
	err = c.c.WithContext(ctx).GET("/hosts/allowlist", &allowlist)
@@ -28,14 +42,6 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err
	return
}

-// Hosts returns 'limit' hosts at given 'offset'.
-func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []api.Host, err error) {
-	values := url.Values{}
-	opts.Apply(values)
-	err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts)
-	return
-}
-
// HostsForScanning returns 'limit' host addresses at given 'offset' which
// haven't been scanned after lastScan.
func (c *Client) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) (hosts []api.HostAddress, err error) {
@@ -76,20 +82,6 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey)
	return
}

-// SearchHosts returns all hosts that match certain search criteria.
-func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.Host, err error) {
-	err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{
-		AutopilotID:     opts.AutopilotID,
-		Offset:          opts.Offset,
-		Limit:           opts.Limit,
-		FilterMode:      opts.FilterMode,
-		UsabilityMode:   opts.UsabilityMode,
-		AddressContains: opts.AddressContains,
-		KeyIn:           opts.KeyIn,
-	}, &hosts)
-	return
-}
-
// UpdateHostAllowlist updates the host allowlist, adding and removing the given entries.
func (c *Client) UpdateHostAllowlist(ctx context.Context, add, remove []types.PublicKey, clear bool) (err error) {
	err = c.c.WithContext(ctx).PUT("/hosts/allowlist", api.UpdateAllowlistRequest{Add: add, Remove: remove, Clear: clear})
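Editor's note: with `GET /hosts` and `POST /search/hosts` folded into a single `POST /hosts` route, listing and searching hosts is one client call. A sketch of how the consolidated endpoint might be used; the client variable, option values, and printed fields are illustrative, and per the handler below, usability filtering requires an autopilot ID.

```go
package example

import (
	"context"
	"fmt"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/bus/client"
)

// printUsableHosts lists up to 100 usable, allowed hosts known to the given
// autopilot whose announced address contains "sia".
func printUsableHosts(ctx context.Context, bc *client.Client) error {
	hosts, err := bc.Hosts(ctx, api.HostOptions{
		AutopilotID:     "autopilot", // required when filtering by usability
		FilterMode:      api.HostFilterModeAllowed,
		UsabilityMode:   api.UsabilityFilterModeUsable,
		AddressContains: "sia",
		Limit:           100,
	})
	if err != nil {
		return err
	}
	for _, h := range hosts {
		fmt.Println(h.PublicKey, h.NetAddress)
	}
	return nil
}
```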
diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go
index 6fd06204c..8460eeb07 100644
--- a/bus/client/multipart-upload.go
+++ b/bus/client/multipart-upload.go
@@ -9,21 +9,21 @@ import (
)

// AbortMultipartUpload aborts a multipart upload.
-func (c *Client) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) {
+func (c *Client) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) (err error) {
	err = c.c.WithContext(ctx).POST("/multipart/abort", api.MultipartAbortRequest{
		Bucket:   bucket,
-		Path:     path,
+		Key:      key,
		UploadID: uploadID,
	}, nil)
	return
}

// AddMultipartPart adds a part to a multipart upload.
-func (c *Client) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
+func (c *Client) AddMultipartPart(ctx context.Context, bucket, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
	err = c.c.WithContext(ctx).PUT("/multipart/part", api.MultipartAddPartRequest{
		Bucket:      bucket,
		ETag:        eTag,
-		Path:        path,
+		Key:         key,
		ContractSet: contractSet,
		UploadID:    uploadID,
		PartNumber:  partNumber,
@@ -33,10 +33,10 @@ func (c *Client) AddMultipartPart(ctx context.Context, bucket, path, contractSet
}

// CompleteMultipartUpload completes a multipart upload.
-func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (resp api.MultipartCompleteResponse, err error) {
+func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (resp api.MultipartCompleteResponse, err error) {
	err = c.c.WithContext(ctx).POST("/multipart/complete", api.MultipartCompleteRequest{
		Bucket:   bucket,
-		Path:     path,
+		Key:      key,
		Metadata: opts.Metadata,
		UploadID: uploadID,
		Parts:    parts,
@@ -45,14 +45,13 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo
}

// CreateMultipartUpload creates a new multipart upload.
-func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) {
+func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, key string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) {
	err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{
-		Bucket:      bucket,
-		GenerateKey: opts.GenerateKey,
-		Path:        path,
-		Key:         opts.Key,
-		MimeType:    opts.MimeType,
-		Metadata:    opts.Metadata,
+		Bucket:                      bucket,
+		DisableClientSideEncryption: opts.DisableClientSideEncryption,
+		Key:                         key,
+		MimeType:                    opts.MimeType,
+		Metadata:                    opts.Metadata,
	}, &resp)
	return
}
@@ -68,7 +67,7 @@ func (c *Client) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker
	err = c.c.WithContext(ctx).POST("/multipart/listuploads", api.MultipartListUploadsRequest{
		Bucket:         bucket,
		Prefix:         prefix,
-		PathMarker:     keyMarker,
+		KeyMarker:      keyMarker,
		UploadIDMarker: uploadIDMarker,
		Limit:          maxUploads,
	}, &resp)
@@ -76,10 +75,10 @@ func (c *Client) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker
}

// MultipartUploadParts returns information about all parts of a multipart upload.
-func (c *Client) MultipartUploadParts(ctx context.Context, bucket, path string, uploadID string, partNumberMarker int, limit int64) (resp api.MultipartListPartsResponse, err error) {
+func (c *Client) MultipartUploadParts(ctx context.Context, bucket, key string, uploadID string, partNumberMarker int, limit int64) (resp api.MultipartListPartsResponse, err error) {
	err = c.c.WithContext(ctx).POST("/multipart/listparts", api.MultipartListPartsRequest{
		Bucket:           bucket,
-		Path:             path,
+		Key:              key,
		UploadID:         uploadID,
		PartNumberMarker: partNumberMarker,
		Limit:            limit,
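Editor's note: the multipart client now speaks in object keys rather than paths. A sketch of registering and completing a single-part upload under the renamed API; the bucket, key, and contract set values are illustrative, and the `api.MultipartCompletedPart` field names are an assumption not shown in this diff.

```go
package example

import (
	"context"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/bus/client"
	"go.sia.tech/renterd/object"
)

// finishUpload registers one uploaded part with the bus and completes the
// multipart upload; uploadID, etag and slices come from earlier steps.
func finishUpload(ctx context.Context, bc *client.Client, uploadID, etag string, slices []object.SlabSlice) error {
	// register the part under its key (formerly "path")
	if err := bc.AddMultipartPart(ctx, "default", "movies/movie.mkv", "autopilot", etag, uploadID, 1, slices); err != nil {
		return err
	}
	// complete the upload with the list of part numbers and ETags
	parts := []api.MultipartCompletedPart{{PartNumber: 1, ETag: etag}}
	_, err := bc.CompleteMultipartUpload(ctx, "default", "movies/movie.mkv", uploadID, parts, api.CompleteMultipartOptions{})
	return err
}
```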
diff --git a/bus/client/objects.go b/bus/client/objects.go
index fca893a49..1a1f7fc0b 100644
--- a/bus/client/objects.go
+++ b/bus/client/objects.go
@@ -11,7 +11,7 @@ import (

// AddObject stores the provided object under the given path.
func (c *Client) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) {
-	path = api.ObjectPathEscape(path)
+	path = api.ObjectKeyEscape(path)
	err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/objects/%s", path), api.AddObjectRequest{
		Bucket:      bucket,
		ContractSet: contractSet,
@@ -25,53 +25,53 @@ func (c *Client) AddObject(ctx context.Context, bucket, path, contractSet string

// CopyObject copies the object from the source bucket and path to the
// destination bucket and path.
-func (c *Client) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) {
+func (c *Client) CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) {
	err = c.c.WithContext(ctx).POST("/objects/copy", api.CopyObjectsRequest{
		SourceBucket:      srcBucket,
		DestinationBucket: dstBucket,
-		SourcePath:        srcPath,
-		DestinationPath:   dstPath,
+		SourceKey:         srcKey,
+		DestinationKey:    dstKey,
		MimeType:          opts.MimeType,
		Metadata:          opts.Metadata,
	}, &om)
	return
}

-// DeleteObject either deletes the object at the given path or if batch=true
-// deletes all objects that start with the given path.
-func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) {
+// DeleteObject either deletes the object at the given key or if batch=true
+// deletes all objects that start with the given key.
+func (c *Client) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) {
	values := url.Values{}
	values.Set("bucket", bucket)
	opts.Apply(values)
-	path = api.ObjectPathEscape(path)
-	err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), path))
+	key = api.ObjectKeyEscape(key)
+	err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), key))
	return
}

-// ListOBjects lists objects in the given bucket.
-func (c *Client) ListObjects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) {
-	err = c.c.WithContext(ctx).POST("/objects/list", api.ObjectsListRequest{
-		Bucket:  bucket,
-		Limit:   opts.Limit,
-		Prefix:  opts.Prefix,
-		Marker:  opts.Marker,
-		SortBy:  opts.SortBy,
-		SortDir: opts.SortDir,
-	}, &resp)
+// Object returns the object at the given key.
+func (c *Client) Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (res api.Object, err error) {
+	values := url.Values{}
+	values.Set("bucket", bucket)
+	opts.Apply(values)
+
+	key = api.ObjectKeyEscape(key)
+	key += "?" + values.Encode()
+
+	err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", key), &res)
	return
}

-// Objects returns the object at given path.
-func (c *Client) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.ObjectsResponse, err error) {
+// Objects lists objects in the given bucket.
+func (c *Client) Objects(ctx context.Context, bucket string, prefix string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) {
	values := url.Values{}
	values.Set("bucket", bucket)
	opts.Apply(values)

-	path = api.ObjectPathEscape(path)
-	path += "?" + values.Encode()
+	prefix = api.ObjectKeyEscape(prefix)
+	prefix += "?" + values.Encode()

-	err = c.c.WithContext(ctx).GET(fmt.Sprintf("/objects/%s", path), &res)
+	err = c.c.WithContext(ctx).GET(fmt.Sprintf("/listobjects/%s", prefix), &resp)
	return
}

@@ -103,16 +103,6 @@ func (c *Client) RenameObjects(ctx context.Context, bucket, from, to string, for
	return c.renameObjects(ctx, bucket, from, to, api.ObjectsRenameModeMulti, force)
}

-// SearchObjects returns all objects that contains a sub-string in their key.
-func (c *Client) SearchObjects(ctx context.Context, bucket string, opts api.SearchObjectOptions) (entries []api.ObjectMetadata, err error) {
-	values := url.Values{}
-	values.Set("bucket", bucket)
-	opts.Apply(values)
-
-	err = c.c.WithContext(ctx).GET("/search/objects?"+values.Encode(), &entries)
-	return
-}
-
func (c *Client) renameObjects(ctx context.Context, bucket, from, to, mode string, force bool) (err error) {
	err = c.c.WithContext(ctx).POST("/objects/rename", api.ObjectsRenameRequest{
		Bucket: bucket,
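Editor's note: with `SearchObjects` and the old `POST /objects/list` gone, listing goes through `Objects` with a prefix plus `api.ListObjectOptions`. A sketch of paging through a bucket; the `HasMore`/`NextMarker`/`Objects` response fields are assumed to follow `api.ObjectsListResponse` and are not spelled out in this diff.

```go
package example

import (
	"context"
	"fmt"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/bus/client"
)

// listAll pages through every object below the given prefix, 100 at a time,
// using the marker returned by the previous page.
func listAll(ctx context.Context, bc *client.Client, bucket, prefix string) error {
	var marker string
	for {
		resp, err := bc.Objects(ctx, bucket, prefix, api.ListObjectOptions{
			Marker: marker,
			Limit:  100,
		})
		if err != nil {
			return err
		}
		for _, obj := range resp.Objects {
			fmt.Println(obj.Key, obj.Size)
		}
		if !resp.HasMore {
			return nil
		}
		marker = resp.NextMarker
	}
}
```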
diff --git a/bus/client/settings.go b/bus/client/settings.go
index 22714cf8b..74d5f34c9 100644
--- a/bus/client/settings.go
+++ b/bus/client/settings.go
@@ -2,65 +2,50 @@ package client

import (
	"context"
-	"fmt"

	"go.sia.tech/renterd/api"
)

-// ContractSetSettings returns the contract set settings.
-func (c *Client) ContractSetSettings(ctx context.Context) (gs api.ContractSetSetting, err error) {
-	err = c.Setting(ctx, api.SettingContractSet, &gs)
-	return
-}
-
-// DeleteSetting will delete the setting with given key.
-func (c *Client) DeleteSetting(ctx context.Context, key string) error {
-	return c.c.WithContext(ctx).DELETE(fmt.Sprintf("/setting/%s", key))
-}
-
// GougingSettings returns the gouging settings.
func (c *Client) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) {
-	err = c.Setting(ctx, api.SettingGouging, &gs)
+	err = c.c.WithContext(ctx).GET("/settings/gouging", &gs)
	return
}

-// PricePinningSettings returns the contract set settings.
-func (c *Client) PricePinningSettings(ctx context.Context) (pps api.PricePinSettings, err error) {
-	err = c.Setting(ctx, api.SettingPricePinning, &pps)
-	return
+// UpdateGougingSettings updates the gouging settings.
+func (c *Client) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error {
+	return c.c.WithContext(ctx).PUT("/settings/gouging", gs)
}

-// RedundancySettings returns the redundancy settings.
-func (c *Client) RedundancySettings(ctx context.Context) (rs api.RedundancySettings, err error) {
-	err = c.Setting(ctx, api.SettingRedundancy, &rs)
+// PinnedSettings returns the pinned settings.
+func (c *Client) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) {
+	err = c.c.WithContext(ctx).GET("/settings/pinned", &ps)
	return
}

-// S3AuthenticationSettings returns the S3 authentication settings.
-func (c *Client) S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) {
-	err = c.Setting(ctx, api.SettingS3Authentication, &as)
-	return
+// UpdatePinnedSettings updates the pinned settings.
+func (c *Client) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error {
+	return c.c.WithContext(ctx).PUT("/settings/pinned", ps)
}

-// Setting returns the value for the setting with given key.
-func (c *Client) Setting(ctx context.Context, key string, value interface{}) (err error) {
-	err = c.c.WithContext(ctx).GET(fmt.Sprintf("/setting/%s", key), &value)
+// S3Settings returns the S3 settings.
+func (c *Client) S3Settings(ctx context.Context) (as api.S3Settings, err error) {
+	err = c.c.WithContext(ctx).GET("/settings/s3", &as)
	return
}

-// Settings returns the keys of all settings.
-func (c *Client) Settings(ctx context.Context) (settings []string, err error) {
-	err = c.c.WithContext(ctx).GET("/settings", &settings)
-	return
+// UpdateS3Settings updates the S3 settings.
+func (c *Client) UpdateS3Settings(ctx context.Context, as api.S3Settings) error {
+	return c.c.WithContext(ctx).PUT("/settings/s3", as)
}

-// UpdateSetting will update the given setting under the given key.
-func (c *Client) UpdateSetting(ctx context.Context, key string, value interface{}) error {
-	return c.c.WithContext(ctx).PUT(fmt.Sprintf("/setting/%s", key), value)
+// UploadSettings returns the upload settings.
+func (c *Client) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) {
+	err = c.c.WithContext(ctx).GET("/settings/upload", &us)
	return
}

-// UploadPackingSettings returns the upload packing settings.
-func (c *Client) UploadPackingSettings(ctx context.Context) (ups api.UploadPackingSettings, err error) {
-	err = c.Setting(ctx, api.SettingUploadPacking, &ups)
-	return
+// UpdateUploadSettings updates the upload settings.
+func (c *Client) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error {
+	return c.c.WithContext(ctx).PUT("/settings/upload", us)
}
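Editor's note: on the client side, the generic `Setting`/`UpdateSetting` pair becomes one typed getter and setter per settings object, so an update is a read-modify-write on a concrete struct instead of raw JSON. A sketch; the field tweaked and its value are illustrative.

```go
package example

import (
	"context"

	"go.sia.tech/renterd/bus/client"
)

// tightenGouging fetches the typed gouging settings, adjusts one field and
// writes the struct back; validation now happens server-side on PUT.
func tightenGouging(ctx context.Context, bc *client.Client) error {
	gs, err := bc.GougingSettings(ctx)
	if err != nil {
		return err
	}
	gs.HostBlockHeightLeeway = 12 // illustrative value
	return bc.UpdateGougingSettings(ctx, gs)
}
```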
diff --git a/bus/client/wallet.go b/bus/client/wallet.go
index d91289b56..5314ade52 100644
--- a/bus/client/wallet.go
+++ b/bus/client/wallet.go
@@ -2,11 +2,10 @@ package client

import (
	"context"
-	"fmt"
-	"net/http"
	"net/url"

	"go.sia.tech/core/types"
+	"go.sia.tech/coreutils/wallet"
	"go.sia.tech/renterd/api"
)

@@ -27,37 +26,9 @@ func (c *Client) Wallet(ctx context.Context) (resp api.WalletResponse, err error
	return
}

-// WalletDiscard discards the provided txn, make its inputs usable again. This
-// should only be called on transactions that will never be broadcast.
-func (c *Client) WalletDiscard(ctx context.Context, txn types.Transaction) error {
-	return c.c.WithContext(ctx).POST("/wallet/discard", txn, nil)
-}
-
-// WalletFund funds txn using inputs controlled by the wallet.
-func (c *Client) WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTransactions bool) ([]types.Hash256, []types.Transaction, error) {
-	req := api.WalletFundRequest{
-		Transaction:        *txn,
-		Amount:             amount,
-		UseUnconfirmedTxns: useUnconfirmedTransactions,
-	}
-	var resp api.WalletFundResponse
-	err := c.c.WithContext(ctx).POST("/wallet/fund", req, &resp)
-	if err != nil {
-		return nil, nil, err
-	}
-	*txn = resp.Transaction
-	return resp.ToSign, resp.DependsOn, nil
-}
-
-// WalletOutputs returns the set of unspent outputs controlled by the wallet.
-func (c *Client) WalletOutputs(ctx context.Context) (resp []api.SiacoinElement, err error) {
-	err = c.c.WithContext(ctx).GET("/wallet/outputs", &resp)
-	return
-}
-
// WalletPending returns the txpool transactions that are relevant to the
// wallet.
-func (c *Client) WalletPending(ctx context.Context) (resp []types.Transaction, err error) {
+func (c *Client) WalletPending(ctx context.Context) (resp []wallet.Event, err error) {
	err = c.c.WithContext(ctx).GET("/wallet/pending", &resp)
	return
}
@@ -75,33 +46,12 @@ func (c *Client) WalletRedistribute(ctx context.Context, outputs int, amount typ
	return
}

-// WalletSign signs txn using the wallet's private key.
-func (c *Client) WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error {
-	req := api.WalletSignRequest{
-		Transaction:   *txn,
-		ToSign:        toSign,
-		CoveredFields: cf,
-	}
-	return c.c.WithContext(ctx).POST("/wallet/sign", req, txn)
-}
-
-// WalletTransactions returns all transactions relevant to the wallet.
-func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []api.Transaction, err error) {
-	c.c.Custom("GET", "/wallet/transactions", nil, &resp)
-
+// WalletEvents returns all events relevant to the wallet.
+func (c *Client) WalletEvents(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []wallet.Event, err error) {
	values := url.Values{}
	for _, opt := range opts {
		opt(values)
	}
-	u, err := url.Parse(fmt.Sprintf("%v/wallet/transactions", c.c.BaseURL))
-	if err != nil {
-		panic(err)
-	}
-	u.RawQuery = values.Encode()
-	req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody)
-	if err != nil {
-		panic(err)
-	}
-	err = c.do(req, &resp)
+	err = c.c.WithContext(ctx).GET("/wallet/events?"+values.Encode(), &resp)
	return
}
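Editor's note: the wallet surface shrinks to events: `WalletEvents` replaces `WalletTransactions`, and `WalletPending` now returns unconfirmed `wallet.Event` values from coreutils. A sketch of dumping recent activity; the printed fields follow coreutils' `wallet.Event` and the client variable is assumed.

```go
package example

import (
	"context"
	"fmt"

	"go.sia.tech/renterd/bus/client"
)

// printEvents prints confirmed and unconfirmed wallet events.
func printEvents(ctx context.Context, bc *client.Client) error {
	confirmed, err := bc.WalletEvents(ctx) // no options: default paging
	if err != nil {
		return err
	}
	pending, err := bc.WalletPending(ctx)
	if err != nil {
		return err
	}
	for _, e := range append(confirmed, pending...) {
		fmt.Println(e.Timestamp, e.Type, e.ID)
	}
	return nil
}
```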
diff --git a/bus/routes.go b/bus/routes.go
index a3f89cbbb..4ede3f3fd 100644
--- a/bus/routes.go
+++ b/bus/routes.go
@@ -16,14 +16,13 @@ import (
	rhpv2 "go.sia.tech/core/rhp/v2"
	rhp3 "go.sia.tech/renterd/internal/rhp/v3"
+	"go.sia.tech/renterd/stores/sql"

-	ibus "go.sia.tech/renterd/internal/bus"
	"go.sia.tech/renterd/internal/gouging"
	rhp2 "go.sia.tech/renterd/internal/rhp/v2"

	"go.sia.tech/core/gateway"
	"go.sia.tech/core/types"
-	"go.sia.tech/coreutils/wallet"
	"go.sia.tech/gofakes3"
	"go.sia.tech/jape"
	"go.sia.tech/renterd/alerts"
@@ -35,15 +34,6 @@ import (
	"go.uber.org/zap"
)

-func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) error {
-	if val, err := b.ss.Setting(ctx, key); err != nil {
-		return fmt.Errorf("could not get contract set settings: %w", err)
-	} else if err := json.Unmarshal([]byte(val), &value); err != nil {
-		b.logger.Panicf("failed to unmarshal %v settings '%s': %v", key, val, err)
-	}
-	return nil
-}
-
func (b *Bus) accountsFundHandler(jc jape.Context) {
	var req api.AccountsFundRequest
	if jc.Decode(&req) != nil {
@@ -272,18 +262,14 @@ func (b *Bus) walletHandler(jc jape.Context) {
		return
	}

-	tip := b.w.Tip()
	jc.Encode(api.WalletResponse{
-		ScanHeight:  tip.Height,
-		Address:     address,
-		Confirmed:   balance.Confirmed,
-		Spendable:   balance.Spendable,
-		Unconfirmed: balance.Unconfirmed,
-		Immature:    balance.Immature,
+		Balance:    balance,
+		Address:    address,
+		ScanHeight: b.w.Tip().Height,
	})
}

-func (b *Bus) walletTransactionsHandler(jc jape.Context) {
+func (b *Bus) walletEventsHandler(jc jape.Context) {
	offset := 0
	limit := -1
	if jc.DecodeForm("offset", &offset) != nil ||
@@ -291,126 +277,20 @@
		jc.DecodeForm("limit", &limit) != nil {
		return
	}

-	// TODO: deprecate these parameters when moving to v2.0.0
-	var before, since time.Time
-	if jc.DecodeForm("before", (*api.TimeRFC3339)(&before)) != nil ||
-		jc.DecodeForm("since", (*api.TimeRFC3339)(&since)) != nil {
-		return
-	}
-
-	// convertToTransaction converts wallet event data to a Transaction.
-	convertToTransaction := func(kind string, data wallet.EventData) (txn types.Transaction, ok bool) {
-		ok = true
-		switch kind {
-		case wallet.EventTypeMinerPayout,
-			wallet.EventTypeFoundationSubsidy,
-			wallet.EventTypeSiafundClaim:
-			payout, _ := data.(wallet.EventPayout)
-			txn = types.Transaction{SiacoinOutputs: []types.SiacoinOutput{payout.SiacoinElement.SiacoinOutput}}
-		case wallet.EventTypeV1Transaction:
-			v1Txn, _ := data.(wallet.EventV1Transaction)
-			txn = types.Transaction(v1Txn.Transaction)
-		case wallet.EventTypeV1ContractResolution:
-			fce, _ := data.(wallet.EventV1ContractResolution)
-			txn = types.Transaction{
-				FileContracts:  []types.FileContract{fce.Parent.FileContract},
-				SiacoinOutputs: []types.SiacoinOutput{fce.SiacoinElement.SiacoinOutput},
-			}
-		default:
-			ok = false
-		}
-		return
-	}
-
-	// convertToTransactions converts wallet events to API transactions.
-	convertToTransactions := func(events []wallet.Event) []api.Transaction {
-		var transactions []api.Transaction
-		for _, e := range events {
-			if txn, ok := convertToTransaction(e.Type, e.Data); ok {
-				transactions = append(transactions, api.Transaction{
-					Raw:       txn,
-					Index:     e.Index,
-					ID:        types.TransactionID(e.ID),
-					Inflow:    e.SiacoinInflow(),
-					Outflow:   e.SiacoinOutflow(),
-					Timestamp: e.Timestamp,
-				})
-			}
-		}
-		return transactions
-	}
-
-	if before.IsZero() && since.IsZero() {
-		events, err := b.w.Events(offset, limit)
-		if jc.Check("couldn't load transactions", err) == nil {
-			jc.Encode(convertToTransactions(events))
-		}
-		return
-	}
-
-	// TODO: remove this when 'before' and 'since' are deprecated, until then we
-	// fetch all transactions and paginate manually if either is specified
-	events, err := b.w.Events(0, -1)
-	if jc.Check("couldn't load transactions", err) != nil {
+	events, err := b.w.Events(offset, limit)
+	if jc.Check("couldn't load events", err) != nil {
		return
	}
-	filtered := events[:0]
-	for _, txn := range events {
-		if (before.IsZero() || txn.Timestamp.Before(before)) &&
-			(since.IsZero() || txn.Timestamp.After(since)) {
-			filtered = append(filtered, txn)
-		}
-	}
-	events = filtered
-	if limit == 0 || limit == -1 {
-		jc.Encode(convertToTransactions(events[offset:]))
-	} else {
-		jc.Encode(convertToTransactions(events[offset : offset+limit]))
-	}
-}
-
-func (b *Bus) walletOutputsHandler(jc jape.Context) {
-	utxos, err := b.w.SpendableOutputs()
-	if jc.Check("couldn't load outputs", err) == nil {
-		// convert to siacoin elements
-		elements := make([]api.SiacoinElement, len(utxos))
-		for i, sce := range utxos {
-			elements[i] = api.SiacoinElement{
-				ID: sce.StateElement.ID,
-				SiacoinOutput: types.SiacoinOutput{
-					Value:   sce.SiacoinOutput.Value,
-					Address: sce.SiacoinOutput.Address,
-				},
-				MaturityHeight: sce.MaturityHeight,
-			}
-		}
-		jc.Encode(elements)
+	relevant := []types.Address{b.w.Address()}
+	for i := range events {
+		// NOTE: add the wallet's address to every event. Ideally this
+		// information would be persisted next to the event, but since
+		// this is a single-address wallet that only persists relevant
+		// events, the wallet's one address is always the correct value.
+		events[i].Relevant = relevant
	}
-}
-
-func (b *Bus) walletFundHandler(jc jape.Context) {
-	var wfr api.WalletFundRequest
-	if jc.Decode(&wfr) != nil {
-		return
-	}
-	txn := wfr.Transaction
-
-	if len(txn.MinerFees) == 0 {
-		// if no fees are specified, we add some
-		fee := b.cm.RecommendedFee().Mul64(b.cm.TipState().TransactionWeight(txn))
-		txn.MinerFees = []types.Currency{fee}
-	}
-
-	toSign, err := b.w.FundTransaction(&txn, wfr.Amount.Add(txn.MinerFees[0]), wfr.UseUnconfirmedTxns)
-	if jc.Check("couldn't fund transaction", err) != nil {
-		return
-	}
-
-	jc.Encode(api.WalletFundResponse{
-		Transaction: txn,
-		ToSign:      toSign,
-		DependsOn:   b.cm.UnconfirmedParents(txn),
-	})
+	jc.Encode(events)
}

func (b *Bus) walletSendSiacoinsHandler(jc jape.Context) {
@@ -488,15 +368,6 @@
	}
}

-func (b *Bus) walletSignHandler(jc jape.Context) {
-	var wsr api.WalletSignRequest
-	if jc.Decode(&wsr) != nil {
-		return
-	}
-	b.w.SignTransaction(&wsr.Transaction, wsr.ToSign, wsr.CoveredFields)
-	jc.Encode(wsr.Transaction)
-}
-
func (b *Bus) walletRedistributeHandler(jc jape.Context) {
	var wfr api.WalletRedistributeRequest
	if jc.Decode(&wfr) != nil {
@@ -507,10 +378,27 @@
		return
	}

+	spendableOutputs, err := b.w.SpendableOutputs()
+	if jc.Check("couldn't fetch spendable outputs", err) != nil {
+		return
+	}
+	var available int
+	for _, so := range spendableOutputs {
+		if so.SiacoinOutput.Value.Cmp(wfr.Amount) >= 0 {
+			available++
+		}
+	}
+	if available >= wfr.Outputs {
+		b.logger.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", available, wfr.Outputs)
+		jc.Encode([]types.TransactionID{})
+		return
+	}
+	wantedOutputs := wfr.Outputs - available
+
	var ids []types.TransactionID
	if state := b.cm.TipState(); state.Index.Height < state.Network.HardforkV2.AllowHeight {
		// v1 redistribution
-		txns, toSign, err := b.w.Redistribute(wfr.Outputs, wfr.Amount, b.cm.RecommendedFee())
+		txns, toSign, err := b.w.Redistribute(wantedOutputs, wfr.Amount, b.cm.RecommendedFee())
		if jc.Check("couldn't redistribute money in the wallet into the desired outputs", err) != nil {
			return
		}
@@ -532,7 +420,7 @@
		}
	} else {
		// v2 redistribution
-		txns, toSign, err := b.w.RedistributeV2(wfr.Outputs, wfr.Amount, b.cm.RecommendedFee())
+		txns, toSign, err := b.w.RedistributeV2(wantedOutputs, wfr.Amount, b.cm.RecommendedFee())
		if jc.Check("couldn't redistribute money in the wallet into the desired outputs", err) != nil {
			return
		}
@@ -557,65 +445,70 @@
	jc.Encode(ids)
}

-func (b *Bus) walletDiscardHandler(jc jape.Context) {
-	var txn types.Transaction
-	if jc.Decode(&txn) == nil {
-		b.w.ReleaseInputs([]types.Transaction{txn}, nil)
+func (b *Bus) walletPendingHandler(jc jape.Context) {
+	events, err := b.w.UnconfirmedEvents()
+	if jc.Check("couldn't fetch unconfirmed events", err) != nil {
+		return
	}
+	jc.Encode(events)
}

-func (b *Bus) walletPendingHandler(jc jape.Context) {
-	isRelevant := func(txn types.Transaction) bool {
-		addr := b.w.Address()
-		for _, sci := range txn.SiacoinInputs {
-			if sci.UnlockConditions.UnlockHash() == addr {
-				return true
-			}
-		}
-		for _, sco := range txn.SiacoinOutputs {
-			if sco.Address == addr {
-				return true
-			}
-		}
-		return false
+func (b *Bus) hostsHandlerPOST(jc jape.Context) {
+	var req api.HostsRequest
+	if jc.Decode(&req) != nil {
+		return
	}
-	txns := b.cm.PoolTransactions()
-	relevant := txns[:0]
-	for _, txn := range txns {
-		if isRelevant(txn) {
-			relevant = append(relevant, txn)
-		}
+	// validate the usability mode
+	switch req.UsabilityMode {
+	case api.UsabilityFilterModeUsable:
+	case api.UsabilityFilterModeUnusable:
+	case api.UsabilityFilterModeAll:
+	case "":
+		req.UsabilityMode = api.UsabilityFilterModeAll
+	default:
+		jc.Error(fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable', 'all' or an empty string for no filter", req.UsabilityMode), http.StatusBadRequest)
+		return
	}
-	jc.Encode(relevant)
-}

-func (b *Bus) hostsHandlerGETDeprecated(jc jape.Context) {
-	offset := 0
-	limit := -1
-	if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil {
+	if req.AutopilotID == "" && req.UsabilityMode != api.UsabilityFilterModeAll {
+		jc.Error(errors.New("need to specify autopilot id when usability mode isn't 'all'"), http.StatusBadRequest)
		return
	}

-	// fetch hosts
-	hosts, err := b.hs.SearchHosts(jc.Request.Context(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, offset, limit)
-	if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil {
+	// validate the filter mode
+	switch req.FilterMode {
+	case api.HostFilterModeAllowed:
+	case api.HostFilterModeBlocked:
+	case api.HostFilterModeAll:
+	case "":
+		req.FilterMode = api.HostFilterModeAllowed
+	default:
+		jc.Error(fmt.Errorf("invalid filter mode: '%v', options are 'allowed', 'blocked', 'all' or an empty string for the 'allowed' filter", req.FilterMode), http.StatusBadRequest)
		return
	}
-	jc.Encode(hosts)
-}

-func (b *Bus) searchHostsHandlerPOST(jc jape.Context) {
-	var req api.SearchHostsRequest
-	if jc.Decode(&req) != nil {
+	// validate the offset and limit
+	if req.Offset < 0 {
+		jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest)
		return
	}
+	if req.Limit < 0 && req.Limit != -1 {
+		jc.Error(errors.New("limit must be non-negative or equal to -1 to indicate no limit"), http.StatusBadRequest)
+		return
+	} else if req.Limit == 0 {
+		req.Limit = -1
+	}

-	// TODO: on the next major release:
-	// - properly default search params (currently no defaults are set)
-	// - properly validate and return 400 (currently validation is done in autopilot and the store)
-
-	hosts, err := b.hs.SearchHosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit)
+	hosts, err := b.hs.Hosts(jc.Request.Context(), api.HostOptions{
+		AutopilotID:     req.AutopilotID,
+		FilterMode:      req.FilterMode,
+		UsabilityMode:   req.UsabilityMode,
+		AddressContains: req.AddressContains,
+		KeyIn:           req.KeyIn,
+		Offset:          req.Offset,
+		Limit:           req.Limit,
+	})
	if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil {
		return
	}
@@ -937,7 +830,8 @@ func (b *Bus) contractPruneHandlerPOST(jc jape.Context) {
	}

	// prune the contract
-	rev, spending, pruned, remaining, err := b.rhp2.PruneContract(pruneCtx, b.deriveRenterKey(c.HostKey), gc, c.HostIP, c.HostKey, fcid, c.RevisionNumber, func(fcid types.FileContractID, roots []types.Hash256) ([]uint64, error) {
+	rk := b.masterKey.DeriveContractKey(c.HostKey)
+	rev, spending, pruned, remaining, err := b.rhp2.PruneContract(pruneCtx, rk, gc, c.HostIP, c.HostKey, fcid, c.RevisionNumber, func(fcid types.FileContractID, roots []types.Hash256) ([]uint64, error) {
		indices, err := b.ms.PrunableContractRoots(ctx, fcid, roots)
		if err != nil {
			return nil, err
@@ -1212,34 +1106,8 @@ func (b *Bus) contractsAllHandlerDELETE(jc jape.Context) {
	jc.Check("couldn't remove contracts", b.ms.ArchiveAllContracts(jc.Request.Context(), api.ContractArchivalReasonRemoved))
}

-func (b *Bus) searchObjectsHandlerGET(jc jape.Context) {
-	offset := 0
-	limit := -1
-	var key string
-	if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("key", &key) != nil {
-		return
-	}
-	bucket := api.DefaultBucketName
-	if jc.DecodeForm("bucket", &bucket) != nil {
-		return
-	}
-	keys, err := b.ms.SearchObjects(jc.Request.Context(), bucket, key, offset, limit)
-	if jc.Check("couldn't list objects", err) != nil {
-		return
-	}
-	jc.Encode(keys)
-}
-
-func (b *Bus) objectsHandlerGET(jc jape.Context) {
-	var ignoreDelim bool
-	if jc.DecodeForm("ignoreDelim", &ignoreDelim) != nil {
-		return
-	}
-	path := jc.PathParam("path")
-	if strings.HasSuffix(path, "/") && !ignoreDelim {
-		b.objectEntriesHandlerGET(jc, path)
-		return
-	}
+func (b *Bus) objectHandlerGET(jc jape.Context) {
+	key := jc.PathParam("key")
	bucket := api.DefaultBucketName
	if jc.DecodeForm("bucket", &bucket) != nil {
		return
@@ -1251,10 +1119,11 @@ func (b *Bus) objectsHandlerGET(jc jape.Context) {

	var o api.Object
	var err error
+
	if onlymetadata {
-		o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path)
+		o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, key)
	} else {
-		o, err = b.ms.Object(jc.Request.Context(), bucket, path)
+		o, err = b.ms.Object(jc.Request.Context(), bucket, key)
	}
	if errors.Is(err, api.ErrObjectNotFound) {
		jc.Error(err, http.StatusNotFound)
@@ -1262,51 +1131,43 @@
	} else if jc.Check("couldn't load object", err) != nil {
		return
	}
-	jc.Encode(api.ObjectsResponse{Object: &o})
+	jc.Encode(o)
}

-func (b *Bus) objectEntriesHandlerGET(jc jape.Context, path string) {
+func (b *Bus) objectsHandlerGET(jc jape.Context) {
+	var marker, delim, sortBy, sortDir, substring string
	bucket := api.DefaultBucketName
	if jc.DecodeForm("bucket", &bucket) != nil {
		return
	}
-
-	var prefix string
-	if jc.DecodeForm("prefix", &prefix) != nil {
+	if jc.DecodeForm("delimiter", &delim) != nil {
		return
	}
-
-	var sortBy string
-	if jc.DecodeForm("sortBy", &sortBy) != nil {
+	limit := -1
+	if jc.DecodeForm("limit", &limit) != nil {
		return
	}
-
-	var sortDir string
-	if jc.DecodeForm("sortDir", &sortDir) != nil {
+	if jc.DecodeForm("marker", &marker) != nil {
		return
	}
-
-	var marker string
-	if jc.DecodeForm("marker", &marker) != nil {
+	if jc.DecodeForm("sortBy", &sortBy) != nil {
		return
	}
-
-	var offset int
-	if jc.DecodeForm("offset", &offset) != nil {
+	if jc.DecodeForm("sortDir", &sortDir) != nil {
		return
	}
-	limit := -1
-	if jc.DecodeForm("limit", &limit) != nil {
+	if jc.DecodeForm("substring", &substring) != nil {
		return
	}

-	// look for object entries
-	entries, hasMore, err := b.ms.ObjectEntries(jc.Request.Context(), bucket, path, prefix, sortBy, sortDir, marker, offset, limit)
-	if jc.Check("couldn't list object entries", err) != nil {
+	resp, err := b.ms.ListObjects(jc.Request.Context(), bucket, jc.PathParam("prefix"), substring, delim, sortBy, sortDir, marker, limit)
+	if errors.Is(err, api.ErrUnsupportedDelimiter) {
+		jc.Error(err, http.StatusBadRequest)
+		return
+	} else if jc.Check("failed to query objects", err) != nil {
		return
	}
-
-	jc.Encode(api.ObjectsResponse{Entries: entries, HasMore: hasMore})
+	jc.Encode(resp)
}
@@ -1316,7 +1177,7 @@ func (b *Bus) objectsHandlerPUT(jc jape.Context) {
	} else if aor.Bucket == "" {
		aor.Bucket = api.DefaultBucketName
	}
-	jc.Check("couldn't store object", b.ms.UpdateObject(jc.Request.Context(), aor.Bucket, jc.PathParam("path"), aor.ContractSet, aor.ETag, aor.MimeType, aor.Metadata, aor.Object))
+	jc.Check("couldn't store object", b.ms.UpdateObject(jc.Request.Context(), aor.Bucket, jc.PathParam("key"), aor.ContractSet, aor.ETag, aor.MimeType, aor.Metadata, aor.Object))
}

func (b *Bus) objectsCopyHandlerPOST(jc jape.Context) {
	var orr api.CopyObjectsRequest
	if jc.Decode(&orr) != nil {
		return
	}
-	om, err := b.ms.CopyObject(jc.Request.Context(), orr.SourceBucket, orr.DestinationBucket, orr.SourcePath, orr.DestinationPath, orr.MimeType, orr.Metadata)
+	om, err := b.ms.CopyObject(jc.Request.Context(), orr.SourceBucket, orr.DestinationBucket, orr.SourceKey, orr.DestinationKey, orr.MimeType, orr.Metadata)
	if jc.Check("couldn't copy object", err) != nil {
		return
	}
@@ -1334,24 +1195,6 @@
	jc.Encode(om)
}

-func (b *Bus) objectsListHandlerPOST(jc jape.Context) {
-	var req api.ObjectsListRequest
-	if jc.Decode(&req) != nil {
-		return
-	}
-	if req.Bucket == "" {
-		req.Bucket = api.DefaultBucketName
-	}
-	resp, err := b.ms.ListObjects(jc.Request.Context(), req.Bucket, req.Prefix, req.SortBy, req.SortDir, req.Marker, req.Limit)
-	if errors.Is(err, api.ErrMarkerNotFound) {
-		jc.Error(err, http.StatusBadRequest)
-		return
-	} else if jc.Check("couldn't list objects", err) != nil {
-		return
-	}
-	jc.Encode(resp)
-}
-
func (b *Bus) objectsRenameHandlerPOST(jc jape.Context) {
	var orr api.ObjectsRenameRequest
	if jc.Decode(&orr) != nil {
@@ -1393,9 +1236,9 @@ func (b *Bus) objectsHandlerDELETE(jc jape.Context) {
	}
	var err error
	if batch {
-		err = b.ms.RemoveObjects(jc.Request.Context(), bucket, jc.PathParam("path"))
+		err = b.ms.RemoveObjects(jc.Request.Context(), bucket, jc.PathParam("key"))
	} else {
-		err = b.ms.RemoveObject(jc.Request.Context(), bucket, jc.PathParam("path"))
+		err = b.ms.RemoveObject(jc.Request.Context(), bucket, jc.PathParam("key"))
	}
	if errors.Is(err, api.ErrObjectNotFound) {
		jc.Error(err, http.StatusNotFound)
@@ -1456,6 +1299,146 @@ func (b *Bus) packedSlabsHandlerDonePOST(jc jape.Context) {
	jc.Check("failed to mark packed slab(s) as uploaded", b.ms.MarkPackedSlabsUploaded(jc.Request.Context(), psrp.Slabs))
}

+func (b *Bus) settingsGougingHandlerGET(jc jape.Context) {
+	gs, err := b.ss.GougingSettings(jc.Request.Context())
+	if errors.Is(err, sql.ErrSettingNotFound) {
+		b.logger.Warn("gouging settings not found, returning defaults")
+		jc.Encode(api.DefaultGougingSettings)
+		return
+	} else if jc.Check("failed to get gouging settings", err) == nil {
+		jc.Encode(gs)
+	}
+}
+
+func (b *Bus) settingsGougingHandlerPUT(jc jape.Context) {
+	var gs api.GougingSettings
+	if jc.Decode(&gs) != nil {
+		return
+	} else if err := gs.Validate(); err != nil {
+		jc.Error(fmt.Errorf("couldn't update gouging settings, error: %v", err), http.StatusBadRequest)
+		return
+	} else if jc.Check("could not update gouging settings", b.ss.UpdateGougingSettings(jc.Request.Context(), gs)) == nil {
+		b.broadcastAction(webhooks.Event{
+			Module: api.ModuleSetting,
+			Event:  api.EventUpdate,
+			Payload: api.EventSettingUpdate{
+				GougingSettings: &gs,
+				Timestamp:       time.Now().UTC(),
+			},
+		})
+		b.pinMgr.TriggerUpdate()
+	}
+}
+
+func (b *Bus) settingsPinnedHandlerGET(jc jape.Context) {
+	ps, err := b.ss.PinnedSettings(jc.Request.Context())
+	if errors.Is(err, sql.ErrSettingNotFound) {
+		b.logger.Warn("pinned settings not found, returning defaults")
+		jc.Encode(api.DefaultPinnedSettings)
+		return
+	} else if jc.Check("failed to get pinned settings", err) == nil {
+		// populate the Autopilots map with the current autopilots
+		aps, err := b.as.Autopilots(jc.Request.Context())
+		if jc.Check("failed to fetch autopilots", err) != nil {
+			return
+		}
+		if ps.Autopilots == nil {
+			ps.Autopilots = make(map[string]api.AutopilotPins)
+		}
+		for _, ap := range aps {
+			if _, exists := ps.Autopilots[ap.ID]; !exists {
+				ps.Autopilots[ap.ID] = api.AutopilotPins{}
+			}
+		}
+		jc.Encode(ps)
+	}
+}
+
+func (b *Bus) settingsPinnedHandlerPUT(jc jape.Context) {
+	var ps api.PinnedSettings
+	if jc.Decode(&ps) != nil {
+		return
+	} else if err := ps.Validate(); err != nil {
+		jc.Error(fmt.Errorf("couldn't update pinned settings, error: %v", err), http.StatusBadRequest)
+		return
+	} else if ps.Enabled() && !b.explorer.Enabled() {
+		jc.Error(fmt.Errorf("can't enable price pinning, %w", api.ErrExplorerDisabled), http.StatusBadRequest)
+		return
+	}
+
+	if jc.Check("could not update pinned settings", b.ss.UpdatePinnedSettings(jc.Request.Context(), ps)) == nil {
+		b.broadcastAction(webhooks.Event{
+			Module: api.ModuleSetting,
+			Event:  api.EventUpdate,
+			Payload: api.EventSettingUpdate{
+				PinnedSettings: &ps,
+				Timestamp:      time.Now().UTC(),
+			},
+		})
+		b.pinMgr.TriggerUpdate()
+	}
+}
+
+func (b *Bus) settingsUploadHandlerGET(jc jape.Context) {
+	us, err := b.ss.UploadSettings(jc.Request.Context())
+	if errors.Is(err, sql.ErrSettingNotFound) {
+		b.logger.Warn("upload settings not found, returning defaults")
+		jc.Encode(api.DefaultUploadSettings(b.cm.TipState().Network.Name))
+		return
+	} else if jc.Check("failed to get upload settings", err) == nil {
+		jc.Encode(us)
+	}
+}
+
+func (b *Bus) settingsUploadHandlerPUT(jc jape.Context) {
+	var us api.UploadSettings
+	if jc.Decode(&us) != nil {
+		return
+	} else if err := us.Validate(); err != nil {
+		jc.Error(fmt.Errorf("couldn't update upload settings, error: %v", err), http.StatusBadRequest)
+		return
+	} else if jc.Check("could not update upload settings", b.ss.UpdateUploadSettings(jc.Request.Context(), us)) == nil {
+		b.broadcastAction(webhooks.Event{
+			Module: api.ModuleSetting,
+			Event:  api.EventUpdate,
+			Payload: api.EventSettingUpdate{
+				UploadSettings: &us,
+				Timestamp:      time.Now().UTC(),
+			},
+		})
+	}
+}
+
+func (b *Bus) settingsS3HandlerGET(jc jape.Context) {
+	s3s, err := b.ss.S3Settings(jc.Request.Context())
+	if errors.Is(err, sql.ErrSettingNotFound) {
+		b.logger.Warn("S3 settings not found, returning defaults")
+		jc.Encode(api.DefaultS3Settings)
+		return
+	} else if jc.Check("failed to get S3 settings", err) == nil {
+		jc.Encode(s3s)
+	}
+}
+
+func (b *Bus) settingsS3HandlerPUT(jc jape.Context) {
+	var s3s api.S3Settings
+	if jc.Decode(&s3s) != nil {
+		return
+	} else if err := s3s.Validate(); err != nil {
+		jc.Error(fmt.Errorf("couldn't update S3 settings, error: %v", err), http.StatusBadRequest)
+		return
+	} else if jc.Check("could not update S3 settings", b.ss.UpdateS3Settings(jc.Request.Context(), s3s)) == nil {
+		b.broadcastAction(webhooks.Event{
+			Module: api.ModuleSetting,
+			Event:  api.EventUpdate,
+			Payload: api.EventSettingUpdate{
+				S3Settings: &s3s,
+				Timestamp:  time.Now().UTC(),
+			},
+		})
+	}
+}
+
func (b *Bus) sectorsHostRootHandlerDELETE(jc jape.Context) {
	var hk types.PublicKey
	var root types.Hash256
@@ -1589,167 +1572,17 @@ func (b *Bus) slabsPartialHandlerPOST(jc jape.Context) {
partial slab", err) != nil { return } - var pus api.UploadPackingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &pus); err != nil && !errors.Is(err, api.ErrSettingNotFound) { + us, err := b.ss.UploadSettings(jc.Request.Context()) + if err != nil { jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), http.StatusInternalServerError) return } jc.Encode(api.AddPartialSlabResponse{ Slabs: slabs, - SlabBufferMaxSizeSoftReached: bufferSize >= pus.SlabBufferMaxSizeSoft, + SlabBufferMaxSizeSoftReached: bufferSize >= us.Packing.SlabBufferMaxSizeSoft, }) } -func (b *Bus) settingsHandlerGET(jc jape.Context) { - if settings, err := b.ss.Settings(jc.Request.Context()); jc.Check("couldn't load settings", err) == nil { - jc.Encode(settings) - } -} - -func (b *Bus) settingKeyHandlerGET(jc jape.Context) { - jc.Custom(nil, (any)(nil)) - - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - setting, err := b.ss.Setting(jc.Request.Context(), jc.PathParam("key")) - if errors.Is(err, api.ErrSettingNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if err != nil { - jc.Error(err, http.StatusInternalServerError) - return - } - resp := []byte(setting) - - // populate autopilots of price pinning settings with defaults for better DX - if key == api.SettingPricePinning { - var pps api.PricePinSettings - err = json.Unmarshal([]byte(setting), &pps) - if jc.Check("failed to unmarshal price pinning settings", err) != nil { - return - } else if pps.Autopilots == nil { - pps.Autopilots = make(map[string]api.AutopilotPins) - } - // populate the Autopilots map with the current autopilots - aps, err := b.as.Autopilots(jc.Request.Context()) - if jc.Check("failed to fetch autopilots", err) != nil { - return - } - for _, ap := range aps { - if _, exists := pps.Autopilots[ap.ID]; !exists { - pps.Autopilots[ap.ID] = api.AutopilotPins{} - } - } - // encode the settings back - resp, err = json.Marshal(pps) - if jc.Check("failed to marshal price pinning settings", err) != nil { - return - } - } - jc.ResponseWriter.Header().Set("Content-Type", "application/json") - jc.ResponseWriter.Write(resp) -} - -func (b *Bus) settingKeyHandlerPUT(jc jape.Context) { - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - var value interface{} - if jc.Decode(&value) != nil { - return - } - - data, err := json.Marshal(value) - if err != nil { - jc.Error(fmt.Errorf("couldn't marshal the given value, error: %v", err), http.StatusBadRequest) - return - } - - switch key { - case api.SettingGouging: - var gs api.GougingSettings - if err := json.Unmarshal(data, &gs); err != nil { - jc.Error(fmt.Errorf("couldn't update gouging settings, invalid request body, %t", value), http.StatusBadRequest) - return - } else if err := gs.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update gouging settings, error: %v", err), http.StatusBadRequest) - return - } - b.pinMgr.TriggerUpdate() - case api.SettingRedundancy: - var rs api.RedundancySettings - if err := json.Unmarshal(data, &rs); err != nil { - jc.Error(fmt.Errorf("couldn't update redundancy settings, invalid request body"), http.StatusBadRequest) - return - } else if err := rs.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update redundancy settings, error: %v", err), http.StatusBadRequest) - return - } - case 
api.SettingS3Authentication: - var s3as api.S3AuthenticationSettings - if err := json.Unmarshal(data, &s3as); err != nil { - jc.Error(fmt.Errorf("couldn't update s3 authentication settings, invalid request body"), http.StatusBadRequest) - return - } else if err := s3as.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update s3 authentication settings, error: %v", err), http.StatusBadRequest) - return - } - case api.SettingPricePinning: - var pps api.PricePinSettings - if err := json.Unmarshal(data, &pps); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid request body"), http.StatusBadRequest) - return - } else if err := pps.Validate(); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) - return - } else if pps.Enabled { - if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { - jc.Error(fmt.Errorf("couldn't update price pinning settings, forex API unreachable,error: %v", err), http.StatusBadRequest) - return - } - } - b.pinMgr.TriggerUpdate() - } - - if jc.Check("could not update setting", b.ss.UpdateSetting(jc.Request.Context(), key, string(data))) == nil { - b.broadcastAction(webhooks.Event{ - Module: api.ModuleSetting, - Event: api.EventUpdate, - Payload: api.EventSettingUpdate{ - Key: key, - Update: value, - Timestamp: time.Now().UTC(), - }, - }) - } -} - -func (b *Bus) settingKeyHandlerDELETE(jc jape.Context) { - key := jc.PathParam("key") - if key == "" { - jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) - return - } - - if jc.Check("could not delete setting", b.ss.DeleteSetting(jc.Request.Context(), key)) == nil { - b.broadcastAction(webhooks.Event{ - Module: api.ModuleSetting, - Event: api.EventDelete, - Payload: api.EventSettingDelete{ - Key: key, - Timestamp: time.Now().UTC(), - }, - }) - } -} - func (b *Bus) contractIDAncestorsHandler(jc jape.Context) { var fcid types.FileContractID if jc.DecodeParam("id", &fcid) != nil { @@ -1784,22 +1617,12 @@ func (b *Bus) paramsHandlerUploadGET(jc jape.Context) { return } - var contractSet string - var css api.ContractSetSetting - if err := b.fetchSetting(jc.Request.Context(), api.SettingContractSet, &css); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - jc.Error(fmt.Errorf("could not get contract set settings: %w", err), http.StatusInternalServerError) - return - } else if err == nil { - contractSet = css.Default - } - var uploadPacking bool - var pus api.UploadPackingSettings - if err := b.fetchSetting(jc.Request.Context(), api.SettingUploadPacking, &pus); err != nil && !errors.Is(err, api.ErrSettingNotFound) { - jc.Error(fmt.Errorf("could not get upload packing settings: %w", err), http.StatusInternalServerError) - return - } else if err == nil { - uploadPacking = pus.Enabled + var contractSet string + us, err := b.ss.UploadSettings(jc.Request.Context()) + if jc.Check("could not get upload settings", err) == nil { + contractSet = us.DefaultContractSet + uploadPacking = us.Packing.Enabled } jc.Encode(api.UploadParams{ @@ -1838,18 +1661,18 @@ func (b *Bus) paramsHandlerGougingGET(jc jape.Context) { } func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { - var gs api.GougingSettings - if gss, err := b.ss.Setting(ctx, api.SettingGouging); err != nil { + gs, err := b.ss.GougingSettings(ctx) + if errors.Is(err, sql.ErrSettingNotFound) { + gs = api.DefaultGougingSettings + } else if 
err != nil { return api.GougingParams{}, err - } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { - b.logger.Panicf("failed to unmarshal gouging settings '%s': %v", gss, err) } - var rs api.RedundancySettings - if rss, err := b.ss.Setting(ctx, api.SettingRedundancy); err != nil { + us, err := b.ss.UploadSettings(ctx) + if errors.Is(err, sql.ErrSettingNotFound) { + us = api.DefaultUploadSettings(b.cm.TipState().Network.Name) + } else if err != nil { return api.GougingParams{}, err - } else if err := json.Unmarshal([]byte(rss), &rs); err != nil { - b.logger.Panicf("failed to unmarshal redundancy settings '%s': %v", rss, err) } cs, err := b.consensusState(ctx) @@ -1860,7 +1683,7 @@ func (b *Bus) gougingParams(ctx context.Context) (api.GougingParams, error) { return api.GougingParams{ ConsensusState: cs, GougingSettings: gs, - RedundancySettings: rs, + RedundancySettings: us.Redundancy, TransactionFee: b.cm.RecommendedFee(), }, nil } @@ -2041,6 +1864,10 @@ func (b *Bus) stateHandlerGET(jc jape.Context) { OS: runtime.GOOS, BuildTime: api.TimeRFC3339(build.BuildTime()), }, + Explorer: api.ExplorerState{ + Enabled: b.explorer.Enabled(), + URL: b.explorer.BaseURL(), + }, Network: b.cm.TipState().Network.Name, }) } @@ -2277,15 +2104,13 @@ func (b *Bus) multipartHandlerCreatePOST(jc jape.Context) { } var key object.EncryptionKey - if req.GenerateKey { - key = object.GenerateEncryptionKey() - } else if req.Key == nil { + if req.DisableClientSideEncryption { key = object.NoOpKey } else { - key = *req.Key + key = object.GenerateEncryptionKey() } - resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata) + resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, key, req.MimeType, req.Metadata) if jc.Check("failed to create multipart upload", err) != nil { return } @@ -2297,7 +2122,7 @@ func (b *Bus) multipartHandlerAbortPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - err := b.ms.AbortMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID) + err := b.ms.AbortMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, req.UploadID) if jc.Check("failed to abort multipart upload", err) != nil { return } @@ -2308,7 +2133,7 @@ func (b *Bus) multipartHandlerCompletePOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.Parts, api.CompleteMultipartOptions{ + resp, err := b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Key, req.UploadID, req.Parts, api.CompleteMultipartOptions{ Metadata: req.Metadata, }) if jc.Check("failed to complete multipart upload", err) != nil { @@ -2337,7 +2162,7 @@ func (b *Bus) multipartHandlerUploadPartPUT(jc jape.Context) { jc.Error(errors.New("upload_id must be non-empty"), http.StatusBadRequest) return } - err := b.ms.AddMultipartPart(jc.Request.Context(), req.Bucket, req.Path, req.ContractSet, req.ETag, req.UploadID, req.PartNumber, req.Slices) + err := b.ms.AddMultipartPart(jc.Request.Context(), req.Bucket, req.Key, req.ContractSet, req.ETag, req.UploadID, req.PartNumber, req.Slices) if jc.Check("failed to upload part", err) != nil { return } @@ -2356,7 +2181,7 @@ func (b *Bus) multipartHandlerListUploadsPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.MultipartUploads(jc.Request.Context(), req.Bucket, req.Prefix, req.PathMarker, req.UploadIDMarker, req.Limit) + resp, err := 
b.ms.MultipartUploads(jc.Request.Context(), req.Bucket, req.Prefix, req.KeyMarker, req.UploadIDMarker, req.Limit) if jc.Check("failed to list multipart uploads", err) != nil { return } @@ -2368,7 +2193,7 @@ func (b *Bus) multipartHandlerListPartsPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.MultipartUploadParts(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.PartNumberMarker, int64(req.Limit)) + resp, err := b.ms.MultipartUploadParts(jc.Request.Context(), req.Bucket, req.Key, req.UploadID, req.PartNumberMarker, int64(req.Limit)) if jc.Check("failed to list multipart upload parts", err) != nil { return } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index e1200f121..769875866 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "encoding/hex" "errors" "flag" "fmt" @@ -20,10 +19,8 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/api" "go.sia.tech/renterd/config" - "go.sia.tech/renterd/worker/s3" "golang.org/x/term" "gopkg.in/yaml.v3" - "lukechampine.com/frand" ) // TODO: handle RENTERD_S3_HOST_BUCKET_BASES correctly @@ -45,7 +42,6 @@ var ( enableANSI = runtime.GOOS != "windows" hostBasesStr string - keyPairsV4 string workerRemotePassStr string workerRemoteAddrsStr string ) @@ -68,8 +64,10 @@ func defaultConfig() config.Config { MetricsDatabase: "renterd_metrics", }, }, + Explorer: config.ExplorerData{ + URL: "https://api.siascan.com", + }, Log: config.Log{ - Path: "", // deprecated. included for compatibility. Level: "", File: config.LogFile{ Enabled: true, @@ -127,7 +125,6 @@ func defaultConfig() config.Config { Address: "localhost:8080", Enabled: true, DisableAuth: false, - KeypairsV4: nil, }, } } @@ -168,6 +165,12 @@ func loadConfig() (cfg config.Config, network *consensus.Network, genesis types. 
return } + // check explorer + if !cfg.Explorer.Disable && cfg.Explorer.URL == "" { + err = fmt.Errorf("explorer is enabled but no URL is set") + return + } + return } @@ -219,17 +222,6 @@ func sanitizeConfig(cfg *config.Config) error { } } - // parse S3 auth keys - if cfg.S3.Enabled { - if !cfg.S3.DisableAuth && keyPairsV4 != "" { - var err error - cfg.S3.KeypairsV4, err = s3.Parsev4AuthKeys(strings.Split(keyPairsV4, ";")) - if err != nil { - return fmt.Errorf("failed to parse keypairs: %v", err) - } - } - } - // default log levels if cfg.Log.Level == "" { cfg.Log.Level = "info" @@ -267,12 +259,6 @@ func parseYamlConfig(cfg *config.Config) { } func parseCLIFlags(cfg *config.Config) { - // deprecated - these go first so that they can be overwritten by the non-deprecated flags - flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Database.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") - flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "(deprecated) Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") - flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "(deprecated) Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") - flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "(deprecated) Path to directory for logs (overrides with RENTERD_LOG_PATH)") - // node flag.StringVar(&cfg.HTTP.Address, "http", cfg.HTTP.Address, "Address for serving the API") flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") @@ -303,7 +289,6 @@ func parseCLIFlags(cfg *config.Config) { flag.Uint64Var(&cfg.Bus.AnnouncementMaxAgeHours, "bus.announcementMaxAgeHours", cfg.Bus.AnnouncementMaxAgeHours, "Max age for announcements") flag.BoolVar(&cfg.Bus.Bootstrap, "bus.bootstrap", cfg.Bus.Bootstrap, "Bootstraps gateway and consensus modules") flag.StringVar(&cfg.Bus.GatewayAddr, "bus.gatewayAddr", cfg.Bus.GatewayAddr, "Address for Sia peer connections (overrides with RENTERD_BUS_GATEWAY_ADDR)") - flag.DurationVar(&cfg.Bus.PersistInterval, "bus.persistInterval", cfg.Bus.PersistInterval, "(deprecated) Interval for persisting consensus updates") flag.DurationVar(&cfg.Bus.UsedUTXOExpiry, "bus.usedUTXOExpiry", cfg.Bus.UsedUTXOExpiry, "Expiry for used UTXOs in transactions") flag.Int64Var(&cfg.Bus.SlabBufferCompletionThreshold, "bus.slabBufferCompletionThreshold", cfg.Bus.SlabBufferCompletionThreshold, "Threshold for slab buffer upload (overrides with RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD)") @@ -340,6 +325,10 @@ func parseCLIFlags(cfg *config.Config) { flag.StringVar(&hostBasesStr, "s3.hostBases", "", "Enables bucket rewriting in the router for specific hosts provided via comma-separated list (overrides with RENTERD_S3_HOST_BUCKET_BASES)") flag.BoolVar(&cfg.S3.HostBucketEnabled, "s3.hostBucketEnabled", cfg.S3.HostBucketEnabled, "Enables bucket rewriting in the router for all hosts (overrides with RENTERD_S3_HOST_BUCKET_ENABLED)") + // explorer + flag.StringVar(&cfg.Explorer.URL, "explorer.url", cfg.Explorer.URL, "URL of service to retrieve data about the Sia network (overrides with RENTERD_EXPLORER_URL)") + flag.BoolVar(&cfg.Explorer.Disable, "explorer.disable", cfg.Explorer.Disable, "Disables explorer service (overrides with RENTERD_EXPLORER_DISABLE)") + // custom usage flag.Usage = func() { log.Print(usageHeader) 
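For readers tracing the new explorer configuration above: `defaultConfig` seeds `Explorer.URL` with `https://api.siascan.com`, `parseCLIFlags` registers `--explorer.url` and `--explorer.disable`, and `loadConfig` now rejects a config where the explorer is enabled but the URL is empty. Below is a minimal standalone sketch of that flag handling and validation, assuming nothing beyond what this patch shows; the `explorerCfg` struct is a hypothetical stand-in for `config.ExplorerData` and is not part of the patch.

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// explorerCfg is a hypothetical stand-in for config.ExplorerData.
type explorerCfg struct {
	Disable bool
	URL     string
}

func main() {
	// default mirrors defaultConfig() in this patch
	cfg := explorerCfg{URL: "https://api.siascan.com"}

	// flags mirror the ones registered in parseCLIFlags above
	flag.BoolVar(&cfg.Disable, "explorer.disable", cfg.Disable, "Disables explorer service")
	flag.StringVar(&cfg.URL, "explorer.url", cfg.URL, "URL of service to retrieve data about the Sia network")
	flag.Parse()

	// mirrors the check added to loadConfig: an enabled explorer needs a URL
	if !cfg.Disable && cfg.URL == "" {
		fmt.Fprintln(os.Stderr, "explorer is enabled but no URL is set")
		os.Exit(1)
	}
	fmt.Printf("explorer disabled: %v, url: %q\n", cfg.Disable, cfg.URL)
}
```

Running the sketch with `-explorer.url=""` exits with the same error `loadConfig` now returns, while adding `-explorer.disable` makes the empty URL acceptable.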
@@ -373,10 +362,7 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_DB_PASSWORD", &cfg.Database.MySQL.Password) parseEnvVar("RENTERD_DB_NAME", &cfg.Database.MySQL.Database) parseEnvVar("RENTERD_DB_METRICS_NAME", &cfg.Database.MySQL.MetricsDatabase) - - parseEnvVar("RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR", &cfg.Database.Log.IgnoreRecordNotFoundError) parseEnvVar("RENTERD_DB_LOGGER_LOG_LEVEL", &cfg.Log.Level) - parseEnvVar("RENTERD_DB_LOGGER_SLOW_THRESHOLD", &cfg.Database.Log.SlowThreshold) parseEnvVar("RENTERD_WORKER_ENABLED", &cfg.Worker.Enabled) parseEnvVar("RENTERD_WORKER_ID", &cfg.Worker.ID) @@ -395,7 +381,6 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_S3_HOST_BUCKET_ENABLED", &cfg.S3.HostBucketEnabled) parseEnvVar("RENTERD_S3_HOST_BUCKET_BASES", &cfg.S3.HostBucketBases) - parseEnvVar("RENTERD_LOG_PATH", &cfg.Log.Path) parseEnvVar("RENTERD_LOG_LEVEL", &cfg.Log.Level) parseEnvVar("RENTERD_LOG_FILE_ENABLED", &cfg.Log.File.Enabled) parseEnvVar("RENTERD_LOG_FILE_FORMAT", &cfg.Log.File.Format) @@ -411,7 +396,8 @@ func parseEnvironmentVariables(cfg *config.Config) { parseEnvVar("RENTERD_WORKER_REMOTE_ADDRS", &workerRemoteAddrsStr) parseEnvVar("RENTERD_WORKER_API_PASSWORD", &workerRemotePassStr) - parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) + parseEnvVar("RENTERD_EXPLORER_DISABLE", &cfg.Explorer.Disable) + parseEnvVar("RENTERD_EXPLORER_URL", &cfg.Explorer.URL) } // readPasswordInput reads a password from stdin. @@ -688,54 +674,4 @@ func setS3Config(cfg *config.Config) { fmt.Println("The S3 API provides an S3-compatible gateway for uploading data to Sia.") fmt.Println("It should not be exposed to the public internet without setting up a reverse proxy.") setListenAddress("S3 Address", &cfg.S3.Address, true) - - // s3 access key - if len(cfg.S3.KeypairsV4) != 0 { - fmt.Println("") - fmt.Println("A S3 keypair has already been created.") - fmt.Println("If you change your S3 key pair, you will need to update any scripts or applications that use the S3 API.") - if !promptYesNo("Would you like to change your S3 key pair?") { - return - } - } - - cfg.S3.KeypairsV4 = make(map[string]string) - - fmt.Println("") - answer := promptQuestion("Would you like to automatically generate a new S3 key pair or set your own?", []string{"auto", "manual"}) - if strings.EqualFold(answer, "auto") { - // generate a new key pair - accessKey := hex.EncodeToString(frand.Bytes(20)) - secretKey := hex.EncodeToString(frand.Bytes(20)) - cfg.S3.KeypairsV4[accessKey] = secretKey - fmt.Println("") - fmt.Println("A new S3 key pair has been generated below.") - fmt.Println(wrapANSI("\033[34;1m", "Access Key:", "\033[0m"), accessKey) - fmt.Println(wrapANSI("\033[34;1m", "Secret Key:", "\033[0m"), secretKey) - fmt.Println("") - return - } - - var accessKey, secretKey string - for { - fmt.Println("") - fmt.Println("Enter your S3 access key. It must between 16 and 128 characters long.") - accessKey = readInput("Enter access key") - if len(accessKey) >= 16 && len(accessKey) <= 128 { - break - } - fmt.Println(wrapANSI("\033[31m", "Access key must be between 16 and 128 characters!", "\033[0m")) - } - - for { - fmt.Println("") - fmt.Println("Enter your S3 secret key. 
It must be 40 characters long.") - secretKey = readInput("Enter secret key") - if len(secretKey) == 40 { - break - } - fmt.Println(wrapANSI("\033[31m", "Secret key must be be 40 characters!", "\033[0m")) - } - - cfg.S3.KeypairsV4[accessKey] = secretKey } diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go index d107cc4a0..d53bdd709 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -14,10 +14,6 @@ import ( func NewLogger(dir, filename string, cfg config.Log) (*zap.Logger, func(context.Context) error, error) { // path path := filepath.Join(dir, filename) - if cfg.Path != "" { - path = filepath.Join(cfg.Path, filename) - } - if cfg.File.Path != "" { path = cfg.File.Path } diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index a9758439c..aa36b9f24 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -9,7 +9,6 @@ import ( "os" "path/filepath" "runtime" - "strings" "time" "go.sia.tech/core/consensus" @@ -21,7 +20,6 @@ import ( "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" - "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus" @@ -380,9 +378,15 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network // to ensure contracts formed by the bus can be renewed by the autopilot masterKey := blake2b.Sum256(append([]byte("worker"), pk...)) + // get explorer URL + var explorerURL string + if !cfg.Explorer.Disable { + explorerURL = cfg.Explorer.URL + } + // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, explorerURL, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } @@ -411,35 +415,6 @@ func (n *node) Run() error { } } - // set initial S3 keys - if n.cfg.S3.Enabled && !n.cfg.S3.DisableAuth { - as, err := n.bus.S3AuthenticationSettings(context.Background()) - if err != nil && !strings.Contains(err.Error(), api.ErrSettingNotFound.Error()) { - return fmt.Errorf("failed to fetch S3 authentication settings: %w", err) - } else if as.V4Keypairs == nil { - as.V4Keypairs = make(map[string]string) - } - - // S3 key pair validation was broken at one point, we need to remove the - // invalid key pairs here to ensure we don't fail when we update the - // setting below. 
- for k, v := range as.V4Keypairs { - if err := (api.S3AuthenticationSettings{V4Keypairs: map[string]string{k: v}}).Validate(); err != nil { - n.logger.Infof("removing invalid S3 keypair for AccessKeyID %s, reason: %v", k, err) - delete(as.V4Keypairs, k) - } - } - - // merge keys - for k, v := range n.cfg.S3.KeypairsV4 { - as.V4Keypairs[k] = v - } - // update settings - if err := n.bus.UpdateSetting(context.Background(), api.SettingS3Authentication, as); err != nil { - return fmt.Errorf("failed to update S3 authentication settings: %w", err) - } - } - // start S3 server if n.s3Srv != nil { go n.s3Srv.Serve(n.s3Listener) diff --git a/config/config.go b/config/config.go index 6755d3869..f74a765e8 100644 --- a/config/config.go +++ b/config/config.go @@ -24,7 +24,14 @@ type ( Worker Worker `yaml:"worker,omitempty"` S3 S3 `yaml:"s3,omitempty"` - Database Database `yaml:"database,omitempty"` + Database Database `yaml:"database,omitempty"` + Explorer ExplorerData `yaml:"explorer,omitempty"` + } + + // ExplorerData contains the configuration for using an external explorer. + ExplorerData struct { + Disable bool `yaml:"disable,omitempty"` + URL string `yaml:"url,omitempty"` } // HTTP contains the configuration for the HTTP server. @@ -41,7 +48,6 @@ type ( } Database struct { - Log DatabaseLog `yaml:"log,omitempty"` // deprecated. included for compatibility. // optional fields depending on backend MySQL MySQL `yaml:"mysql,omitempty"` } @@ -55,7 +61,6 @@ type ( RemotePassword string `yaml:"remotePassword,omitempty"` UsedUTXOExpiry time.Duration `yaml:"usedUtxoExpiry,omitempty"` SlabBufferCompletionThreshold int64 `yaml:"slabBufferCompleionThreshold,omitempty"` - PersistInterval time.Duration `yaml:"persistInterval,omitempty"` // deprecated } // LogFile configures the file output of the logger. @@ -76,7 +81,6 @@ type ( } Log struct { - Path string `yaml:"path,omitempty"` // deprecated. included for compatibility. Level string `yaml:"level,omitempty"` // global log level StdOut StdOut `yaml:"stdout,omitempty"` File LogFile `yaml:"file,omitempty"` @@ -104,12 +108,11 @@ type ( } S3 struct { - Address string `yaml:"address,omitempty"` - DisableAuth bool `yaml:"disableAuth,omitempty"` - Enabled bool `yaml:"enabled,omitempty"` - KeypairsV4 map[string]string `yaml:"keypairsV4,omitempty"` // deprecated. included for compatibility. - HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` - HostBucketBases []string `yaml:"hostBucketBases,omitempty"` + Address string `yaml:"address,omitempty"` + DisableAuth bool `yaml:"disableAuth,omitempty"` + Enabled bool `yaml:"enabled,omitempty"` + HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` + HostBucketBases []string `yaml:"hostBucketBases,omitempty"` } // Worker contains the configuration for a worker. 
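Since `config.ExplorerData` above feeds straight into the bus (see `newBus` earlier in this diff, which passes an empty URL when `cfg.Explorer.Disable` is set), a short usage sketch of the explorer client introduced below in `internal/bus/explorer.go` may help. It assumes only the `NewExplorer`, `Enabled`, `BaseURL`, and `SiacoinExchangeRate` signatures shown in this patch; note that `internal/` packages are importable only from within the renterd module, so the standalone `main` wiring here is purely illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	ibus "go.sia.tech/renterd/internal/bus"
)

func main() {
	// An empty URL yields a disabled explorer: Enabled() reports false and
	// SiacoinExchangeRate returns api.ErrExplorerDisabled.
	e := ibus.NewExplorer("https://api.siascan.com")
	if !e.Enabled() {
		fmt.Println("explorer disabled, price pinning unavailable")
		return
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// issues GET <BaseURL>/exchange-rate/siacoin/usd, per the implementation
	// in internal/bus/explorer.go below
	rate, err := e.SiacoinExchangeRate(ctx, "usd")
	if err != nil {
		fmt.Println("failed to fetch exchange rate:", err)
		return
	}
	fmt.Printf("1 SC = %v USD (via %s)\n", rate, e.BaseURL())
}
```

This is also the seam the pin manager uses: `NewPinManager` now takes an `ExchangeRateExplorer` and only starts its update loop when `Enabled()` reports true, which is why the tests later in this diff can swap in a `mockExplorer`.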
diff --git a/go.mod b/go.mod index 908c20ad3..0f17e8970 100644 --- a/go.mod +++ b/go.mod @@ -8,14 +8,14 @@ require ( github.com/go-sql-driver/mysql v1.8.1 github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.20.0 - github.com/klauspost/reedsolomon v1.12.3 + github.com/klauspost/reedsolomon v1.12.4 github.com/mattn/go-sqlite3 v1.14.23 github.com/minio/minio-go/v7 v7.0.76 github.com/montanaflynn/stats v0.7.1 github.com/shopspring/decimal v1.4.0 go.sia.tech/core v0.4.6 go.sia.tech/coreutils v0.3.2 - go.sia.tech/gofakes3 v0.0.4 + go.sia.tech/gofakes3 v0.0.5 go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 go.sia.tech/jape v0.12.1 go.sia.tech/mux v1.2.0 @@ -52,6 +52,6 @@ require ( golang.org/x/net v0.28.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.23.0 // indirect nhooyr.io/websocket v1.8.17 // indirect ) diff --git a/go.sum b/go.sum index f32b39629..7e34bf8ed 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ib github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/klauspost/reedsolomon v1.12.3 h1:tzUznbfc3OFwJaTebv/QdhnFf2Xvb7gZ24XaHLBPmdc= -github.com/klauspost/reedsolomon v1.12.3/go.mod h1:3K5rXwABAvzGeR01r6pWZieUALXO/Tq7bFKGIb4m4WI= +github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= +github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -76,8 +76,8 @@ go.sia.tech/core v0.4.6 h1:QLm97a7GWBonfnMEOokqWRAqsWCUPL7kzo6k3Adwx8E= go.sia.tech/core v0.4.6/go.mod h1:Zuq0Tn2aIXJyO0bjGu8cMeVWe+vwQnUfZhG1LCmjD5c= go.sia.tech/coreutils v0.3.2 h1:3gJqvs18n1FVZmcrnfIYyzS+rBu06OtIscDDAfUAYQI= go.sia.tech/coreutils v0.3.2/go.mod h1:woPVmN6GUpIKHdi71Hkb9goIbl7b45TquCsAyEzyxnI= -go.sia.tech/gofakes3 v0.0.4 h1:Kvo8j5cVdJRBXvV1KBJ69bocY23twG8ao/HCdwuPMeI= -go.sia.tech/gofakes3 v0.0.4/go.mod h1:6hh4lETCMbyFFNWp3FRE838geY6vh1Aeas7LtYDpQdc= +go.sia.tech/gofakes3 v0.0.5 h1:vFhVBUFbKE9ZplvLE2w4TQxFMQyF8qvgxV4TaTph+Vw= +go.sia.tech/gofakes3 v0.0.5/go.mod h1:LXEzwGw+OHysWLmagleCttX93cJZlT9rBu/icOZjQ54= go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 h1:DP9o+TnNeS34EmxZ/zqZ4px3DgL8en/2RL4EsiSd4GU= go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238/go.mod h1:InmB5LdO6EP+ZW9uolUCO+zh+zVdbJF3iCgU7xokJxQ= go.sia.tech/jape v0.12.1 h1:xr+o9V8FO8ScRqbSaqYf9bjj1UJ2eipZuNcI1nYousU= @@ -97,8 +97,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod 
v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= @@ -118,8 +118,8 @@ golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/bus/explorer.go b/internal/bus/explorer.go new file mode 100644 index 000000000..a4f7374d6 --- /dev/null +++ b/internal/bus/explorer.go @@ -0,0 +1,51 @@ +package bus + +import ( + "context" + "fmt" + "net/http" + + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" +) + +type ( + Explorer struct { + url string + } +) + +// NewExplorer returns a new Explorer. +func NewExplorer(url string) *Explorer { + return &Explorer{ + url: url, + } +} + +// BaseURL returns the base URL of the Explorer. +func (e *Explorer) BaseURL() string { + return e.url +} + +// Enabled returns true if the explorer is enabled. +func (e *Explorer) Enabled() bool { + return e.url != "" +} + +// SiacoinExchangeRate returns the exchange rate for the given currency. 
+func (e *Explorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { + // return early if the explorer is disabled + if !e.Enabled() { + return 0, api.ErrExplorerDisabled + } + + // create request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/exchange-rate/siacoin/%s", e.url, currency), http.NoBody) + if err != nil { + return 0, fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Accept", "application/json") + + _, _, err = utils.DoRequest(req, &rate) + return +} diff --git a/internal/bus/forex.go b/internal/bus/forex.go deleted file mode 100644 index 122056949..000000000 --- a/internal/bus/forex.go +++ /dev/null @@ -1,31 +0,0 @@ -package bus - -import ( - "context" - "fmt" - "net/http" - - "go.sia.tech/renterd/internal/utils" -) - -type ( - client struct { - url string - } -) - -func NewForexClient(url string) *client { - return &client{url: url} -} - -func (f *client) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { - // create request - req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/%s", f.url, currency), http.NoBody) - if err != nil { - return 0, fmt.Errorf("failed to create request: %w", err) - } - req.Header.Set("Accept", "application/json") - - _, _, err = utils.DoRequest(req, &rate) - return -} diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index 32e283812..0238b057b 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -2,7 +2,6 @@ package bus import ( "context" - "encoding/json" "errors" "fmt" "sync" @@ -13,29 +12,43 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" ) type ( + // An ExchangeRateExplorer retrieves exchange rate data about + // the SC token. + ExchangeRateExplorer interface { + Enabled() bool + BaseURL() string + SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) + } + Store interface { Autopilot(ctx context.Context, id string) (api.Autopilot, error) - Setting(ctx context.Context, key string) (string, error) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error - UpdateSetting(ctx context.Context, key, value string) error + + GougingSettings(ctx context.Context) (api.GougingSettings, error) + UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error + + PinnedSettings(ctx context.Context) (api.PinnedSettings, error) + UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error } ) type ( pinManager struct { a alerts.Alerter + e ExchangeRateExplorer s Store broadcaster webhooks.Broadcaster updateInterval time.Duration rateWindow time.Duration - triggerChan chan struct{} + triggerChan chan bool closedChan chan struct{} wg sync.WaitGroup @@ -50,9 +63,10 @@ type ( // NewPinManager returns a new PinManager, responsible for pinning prices to a // fixed value in an underlying currency. The returned pin manager is already // running and can be stopped by calling Shutdown. 
-func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { +func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, e ExchangeRateExplorer, s Store, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { pm := &pinManager{ a: alerts, + e: e, s: s, broadcaster: broadcaster, @@ -61,16 +75,19 @@ func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, s St updateInterval: updateInterval, rateWindow: rateWindow, - triggerChan: make(chan struct{}, 1), + triggerChan: make(chan bool, 1), closedChan: make(chan struct{}), } // start the pin manager - pm.wg.Add(1) - go func() { - pm.run() - pm.wg.Done() - }() + if e.Enabled() { + pm.wg.Add(1) + go func() { + pm.run() + pm.wg.Done() + }() + } + return pm } @@ -93,7 +110,7 @@ func (pm *pinManager) Shutdown(ctx context.Context) error { func (pm *pinManager) TriggerUpdate() { select { - case pm.triggerChan <- struct{}{}: + case pm.triggerChan <- true: default: } } @@ -106,16 +123,6 @@ func (pm *pinManager) averageRate() decimal.Decimal { return decimal.NewFromFloat(median) } -func (pm *pinManager) pinnedSettings(ctx context.Context) (api.PricePinSettings, error) { - var ps api.PricePinSettings - if pss, err := pm.s.Setting(ctx, api.SettingPricePinning); err != nil { - return api.PricePinSettings{}, err - } else if err := json.Unmarshal([]byte(pss), &ps); err != nil { - pm.logger.Panicf("failed to unmarshal pinned settings '%s': %v", pss, err) - } - return ps, nil -} - func (pm *pinManager) rateExceedsThreshold(threshold float64) bool { pm.mu.Lock() defer pm.mu.Unlock() @@ -168,8 +175,7 @@ func (pm *pinManager) run() { select { case <-pm.closedChan: return - case <-pm.triggerChan: - forced = true + case forced = <-pm.triggerChan: case <-t.C: } } @@ -241,20 +247,19 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin var updated bool // fetch gouging settings - var gs api.GougingSettings - if gss, err := pm.s.Setting(ctx, api.SettingGouging); err != nil { - return err - } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { - pm.logger.Panicf("failed to unmarshal gouging settings '%s': %v", gss, err) + gs, err := pm.s.GougingSettings(ctx) + if errors.Is(err, sql.ErrSettingNotFound) { + gs = api.DefaultGougingSettings + } else if err != nil { return err } // update max download price if pins.MaxDownload.IsPinned() { - update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxDownload.Value), rate) + maxDownloadCurr, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxDownload.Value), rate) if err != nil { pm.logger.Warn("failed to convert max download price to currency") - } else if !gs.MaxDownloadPrice.Equals(update) { + } else if update := maxDownloadCurr.Div64(1e12); !gs.MaxDownloadPrice.Equals(update) { gs.MaxDownloadPrice = update pm.logger.Infow("updating max download price", "old", gs.MaxDownloadPrice, "new", update, "rate", rate) updated = true @@ -275,10 +280,10 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin // update max upload price if pins.MaxUpload.IsPinned() { - update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxUpload.Value), rate) + maxUploadCurr, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxUpload.Value), rate) if err != nil { pm.logger.Warnw("failed to convert max upload price to currency", zap.Error(err)) - } else if !gs.MaxUploadPrice.Equals(update) { + } else if update := 
maxUploadCurr.Div64(1e12); !gs.MaxUploadPrice.Equals(update) { pm.logger.Infow("updating max upload price", "old", gs.MaxUploadPrice, "new", update, "rate", rate) gs.MaxUploadPrice = update updated = true @@ -292,15 +297,14 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin } // validate settings - err := gs.Validate() + err = gs.Validate() if err != nil { pm.logger.Warnw("failed to update gouging setting, new settings make the setting invalid", zap.Error(err)) return err } // update settings - bytes, _ := json.Marshal(gs) - err = pm.s.UpdateSetting(ctx, api.SettingGouging, string(bytes)) + err = pm.s.UpdateGougingSettings(ctx, gs) // broadcast event if err == nil { @@ -308,9 +312,8 @@ func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.Gougin Module: api.ModuleSetting, Event: api.EventUpdate, Payload: api.EventSettingUpdate{ - Key: api.SettingGouging, - Update: string(bytes), - Timestamp: time.Now().UTC(), + GougingSettings: &gs, + Timestamp: time.Now().UTC(), }, }) } @@ -322,19 +325,21 @@ func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { pm.logger.Debugw("updating prices", zap.Bool("forced", forced)) // fetch pinned settings - settings, err := pm.pinnedSettings(ctx) - if errors.Is(err, api.ErrSettingNotFound) { - pm.logger.Debug("price pinning not configured, skipping price update") - return nil + settings, err := pm.s.PinnedSettings(ctx) + if errors.Is(err, sql.ErrSettingNotFound) { + settings = api.DefaultPinnedSettings } else if err != nil { return fmt.Errorf("failed to fetch pinned settings: %w", err) - } else if !settings.Enabled { - pm.logger.Debug("price pinning is disabled, skipping price update") + } + + // check if pinning is enabled + if !settings.Enabled() { + pm.logger.Debug("no pinned settings, skipping price update") return nil } // fetch exchange rate - rate, err := NewForexClient(settings.ForexEndpointURL).SiacoinExchangeRate(ctx, settings.Currency) + rate, err := pm.e.SiacoinExchangeRate(ctx, settings.Currency) if err != nil { return fmt.Errorf("failed to fetch exchange rate for '%s': %w", settings.Currency, err) } else if rate <= 0 { diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go index e5158836d..d7800ed71 100644 --- a/internal/bus/pinmanager_test.go +++ b/internal/bus/pinmanager_test.go @@ -4,8 +4,6 @@ import ( "context" "encoding/json" "errors" - "net/http" - "net/http/httptest" "reflect" "sync" "testing" @@ -71,62 +69,56 @@ func (meb *mockBroadcaster) BroadcastAction(ctx context.Context, e webhooks.Even return nil } -type mockForexAPI struct { - s *httptest.Server - +type mockExplorer struct { mu sync.Mutex rate float64 unreachable bool } -func newTestForexAPI() *mockForexAPI { - api := &mockForexAPI{rate: 1} - api.s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api.mu.Lock() - defer api.mu.Unlock() - if api.unreachable { - w.WriteHeader(http.StatusInternalServerError) - return - } - json.NewEncoder(w).Encode(api.rate) - })) - return api +func (e *mockExplorer) Enabled() bool { + return true } -func (api *mockForexAPI) Close() { - api.s.Close() +func (e *mockExplorer) BaseURL() string { + return "" } -func (api *mockForexAPI) setRate(rate float64) { - api.mu.Lock() - defer api.mu.Unlock() - api.rate = rate +func (e *mockExplorer) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { + e.mu.Lock() + defer e.mu.Unlock() + + if e.unreachable { + return 0, errors.New("unreachable") + } 
+ return e.rate, nil } -func (api *mockForexAPI) setUnreachable(unreachable bool) { - api.mu.Lock() - defer api.mu.Unlock() - api.unreachable = unreachable +func (e *mockExplorer) setRate(rate float64) { + e.mu.Lock() + defer e.mu.Unlock() + e.rate = rate +} + +func (e *mockExplorer) setUnreachable(unreachable bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.unreachable = unreachable } type mockPinStore struct { mu sync.Mutex - settings map[string]string + gs api.GougingSettings + ps api.PinnedSettings autopilots map[string]api.Autopilot } func newTestStore() *mockPinStore { s := &mockPinStore{ autopilots: make(map[string]api.Autopilot), - settings: make(map[string]string), + gs: api.DefaultGougingSettings, + ps: api.DefaultPinnedSettings, } - // add default price pin - and gouging settings - b, _ := json.Marshal(api.DefaultPricePinSettings) - s.settings[api.SettingPricePinning] = string(b) - b, _ = json.Marshal(api.DefaultGougingSettings) - s.settings[api.SettingGouging] = string(b) - // add default autopilot s.autopilots[testAutopilotID] = api.Autopilot{ ID: testAutopilotID, @@ -140,34 +132,38 @@ func newTestStore() *mockPinStore { return s } -func (ms *mockPinStore) gougingSettings() api.GougingSettings { - val, err := ms.Setting(context.Background(), api.SettingGouging) - if err != nil { - panic(err) - } - var gs api.GougingSettings - if err := json.Unmarshal([]byte(val), &gs); err != nil { - panic(err) - } - return gs +func (ms *mockPinStore) GougingSettings(ctx context.Context) (api.GougingSettings, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + return ms.gs, nil } -func (ms *mockPinStore) updatPinnedSettings(pps api.PricePinSettings) { - b, _ := json.Marshal(pps) - ms.UpdateSetting(context.Background(), api.SettingPricePinning, string(b)) - time.Sleep(2 * testUpdateInterval) +func (ms *mockPinStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error { + ms.mu.Lock() + defer ms.mu.Unlock() + ms.gs = gs + return nil } -func (ms *mockPinStore) Setting(ctx context.Context, key string) (string, error) { +func (ms *mockPinStore) PinnedSettings(ctx context.Context) (api.PinnedSettings, error) { ms.mu.Lock() defer ms.mu.Unlock() - return ms.settings[key], nil + return ms.ps, nil } -func (ms *mockPinStore) UpdateSetting(ctx context.Context, key, value string) error { +func (ms *mockPinStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error { ms.mu.Lock() defer ms.mu.Unlock() - ms.settings[key] = value + + b, err := json.Marshal(ps) + if err != nil { + return err + } + var cloned api.PinnedSettings + if err := json.Unmarshal(b, &cloned); err != nil { + return err + } + ms.ps = cloned return nil } @@ -186,95 +182,86 @@ func (ms *mockPinStore) UpdateAutopilot(ctx context.Context, autopilot api.Autop func TestPinManager(t *testing.T) { // mock dependencies - ms := newTestStore() - eb := &mockBroadcaster{} a := &mockAlerter{} - - // mock forex api - forex := newTestForexAPI() - defer forex.Close() + b := &mockBroadcaster{} + e := &mockExplorer{rate: 1} + s := newTestStore() // create a pinmanager - pm := NewPinManager(a, eb, ms, testUpdateInterval, time.Minute, zap.NewNop()) + pm := NewPinManager(a, b, e, s, testUpdateInterval, time.Minute, zap.NewNop()) defer func() { if err := pm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - // define a small helper to fetch the price manager's rates - rates := func() []float64 { + // waitForUpdate waits for the price manager to update + waitForUpdate := func() { t.Helper() - pm.mu.Lock() - defer 
pm.mu.Unlock() - return pm.rates - } - - // assert price manager is disabled by default - if cnt := len(rates()); cnt != 0 { - t.Fatalf("expected no rates, got %d", cnt) + pm.triggerChan <- false + time.Sleep(testUpdateInterval) } // enable price pinning - pps := api.DefaultPricePinSettings - pps.Enabled = true - pps.Currency = "usd" - pps.Threshold = 0.5 - pps.ForexEndpointURL = forex.s.URL - ms.updatPinnedSettings(pps) - - // assert price manager is running now - if cnt := len(rates()); cnt < 1 { - t.Fatal("expected at least one rate") - } + ps := api.DefaultPinnedSettings + ps.Currency = "usd" + ps.Threshold = 0.5 + s.UpdatePinnedSettings(context.Background(), ps) - // update exchange rate and fetch current gouging settings - forex.setRate(2.5) - gs := ms.gougingSettings() + // fetch current gouging settings + gs, _ := s.GougingSettings(context.Background()) // configure all pins but disable them for now - pps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} - pps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} - pps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} - ms.updatPinnedSettings(pps) + ps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} + ps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} + ps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} + s.UpdatePinnedSettings(context.Background(), ps) // assert gouging settings are unchanged - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + if gss, _ := s.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { t.Fatalf("expected gouging settings to be the same, got %v", gss) } - // enable the max download pin, with the threshold at 0.5 it should remain unchanged - pps.GougingSettingsPins.MaxDownload.Pinned = true - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { - t.Fatalf("expected gouging settings to be the same, got %v", gss) + // enable the max download pin + ps.GougingSettingsPins.MaxDownload.Pinned = true + s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() + + // assert prices are not updated + if gss, _ := s.GougingSettings(context.Background()); !reflect.DeepEqual(gs, gss) { + t.Fatalf("expected gouging settings to be the same, got %v expected %v", gss, gs) } - // lower the threshold, gouging settings should be updated - pps.Threshold = 0.05 - ms.updatPinnedSettings(pps) - if gss := ms.gougingSettings(); gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { + // adjust and lower the threshold + e.setRate(1.5) + ps.Threshold = 0.05 + s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() + + // assert prices are updated + if gss, _ := s.GougingSettings(context.Background()); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) } // enable the rest of the pins - pps.GougingSettingsPins.MaxDownload.Pinned = true - pps.GougingSettingsPins.MaxStorage.Pinned = true - pps.GougingSettingsPins.MaxUpload.Pinned = true - ms.updatPinnedSettings(pps) + ps.GougingSettingsPins.MaxDownload.Pinned = true + ps.GougingSettingsPins.MaxStorage.Pinned = true + ps.GougingSettingsPins.MaxUpload.Pinned = true + s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert they're all updated - if gss := ms.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || + if gss, _ := s.GougingSettings(context.Background()); 
gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || gss.MaxStoragePrice.Equals(gs.MaxStoragePrice) || gss.MaxUploadPrice.Equals(gs.MaxUploadPrice) { t.Fatalf("expected gouging settings to be updated, got %v = %v", gss, gs) } // increase rate so average isn't catching up to us - forex.setRate(3) + e.setRate(3) // fetch autopilot - ap, _ := ms.Autopilot(context.Background(), testAutopilotID) + ap, _ := s.Autopilot(context.Background(), testAutopilotID) // add autopilot pin, but disable it pins := api.AutopilotPins{ @@ -283,39 +270,41 @@ func TestPinManager(t *testing.T) { Value: 2, }, } - pps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} - ms.updatPinnedSettings(pps) + ps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} + s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert autopilot was not updated - if app, _ := ms.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + if app, _ := s.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { t.Fatalf("expected autopilot to not be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) } // enable the pin pins.Allowance.Pinned = true - pps.Autopilots[testAutopilotID] = pins - ms.updatPinnedSettings(pps) + ps.Autopilots[testAutopilotID] = pins + s.UpdatePinnedSettings(context.Background(), ps) + waitForUpdate() // assert autopilot was updated - if app, _ := ms.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + if app, _ := s.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { t.Fatalf("expected autopilot to be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) } - // make forex API return an error - forex.setUnreachable(true) + // make explorer return an error + e.setUnreachable(true) + waitForUpdate() // assert alert was registered - ms.updatPinnedSettings(pps) res, _ := a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) == 0 { t.Fatalf("expected 1 alert, got %d", len(a.alerts)) } - // make forex API return a valid response - forex.setUnreachable(false) + // make explorer return a valid response + e.setUnreachable(false) + waitForUpdate() // assert alert was dismissed - ms.updatPinnedSettings(pps) res, _ = a.Alerts(context.Background(), alerts.AlertsOpts{}) if len(res.Alerts) != 0 { t.Fatalf("expected 0 alerts, got %d", len(a.alerts)) diff --git a/internal/gouging/gouging.go b/internal/gouging/gouging.go index aadfdd57f..f2fc11dec 100644 --- a/internal/gouging/gouging.go +++ b/internal/gouging/gouging.go @@ -13,8 +13,6 @@ import ( ) const ( - bytesPerTB = 1e12 - // maxBaseRPCPriceVsBandwidth is the max ratio for sane pricing between the // MinBaseRPCPrice and the MinDownloadBandwidthPrice. This ensures that 1 // million base RPC charges are at most 1% of the cost to download 4TB. 
This @@ -190,7 +188,11 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t } // check LatestRevisionCost - expect sane value - maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(gs.MaxDownloadPrice.Div64(bytesPerTB).Mul64(2048)) + twoKiBMax, overflow := gs.MaxDownloadPrice.Mul64WithOverflow(2048) + if overflow { + twoKiBMax = types.MaxCurrency + } + maxRevisionCost, overflow := gs.MaxRPCPrice.AddWithOverflow(twoKiBMax) if overflow { maxRevisionCost = types.MaxCurrency } @@ -292,12 +294,9 @@ func checkPruneGougingRHPv2(gs api.GougingSettings, hs *rhpv2.HostSettings) erro if overflow { return fmt.Errorf("%w: overflow detected when computing sector download price", ErrHostSettingsGouging) } - dpptb, overflow := sectorDownloadPrice.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TB - if overflow { - return fmt.Errorf("%w: overflow detected when computing download price per TiB", ErrHostSettingsGouging) - } - if !gs.MaxDownloadPrice.IsZero() && dpptb.Cmp(gs.MaxDownloadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max dl price: %v > %v", ErrHostSettingsGouging, dpptb, gs.MaxDownloadPrice) + dppb := sectorDownloadPrice.Div64(rhpv2.SectorSize) + if !gs.MaxDownloadPrice.IsZero() && dppb.Cmp(gs.MaxDownloadPrice) > 0 { + return fmt.Errorf("%w: cost per byte exceeds max dl price: %v > %v", ErrHostSettingsGouging, dppb, gs.MaxDownloadPrice) } return nil } @@ -310,12 +309,9 @@ func checkDownloadGougingRHPv3(gs api.GougingSettings, pt *rhpv3.HostPriceTable) if overflow { return fmt.Errorf("%w: overflow detected when computing sector download price", ErrPriceTableGouging) } - dpptb, overflow := sectorDownloadPrice.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TiB - if overflow { - return fmt.Errorf("%w: overflow detected when computing download price per TiB", ErrPriceTableGouging) - } - if !gs.MaxDownloadPrice.IsZero() && dpptb.Cmp(gs.MaxDownloadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max dl price: %v > %v", ErrPriceTableGouging, dpptb, gs.MaxDownloadPrice) + dppb := sectorDownloadPrice.Div64(rhpv2.SectorSize) + if !gs.MaxDownloadPrice.IsZero() && dppb.Cmp(gs.MaxDownloadPrice) > 0 { + return fmt.Errorf("%w: cost per byte exceeds max dl price: %v > %v", ErrPriceTableGouging, dppb, gs.MaxDownloadPrice) } return nil } @@ -328,12 +324,9 @@ func checkUploadGougingRHPv3(gs api.GougingSettings, pt *rhpv3.HostPriceTable) e if overflow { return fmt.Errorf("%w: overflow detected when computing sector price", ErrPriceTableGouging) } - uploadPrice, overflow := sectorUploadPricePerMonth.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TiB - if overflow { - return fmt.Errorf("%w: overflow detected when computing upload price per TiB", ErrPriceTableGouging) - } + uploadPrice := sectorUploadPricePerMonth.Div64(rhpv2.SectorSize) if !gs.MaxUploadPrice.IsZero() && uploadPrice.Cmp(gs.MaxUploadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max ul price: %v > %v", ErrPriceTableGouging, uploadPrice, gs.MaxUploadPrice) + return fmt.Errorf("%w: cost per byte exceeds max ul price: %v > %v", ErrPriceTableGouging, uploadPrice, gs.MaxUploadPrice) } return nil } diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 441297c34..66611a528 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -28,6 +28,7 @@ type ( MainMigrator interface { Migrator MakeDirsForPath(ctx context.Context, tx Tx, path string) (int64, error) + UpdateSetting(ctx 
context.Context, tx Tx, key, value string) error } ) @@ -218,9 +219,27 @@ var ( }, }, { - ID: "00018_archived_contracts", + ID: "00018_gouging_units", Migrate: func(tx Tx) error { - return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00018_archived_contracts", log) + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00018_gouging_units", log) + }, + }, + { + ID: "00019_settings", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00019_settings", log) + }, + }, + { + ID: "00020_idx_db_directory", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00020_idx_db_directory", log) + }, + }, + { + ID: "00021_archived_contracts", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00021_archived_contracts", log) }, }, } diff --git a/internal/test/config.go b/internal/test/config.go index 64dc98c7f..b33c2b0d6 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -33,10 +33,7 @@ var ( }, } - ContractSet = "testset" - ContractSetSettings = api.ContractSetSetting{ - Default: ContractSet, - } + ContractSet = "testset" GougingSettings = api.GougingSettings{ MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC @@ -52,13 +49,18 @@ var ( MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC } - PricePinSettings = api.DefaultPricePinSettings + PricePinSettings = api.DefaultPinnedSettings RedundancySettings = api.RedundancySettings{ MinShards: 2, TotalShards: 3, } + UploadSettings = api.UploadSettings{ + DefaultContractSet: ContractSet, + Redundancy: RedundancySettings, + } + S3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" S3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" S3Credentials = credentials.NewStaticV4(S3AccessKeyID, S3SecretAccessKey, "") diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 94659b277..c82283726 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -121,7 +121,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -146,7 +146,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -156,7 +156,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 332215159..607dc7cd7 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -486,18 +486,25 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { })) } - // Update the bus settings. 
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, test.GougingSettings))
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, test.ContractSetSettings))
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingPricePinning, test.PricePinSettings))
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings))
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{
-		V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey},
-	}))
-	tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{
+	// Build upload settings.
+	us := test.UploadSettings
+	us.Packing = api.UploadPackingSettings{
 		Enabled:               enableUploadPacking,
-		SlabBufferMaxSizeSoft: api.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft,
-	}))
+		SlabBufferMaxSizeSoft: 1 << 32, // 4 GiB
+	}
+
+	// Build S3 settings.
+	s3 := api.S3Settings{
+		Authentication: api.S3AuthenticationSettings{
+			V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey},
+		},
+	}
+
+	// Update the bus settings.
+	tt.OK(busClient.UpdateGougingSettings(ctx, test.GougingSettings))
+	tt.OK(busClient.UpdatePinnedSettings(ctx, test.PricePinSettings))
+	tt.OK(busClient.UpdateUploadSettings(ctx, us))
+	tt.OK(busClient.UpdateS3Settings(ctx, s3))
 
 	// Fund the bus.
 	if funding {
@@ -538,13 +545,14 @@
 }
 
 func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, bus.Store, error) {
-	// create store
+	// create store config
 	alertsMgr := alerts.NewManager()
 	storeCfg, err := buildStoreConfig(alertsMgr, dir, cfg.SlabBufferCompletionThreshold, cfgDb, pk, logger)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
 
+	// create store
 	sqlStore, err := stores.NewSQLStore(storeCfg)
 	if err != nil {
 		return nil, nil, nil, nil, err
@@ -631,7 +639,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig,
 	// create bus
 	announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour
-	b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger)
+	b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, "", logger)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
@@ -736,6 +744,13 @@ func (c *TestCluster) sync() {
 			return fmt.Errorf("subscriber hasn't caught up, %d < %d", cs.BlockHeight, tip.Height)
 		}
 
+		wallet, err := c.Bus.Wallet(context.Background())
+		if err != nil {
+			return err
+		} else if wallet.ScanHeight < tip.Height {
+			return fmt.Errorf("wallet hasn't caught up, %d < %d", wallet.ScanHeight, tip.Height)
+		}
+
 		for _, h := range c.hosts {
 			if hh := h.cm.Tip().Height; hh < tip.Height {
 				return fmt.Errorf("host %v is not synced, %v < %v", h.PublicKey(), hh, cs.BlockHeight)
@@ -778,8 +793,8 @@ func (c *TestCluster) WaitForContracts() []api.Contract {
 	// fetch all contracts
 	resp, err := c.Worker.Contracts(context.Background(), time.Minute)
 	c.tt.OK(err)
-	if resp.Error != "" {
-		c.tt.Fatal(resp.Error)
+	if len(resp.Errors) > 0 {
+		c.tt.Fatal(resp.Errors)
 	}
 	return resp.Contracts
 }
diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index 3dc5ac5e0..bd00734ca 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -34,7 +34,7 @@ import (
 	"lukechampine.com/frand"
 )
 
-func TestListObjects(t
*testing.T) { +func TestListObjectsWithNoDelimiter(t *testing.T) { if testing.Short() { t.SkipNow() } @@ -45,13 +45,13 @@ func TestListObjects(t *testing.T) { assertMetadata := func(entries []api.ObjectMetadata) { for i := range entries { // assert mod time - if !strings.HasSuffix(entries[i].Name, "/") && !entries[i].ModTime.Std().After(start.UTC()) { + if !strings.HasSuffix(entries[i].Key, "/") && !entries[i].ModTime.Std().After(start.UTC()) { t.Fatal("mod time should be set") } entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file + isDir := strings.HasSuffix(entries[i].Key, "/") && entries[i].Key != "//double/" // double is a file if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { t.Fatal("unexpected mime type", entries[i].MimeType) } @@ -77,7 +77,7 @@ func TestListObjects(t *testing.T) { // upload the following paths uploads := []struct { - path string + key string size int }{ {"/foo/bar", 1}, @@ -90,11 +90,11 @@ func TestListObjects(t *testing.T) { for _, upload := range uploads { if upload.size == 0 { - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } else { data := make([]byte, upload.size) frand.Read(data) - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.key, api.UploadObjectOptions{})) } } @@ -104,21 +104,20 @@ func TestListObjects(t *testing.T) { sortDir string want []api.ObjectMetadata }{ - {"/", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", "", api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/", api.ObjectSortByHealth, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/foo/b", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, 
{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.SortDirAsc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.SortDirDesc, []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/FOO/bar", Size: 6, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.SortDirAsc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.SortDirDesc, []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/foo/b", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, {"o/baz/quu", "", "", []api.ObjectMetadata{}}, - {"/foo", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"/foo", api.ObjectSortBySize, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, + {"/foo", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.SortDirAsc, []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.SortDirDesc, []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}}}, } for _, test := range tests { // use the bus client - res, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.prefix, + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.prefix, api.ListObjectOptions{ SortBy: test.sortBy, 
SortDir: test.sortDir, Limit: -1, @@ -136,8 +135,7 @@ func TestListObjects(t *testing.T) { if len(res.Objects) > 0 { marker := "" for offset := 0; offset < len(test.want); offset++ { - res, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ - Prefix: test.prefix, + res, err := b.Objects(context.Background(), api.DefaultBucketName, test.prefix, api.ListObjectOptions{ SortBy: test.sortBy, SortDir: test.sortDir, Marker: marker, @@ -153,8 +151,8 @@ func TestListObjects(t *testing.T) { got := res.Objects if len(got) != 1 { t.Fatalf("expected 1 object, got %v", len(got)) - } else if got[0].Name != test.want[offset].Name { - t.Fatalf("expected %v, got %v, offset %v, marker %v, sortBy %v, sortDir %v", test.want[offset].Name, got[0].Name, offset, marker, test.sortBy, test.sortDir) + } else if got[0].Key != test.want[offset].Key { + t.Fatalf("expected %v, got %v, offset %v, marker %v, sortBy %v, sortDir %v", test.want[offset].Key, got[0].Key, offset, marker, test.sortBy, test.sortDir) } marker = res.NextMarker } @@ -162,7 +160,7 @@ func TestListObjects(t *testing.T) { } // list invalid marker - _, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + _, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{ Marker: "invalid", SortBy: api.ObjectSortByHealth, }) @@ -180,27 +178,25 @@ func TestNewTestCluster(t *testing.T) { tt := cluster.tt // Upload packing should be disabled by default. - ups, err := b.UploadPackingSettings(context.Background()) + us, err := b.UploadSettings(context.Background()) tt.OK(err) - if ups.Enabled { - t.Fatalf("expected upload packing to be disabled by default, got %v", ups.Enabled) + if us.Packing.Enabled { + t.Fatalf("expected upload packing to be disabled by default, got %v", us.Packing.Enabled) } - // PricePinningSettings should have default values - pps, err := b.PricePinningSettings(context.Background()) + // PinnedSettings should have default values + ps, err := b.PinnedSettings(context.Background()) tt.OK(err) - if pps.ForexEndpointURL == "" { - t.Fatal("expected default value for ForexEndpointURL") - } else if pps.Currency == "" { + if ps.Currency == "" { t.Fatal("expected default value for Currency") - } else if pps.Threshold == 0 { + } else if ps.Threshold == 0 { t.Fatal("expected default value for Threshold") } // Autopilot shouldn't have its prices pinned - if len(pps.Autopilots) != 1 { - t.Fatalf("expected 1 autopilot, got %v", len(pps.Autopilots)) - } else if pin, exists := pps.Autopilots[api.DefaultAutopilotID]; !exists { + if len(ps.Autopilots) != 1 { + t.Fatalf("expected 1 autopilot, got %v", len(ps.Autopilots)) + } else if pin, exists := ps.Autopilots[api.DefaultAutopilotID]; !exists { t.Fatalf("expected autopilot %v to exist", api.DefaultAutopilotID) } else if pin.Allowance != (api.Pin{}) { t.Fatalf("expected autopilot %v to have no pinned allowance, got %v", api.DefaultAutopilotID, pin.Allowance) @@ -292,67 +288,68 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. 
- hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) for _, host := range hosts { - hi, err := cluster.Autopilot.HostInfo(host.PublicKey) + hi, err := cluster.Bus.Host(context.Background(), host.PublicKey) if err != nil { t.Fatal(err) - } - if hi.Checks.ScoreBreakdown.Score() == 0 { - js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") + } else if checks := hi.Checks[testApCfg().ID]; checks == (api.HostCheck{}) { + t.Fatal("host check not found") + } else if checks.ScoreBreakdown.Score() == 0 { + js, _ := json.MarshalIndent(checks.ScoreBreakdown, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) - } - if hi.Checks.Score == 0 { - t.Fatal("score shouldn't be 0") - } - if !hi.Checks.Usable { + } else if !checks.UsabilityBreakdown.IsUsable() { t.Fatal("host should be usable") - } - if len(hi.Checks.UnusableReasons) != 0 { + } else if len(checks.UsabilityBreakdown.UnusableReasons()) != 0 { t.Fatal("usable hosts don't have any reasons set") - } - if reflect.DeepEqual(hi.Host, api.Host{}) { + } else if reflect.DeepEqual(hi, api.Host{}) { t.Fatal("host wasn't set") - } - if hi.Host.Settings.Release == "" { + } else if hi.Settings.Release == "" { t.Fatal("release should be set") } } - hostInfos, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + hostInfos, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + }) tt.OK(err) allHosts := make(map[types.PublicKey]struct{}) for _, hi := range hostInfos { - if hi.Checks.ScoreBreakdown.Score() == 0 { - js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") + if checks := hi.Checks[testApCfg().ID]; checks == (api.HostCheck{}) { + t.Fatal("host check not found") + } else if checks.ScoreBreakdown.Score() == 0 { + js, _ := json.MarshalIndent(checks.ScoreBreakdown, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) - } - if hi.Checks.Score == 0 { - t.Fatal("score shouldn't be 0") - } - if !hi.Checks.Usable { + } else if !checks.UsabilityBreakdown.IsUsable() { t.Fatal("host should be usable") - } - if len(hi.Checks.UnusableReasons) != 0 { + } else if len(checks.UsabilityBreakdown.UnusableReasons()) != 0 { t.Fatal("usable hosts don't have any reasons set") - } - if reflect.DeepEqual(hi.Host, api.Host{}) { + } else if reflect.DeepEqual(hi, api.Host{}) { t.Fatal("host wasn't set") } - allHosts[hi.Host.PublicKey] = struct{}{} + allHosts[hi.PublicKey] = struct{}{} } - hostInfosUnusable, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) + hostInfosUnusable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + AutopilotID: testApCfg().ID, + FilterMode: api.UsabilityFilterModeAll, + UsabilityMode: api.UsabilityFilterModeUnusable, + }) tt.OK(err) if len(hostInfosUnusable) != 0 { t.Fatal("there should be no unusable hosts", len(hostInfosUnusable)) } - hostInfosUsable, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + hostInfosUsable, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{ + AutopilotID: testApCfg().ID, + FilterMode: api.UsabilityFilterModeAll, + UsabilityMode: 
api.UsabilityFilterModeUsable, + }) tt.OK(err) for _, hI := range hostInfosUsable { - delete(allHosts, hI.Host.PublicKey) + delete(allHosts, hI.PublicKey) } if len(hostInfosUsable) != len(hostInfos) || len(allHosts) != 0 { t.Fatalf("result for 'usable' should match the result for 'all', \n\nall: %+v \n\nusable: %+v", hostInfos, hostInfosUsable) @@ -361,28 +358,26 @@ func TestNewTestCluster(t *testing.T) { // Fetch the autopilot state state, err := cluster.Autopilot.State() tt.OK(err) - if time.Time(state.StartTime).IsZero() { + if state.ID != api.DefaultAutopilotID { + t.Fatal("autopilot should have default id", state.ID) + } else if time.Time(state.StartTime).IsZero() { t.Fatal("autopilot should have start time") - } - if time.Time(state.MigratingLastStart).IsZero() { + } else if time.Time(state.MigratingLastStart).IsZero() { t.Fatal("autopilot should have completed a migration") - } - if time.Time(state.ScanningLastStart).IsZero() { + } else if time.Time(state.ScanningLastStart).IsZero() { t.Fatal("autopilot should have completed a scan") - } - if state.UptimeMS == 0 { + } else if state.UptimeMS == 0 { t.Fatal("uptime should be set") - } - if !state.Configured { + } else if !state.Configured { t.Fatal("autopilot should be configured") } } -// TestObjectEntries is an integration test that verifies objects are uploaded, -// download and deleted from and to the paths we would expect. It is similar to -// the TestObjectEntries unit test, but uses the worker and bus client to verify -// paths are passed correctly. -func TestObjectEntries(t *testing.T) { +// TestListObjectsWithDelimiterSlash is an integration test that verifies +// objects are uploaded, download and deleted from and to the paths we +// would expect. It is similar to the TestObjectEntries unit test, but uses +// the worker and bus client to verify paths are passed correctly. 
+func TestListObjectsWithDelimiterSlash(t *testing.T) {
 	if testing.Short() {
 		t.SkipNow()
 	}
@@ -393,13 +388,13 @@ func TestObjectEntries(t *testing.T) {
 	assertMetadata := func(entries []api.ObjectMetadata) {
 		for i := range entries {
 			// assert mod time
-			if !strings.HasSuffix(entries[i].Name, "/") && !entries[i].ModTime.Std().After(start.UTC()) {
+			if !strings.HasSuffix(entries[i].Key, "/") && !entries[i].ModTime.Std().After(start.UTC()) {
 				t.Fatal("mod time should be set")
 			}
 			entries[i].ModTime = api.TimeRFC3339{}

 			// assert mime type
-			isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file
+			isDir := strings.HasSuffix(entries[i].Key, "/") && entries[i].Key != "//double/" // double is a file
 			if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") {
 				t.Fatal("unexpected mime type", entries[i].MimeType)
 			}
@@ -425,7 +420,7 @@ func TestObjectEntries(t *testing.T) {

 	// upload the following paths
 	uploads := []struct {
-		path string
+		key  string
 		size int
 	}{
 		{"/foo/bar", 1},
@@ -441,11 +436,11 @@ func TestObjectEntries(t *testing.T) {

 	for _, upload := range uploads {
 		if upload.size == 0 {
-			tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.path, api.UploadObjectOptions{}))
+			tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.key, api.UploadObjectOptions{}))
 		} else {
 			data := make([]byte, upload.size)
 			frand.Read(data)
-			tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.path, api.UploadObjectOptions{}))
+			tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.key, api.UploadObjectOptions{}))
 		}
 	}
@@ -456,59 +451,61 @@ func TestObjectEntries(t *testing.T) {
 		sortDir string
 		want    []api.ObjectMetadata
 	}{
-		{"/", "", "", "", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}},
-		{"//", "", "", "", []api.ObjectMetadata{{Name: "///", Size: 8, Health: 1}, {Name: "//double/", Size: 7, Health: 1}}},
-		{"///", "", "", "", []api.ObjectMetadata{{Name: "///triple", Size: 8, Health: 1}}},
-		{"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: 1}}},
-		{"/FOO/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 9, Health: 1}}},
-		{"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}},
-		{"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}},
-		{"/fileś/", "", "", "", []api.ObjectMetadata{{Name: "/fileś/śpecial", Size: 6, Health: 1}}},
-
-		{"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}}},
+		{"/", "", "", "", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}},
+		{"//", "", "", "", []api.ObjectMetadata{{Key: "///", Size: 8, Health: 1}, {Key: "//double/", Size: 7, Health: 1}}},
+		{"///", "", "", "", []api.ObjectMetadata{{Key: "///triple", Size: 8, Health: 1}}},
+		{"/foo/", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/", Size: 7, Health: 1}}},
+		{"/FOO/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 9, Health: 1}}},
+		{"/foo/baz/", "", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}},
+		{"/gab/", "", "", "", []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}}},
+		{"/fileś/", "", "", "", []api.ObjectMetadata{{Key: "/fileś/śpecial", Size: 6, Health: 1}}},
+
+		{"/", "f", "", "", []api.ObjectMetadata{{Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}}},
 		{"/foo/", "fo", "", "", []api.ObjectMetadata{}},
-		{"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}}},
+		{"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}}},
 		{"/gab/", "/guub", "", "", []api.ObjectMetadata{}},

-		{"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}},
-		{"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "//", Size: 15, Health: 1}}},
+		{"/", "", "name", "ASC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}},
+		{"/", "", "name", "DESC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "//", Size: 15, Health: 1}}},

-		{"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}},
-		{"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}},
+		{"/", "", "health", "ASC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}},
+		{"/", "", "health", "DESC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}},

-		{"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "//", Size: 15, Health: 1}}},
-		{"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "//", Size: 15, Health: 1}, {Name: "/foo/", Size: 10, Health: 1}, {Name: "/FOO/", Size: 9, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}},
+		{"/", "", "size", "ASC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "//", Size: 15, Health: 1}}},
+		{"/", "", "size", "DESC", []api.ObjectMetadata{{Key: "//", Size: 15, Health: 1}, {Key: "/foo/", Size: 10, Health: 1}, {Key: "/FOO/", Size: 9, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}},
 	}
 	for _, test := range tests {
 		// use the bus client
-		res, err := b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{
-			Prefix:  test.prefix,
-			SortBy:  test.sortBy,
-			SortDir: test.sortDir,
+		res, err := b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{
+			Delimiter: "/",
+			SortBy:    test.sortBy,
+			SortDir:   test.sortDir,
 		})
 		if err != nil {
 			t.Fatal(err, test.path)
 		}
-		assertMetadata(res.Entries)
+		assertMetadata(res.Objects)

-		if !(len(res.Entries) == 0 && len(test.want) == 0) && !reflect.DeepEqual(res.Entries, test.want) {
-			t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Entries, test.want)
+		if !(len(res.Objects) == 0 && len(test.want) == 0) && !reflect.DeepEqual(res.Objects, test.want) {
+			t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Objects, test.want)
 		}

+		var marker string
 		for offset := 0; offset < len(test.want); offset++ {
-			res, err := b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{
-				Prefix:  test.prefix,
-				SortBy:  test.sortBy,
-				SortDir: test.sortDir,
-				Offset:  offset,
-				Limit:   1,
+			res, err := b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{
+				Delimiter: "/",
+				SortBy:    test.sortBy,
+				SortDir:   test.sortDir,
+				Marker:    marker,
+				Limit:     1,
 			})
+			marker = res.NextMarker
 			if err != nil {
 				t.Fatal(err)
 			}
-			assertMetadata(res.Entries)
+			assertMetadata(res.Objects)

-			if len(res.Entries) != 1 || res.Entries[0] != test.want[offset] {
-				t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Entries, test.want[offset])
+			if len(res.Objects) != 1 || res.Objects[0] != test.want[offset] {
+				t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.sortBy, test.sortDir, res.Objects, test.want[offset])
 			}
 			moreRemaining := len(test.want)-offset-1 > 0
 			if res.HasMore != moreRemaining {
@@ -520,64 +517,39 @@ func TestObjectEntries(t *testing.T) {
 				continue
 			}

-			res, err = b.Object(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{
-				Prefix:  test.prefix,
-				SortBy:  test.sortBy,
-				SortDir: test.sortDir,
-				Marker:  test.want[offset].Name,
-				Limit:   1,
+			res, err = b.Objects(context.Background(), api.DefaultBucketName, test.path+test.prefix, api.ListObjectOptions{
+				Delimiter: "/",
+				SortBy:    test.sortBy,
+				SortDir:   test.sortDir,
+				Marker:    test.want[offset].Key,
+				Limit:     1,
 			})
 			if err != nil {
-				t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %vmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, err)
+				t.Fatalf("\nlist: %v\nprefix: %v\nsortBy: %v\nsortDir: %v\nmarker: %v\n\nerr: %v", test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Key, err)
 			}
-			assertMetadata(res.Entries)
+			assertMetadata(res.Objects)

-			if len(res.Entries) != 1 || res.Entries[0] != test.want[offset+1] {
-				t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.want[offset].Name, res.Entries, test.want[offset+1])
+			if len(res.Objects) != 1 || res.Objects[0] != test.want[offset+1] {
+				t.Errorf("\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", test.path, test.prefix, test.want[offset].Key, res.Objects, test.want[offset+1])
 			}
 			moreRemaining = len(test.want)-offset-2 > 0
 			if res.HasMore != moreRemaining {
-				t.Errorf("invalid value for hasMore (%t) at marker (%s) test (%+v)", res.HasMore, test.want[offset].Name, test)
-			}
-		}
-
-		// use the worker client
-		got, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, test.path, api.GetObjectOptions{
-			Prefix:  test.prefix,
-			SortBy:  test.sortBy,
-			SortDir: test.sortDir,
-		})
-		if err != nil {
-			t.Fatal(err)
-		}
-		assertMetadata(got)
-
-		if !(len(got) == 0 && len(test.want) == 0) && !reflect.DeepEqual(got, test.want) {
-			t.Errorf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want)
-		}
-		for _, entry := range got {
-			if !strings.HasSuffix(entry.Name, "/") {
-				buf := new(bytes.Buffer)
-				if err := w.DownloadObject(context.Background(), buf, api.DefaultBucketName, entry.Name, api.DownloadObjectOptions{}); err != nil {
-					t.Fatal(err)
-				} else if buf.Len() != int(entry.Size) {
-					t.Fatal("unexpected", buf.Len(), entry.Size)
-				}
+				t.Errorf("invalid value for hasMore (%t) at marker (%s) test (%+v)", res.HasMore, test.want[offset].Key, test)
 			}
 		}
 	}

 	// delete all uploads
 	for _, upload := range uploads {
-		tt.OK(w.DeleteObject(context.Background(), api.DefaultBucketName, upload.path, api.DeleteObjectOptions{}))
+		tt.OK(w.DeleteObject(context.Background(), api.DefaultBucketName, upload.key, api.DeleteObjectOptions{}))
 	}

 	// assert root dir is empty
-	if entries, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, "/", api.GetObjectOptions{}); err != nil {
+	if resp, err := b.Objects(context.Background(), api.DefaultBucketName, "/", api.ListObjectOptions{}); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 0 {
-		t.Fatal("there should be no entries left", entries)
+	} else if len(resp.Objects) != 0 {
+		t.Fatal("there should be no entries left", resp.Objects)
 	}
 }
@@ -746,7 +718,7 @@ func TestUploadDownloadBasic(t *testing.T) {

 	// check that stored data on hosts was updated
 	tt.Retry(100, 100*time.Millisecond, func() error {
-		hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{})
+		hosts, err := cluster.Bus.Hosts(context.Background(), api.HostOptions{})
 		tt.OK(err)
 		for _, host := range hosts {
 			if host.StoredData != rhpv2.SectorSize {
@@ -788,37 +760,36 @@ func TestUploadDownloadExtended(t *testing.T) {
 	tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(file2), api.DefaultBucketName, "fileś/file2", api.UploadObjectOptions{}))

 	// fetch all entries from the worker
-	entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "fileś/", api.GetObjectOptions{})
+	resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileś/", api.ListObjectOptions{
+		Delimiter: "/",
+	})
 	tt.OK(err)
-	if len(entries) != 2 {
-		t.Fatal("expected two entries to be returned", len(entries))
+	if len(resp.Objects) != 2 {
+		t.Fatal("expected two entries to be returned", len(resp.Objects))
 	}
-	for _, entry := range entries {
+	for _, entry := range resp.Objects {
 		if entry.MimeType != "application/octet-stream" {
 			t.Fatal("wrong mime type", entry.MimeType)
 		}
 	}

-	// fetch entries with "file" prefix
-	res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, "fileś/", api.GetObjectOptions{Prefix: "file"})
-	tt.OK(err)
-	if len(res.Entries) != 2 {
-		t.Fatal("expected two entry to be returned", len(entries))
-	}
-
-	// fetch entries with "fileś" prefix
-	res, err = cluster.Bus.Object(context.Background(), api.DefaultBucketName, "fileś/", api.GetObjectOptions{Prefix: "foo"})
+	// fetch entries in /fileś starting with "file"
+	res, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileś/file", api.ListObjectOptions{
+		Delimiter: "/",
+	})
 	tt.OK(err)
-	if len(res.Entries) != 0 {
-		t.Fatal("expected no entries to be returned", len(entries))
+	if len(res.Objects) != 2 {
+		t.Fatal("expected two entries to be returned", len(res.Objects))
 	}

-	// fetch entries from the worker for unexisting path
-	entries, err = cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "bar/", api.GetObjectOptions{})
+	// fetch entries in /fileś starting with "foo"
+	res, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "fileś/foo", api.ListObjectOptions{
+		Delimiter: "/",
+	})
 	tt.OK(err)
-	if len(entries) != 0 {
-		t.Fatal("expected no entries to be returned", len(entries))
+	if len(res.Objects) != 0 {
+		t.Fatal("expected no entries to be returned", len(res.Objects))
 	}

 	// prepare two files, a small one and a large one
@@ -964,20 +935,9 @@ func TestUploadDownloadSpending(t *testing.T) {
 		tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{}))

 		// Should be registered in bus.
-		res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, "", api.GetObjectOptions{})
+		_, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{})
 		tt.OK(err)

-		var found bool
-		for _, entry := range res.Entries {
-			if entry.Name == fmt.Sprintf("/%s", path) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Fatal("uploaded object not found in bus")
-		}
-
 		// download the data
 		var buffer bytes.Buffer
 		tt.OK(w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{}))
@@ -993,20 +953,20 @@ func TestUploadDownloadSpending(t *testing.T) {
 	uploadDownload()

 	// Fuzzy search for uploaded data in various ways.
-	objects, err := cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{})
+	resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{})
 	tt.OK(err)
-	if len(objects) != 2 {
-		t.Fatalf("should have 2 objects but got %v", len(objects))
+	if len(resp.Objects) != 2 {
+		t.Fatalf("should have 2 objects but got %v", len(resp.Objects))
 	}
-	objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "ata"})
+	resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "ata"})
 	tt.OK(err)
-	if len(objects) != 2 {
-		t.Fatalf("should have 2 objects but got %v", len(objects))
+	if len(resp.Objects) != 2 {
+		t.Fatalf("should have 2 objects but got %v", len(resp.Objects))
 	}
-	objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "1258"})
+	resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "1258"})
 	tt.OK(err)
-	if len(objects) != 1 {
-		t.Fatalf("should have 1 objects but got %v", len(objects))
+	if len(resp.Objects) != 1 {
+		t.Fatalf("should have 1 object but got %v", len(resp.Objects))
 	}

 	// renew contracts.
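The listing hunks above fold the dedicated `SearchObjects` endpoint into the bus client's `Objects` call: substring search, prefix filtering, sorting, and marker-based paging are now options on a single request. A sketch of paging through every match, assuming the renterd `api` and bus client packages and the option and response fields used in these tests (`Substring`, `Marker`, `Limit`, `HasMore`, `NextMarker`):

```go
// listMatches collects every object in a bucket whose key contains substr,
// following NextMarker until the bus reports no further pages.
// Illustrative fragment: the client type and field names mirror the tests
// above rather than a documented, stable API.
func listMatches(ctx context.Context, b *bus.Client, bucket, substr string) ([]api.ObjectMetadata, error) {
	var all []api.ObjectMetadata
	marker := ""
	for {
		resp, err := b.Objects(ctx, bucket, "", api.ListObjectOptions{
			Substring: substr,
			Marker:    marker,
			Limit:     100,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Objects...)
		if !resp.HasMore {
			return all, nil
		}
		marker = resp.NextMarker // resume where the previous page ended
	}
}
```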
@@ -1234,28 +1194,29 @@ func TestParallelUpload(t *testing.T) {
 	wg.Wait()

 	// Check if objects exist.
-	objects, err := cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/dir/", Limit: 100})
+	resp, err := cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/dir/", Limit: 100})
 	tt.OK(err)
-	if len(objects) != 3 {
-		t.Fatal("wrong number of objects", len(objects))
+	if len(resp.Objects) != 3 {
+		t.Fatal("wrong number of objects", len(resp.Objects))
 	}

 	// Upload one more object.
 	tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader([]byte("data")), api.DefaultBucketName, "/foo", api.UploadObjectOptions{}))

-	objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100})
+	resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100})
 	tt.OK(err)
-	if len(objects) != 4 {
-		t.Fatal("wrong number of objects", len(objects))
+	if len(resp.Objects) != 4 {
+		t.Fatal("wrong number of objects", len(resp.Objects))
 	}

 	// Delete all objects under /dir/.
 	if err := cluster.Bus.DeleteObject(context.Background(), api.DefaultBucketName, "/dir/", api.DeleteObjectOptions{Batch: true}); err != nil {
 		t.Fatal(err)
 	}
-	objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100})
+	resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100})
 	tt.OK(err)
-	if len(objects) != 1 {
+	if len(resp.Objects) != 1 {
 		t.Fatal("objects weren't deleted")
 	}
@@ -1263,9 +1224,10 @@ func TestParallelUpload(t *testing.T) {
 	if err := cluster.Bus.DeleteObject(context.Background(), api.DefaultBucketName, "/", api.DeleteObjectOptions{Batch: true}); err != nil {
 		t.Fatal(err)
 	}
-	objects, err = cluster.Bus.SearchObjects(context.Background(), api.DefaultBucketName, api.SearchObjectOptions{Key: "/", Limit: 100})
+	resp, err = cluster.Bus.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{Substring: "/", Limit: 100})
 	tt.OK(err)
-	if len(objects) != 0 {
+	if len(resp.Objects) != 0 {
 		t.Fatal("objects weren't deleted")
 	}
 }
@@ -1420,7 +1382,7 @@ func TestUploadDownloadSameHost(t *testing.T) {
 	// upload 3 objects so every host has 3 sectors
 	var err error
-	var res api.ObjectsResponse
+	var res api.Object
 	shards := make(map[types.PublicKey][]object.Sector)
 	for i := 0; i < 3; i++ {
 		// upload object
@@ -1446,7 +1408,7 @@
 	// build a frankenstein object constructed with all sectors on the same host
 	res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost]
-	tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object.Object, api.AddObjectOptions{}))
+	tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object, api.AddObjectOptions{}))

 	// assert we can download this object
 	tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{}))
@@ -1522,7 +1484,7 @@ func
TestUnconfirmedContractArchival(t *testing.T) { c := contracts[0] // manually insert a contract - err = cluster.bs.InsertContract(context.Background(), api.ContractMetadata{ + err = cluster.bs.PutContract(context.Background(), api.ContractMetadata{ ID: types.FileContractID{1}, HostKey: c.HostKey, StartHeight: cs.BlockHeight, @@ -1557,7 +1519,7 @@ func TestUnconfirmedContractArchival(t *testing.T) { }) } -func TestWalletTransactions(t *testing.T) { +func TestWalletEvents(t *testing.T) { if testing.Short() { t.SkipNow() } @@ -1573,67 +1535,38 @@ func TestWalletTransactions(t *testing.T) { time.Sleep(time.Second) cluster.MineBlocks(1) - // Get all transactions of the wallet. - allTxns, err := b.WalletTransactions(context.Background()) + // Get all events of the wallet. + allTxns, err := b.WalletEvents(context.Background()) tt.OK(err) if len(allTxns) < 5 { - t.Fatalf("expected at least 5 transactions, got %v", len(allTxns)) + t.Fatalf("expected at least 5 events, got %v", len(allTxns)) } if !sort.SliceIsSorted(allTxns, func(i, j int) bool { return allTxns[i].Timestamp.Unix() > allTxns[j].Timestamp.Unix() }) { - t.Fatal("transactions are not sorted by timestamp") + t.Fatal("events are not sorted by timestamp") } - // Get the transactions at an offset and compare. - txns, err := b.WalletTransactions(context.Background(), api.WalletTransactionsWithOffset(2)) + // Get the events at an offset and compare. + txns, err := b.WalletEvents(context.Background(), api.WalletTransactionsWithOffset(2)) tt.OK(err) if !reflect.DeepEqual(txns, allTxns[2:]) { - t.Fatal("transactions don't match", cmp.Diff(txns, allTxns[2:])) - } - - // Find the first index that has a different timestamp than the first. - var txnIdx int - for i := 1; i < len(allTxns); i++ { - if allTxns[i].Timestamp.Unix() != allTxns[0].Timestamp.Unix() { - txnIdx = i - break - } + t.Fatal("events don't match", cmp.Diff(txns, allTxns[2:])) } - medianTxnTimestamp := allTxns[txnIdx].Timestamp - // Limit the number of transactions to 5. - txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithLimit(5)) + // Limit the number of events to 5. + txns, err = b.WalletEvents(context.Background(), api.WalletTransactionsWithLimit(5)) tt.OK(err) if len(txns) != 5 { - t.Fatalf("expected exactly 5 transactions, got %v", len(txns)) + t.Fatalf("expected exactly 5 events, got %v", len(txns)) } - // Fetch txns before and since median. - txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithBefore(medianTxnTimestamp)) - tt.OK(err) - if len(txns) == 0 { - for _, txn := range allTxns { - fmt.Println(txn.Timestamp.Unix()) - } - t.Fatal("expected at least 1 transaction before median timestamp", medianTxnTimestamp.Unix()) - } - for _, txn := range txns { - if txn.Timestamp.Unix() >= medianTxnTimestamp.Unix() { - t.Fatal("expected only transactions before median timestamp") - } - } - txns, err = b.WalletTransactions(context.Background(), api.WalletTransactionsWithSince(medianTxnTimestamp)) + // Events should have 'Relevant' field set. 
+ resp, err := b.Wallet(context.Background()) tt.OK(err) - if len(txns) == 0 { - for _, txn := range allTxns { - fmt.Println(txn.Timestamp.Unix()) - } - t.Fatal("expected at least 1 transaction after median timestamp") - } for _, txn := range txns { - if txn.Timestamp.Unix() < medianTxnTimestamp.Unix() { - t.Fatal("expected only transactions after median timestamp", medianTxnTimestamp.Unix()) + if len(txn.Relevant) != 1 || txn.Relevant[0] != resp.Address { + t.Fatal("invalid 'Relevant' field in wallet event", txn.Relevant, resp.Address) } } } @@ -1672,14 +1605,14 @@ func TestUploadPacking(t *testing.T) { frand.Read(data3) // declare helpers - download := func(path string, data []byte, offset, length int64) { + download := func(key string, data []byte, offset, length int64) { t.Helper() var buffer bytes.Buffer if err := w.DownloadObject( context.Background(), &buffer, api.DefaultBucketName, - path, + key, api.DownloadObjectOptions{Range: &api.DownloadRange{Offset: offset, Length: length}}, ); err != nil { t.Fatal(err) @@ -1696,16 +1629,16 @@ func TestUploadPacking(t *testing.T) { if err != nil { t.Fatal(err) } - if res.Object.Size != int64(len(data)) { - t.Fatal("unexpected size after upload", res.Object.Size, len(data)) + if res.Size != int64(len(data)) { + t.Fatal("unexpected size after upload", res.Size, len(data)) } - entries, err := w.ObjectEntries(context.Background(), api.DefaultBucketName, "/", api.GetObjectOptions{}) + resp, err := b.Objects(context.Background(), api.DefaultBucketName, "", api.ListObjectOptions{}) if err != nil { t.Fatal(err) } var found bool - for _, entry := range entries { - if entry.Name == "/"+name { + for _, entry := range resp.Objects { + if entry.Key == "/"+name { if entry.Size != int64(len(data)) { t.Fatal("unexpected size after upload", entry.Size, len(data)) } @@ -1714,7 +1647,7 @@ func TestUploadPacking(t *testing.T) { } } if !found { - t.Fatal("object not found in list", name, entries) + t.Fatal("object not found in list", name, resp.Objects) } } @@ -1796,18 +1729,18 @@ func TestUploadPacking(t *testing.T) { // and file2 share the same slab. res, err := b.Object(context.Background(), api.DefaultBucketName, "file1", api.GetObjectOptions{}) tt.OK(err) - objs, err := b.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, res.Object.Slabs[0].Key) + objs, err := b.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, res.Object.Slabs[0].EncryptionKey) tt.OK(err) if len(objs) != 2 { t.Fatal("expected 2 objects", len(objs)) } sort.Slice(objs, func(i, j int) bool { - return objs[i].Name < objs[j].Name // make result deterministic + return objs[i].Key < objs[j].Key // make result deterministic }) - if objs[0].Name != "/file1" { - t.Fatal("expected file1", objs[0].Name) - } else if objs[1].Name != "/file2" { - t.Fatal("expected file2", objs[1].Name) + if objs[0].Key != "/file1" { + t.Fatal("expected file1", objs[0].Key) + } else if objs[1].Key != "/file2" { + t.Fatal("expected file2", objs[1].Key) } } @@ -1822,57 +1755,64 @@ func TestWallet(t *testing.T) { tt := cluster.tt // Check wallet info is sane after startup. 
- wallet, err := b.Wallet(context.Background()) + wr, err := b.Wallet(context.Background()) tt.OK(err) - if wallet.ScanHeight == 0 { - t.Fatal("wallet scan height should not be 0") - } - if wallet.Confirmed.IsZero() { + if wr.Confirmed.IsZero() { t.Fatal("wallet confirmed balance should not be zero") } - if !wallet.Spendable.Equals(wallet.Confirmed) { + if !wr.Spendable.Equals(wr.Confirmed) { t.Fatal("wallet spendable balance should match confirmed") } - if !wallet.Unconfirmed.IsZero() { + if !wr.Unconfirmed.IsZero() { t.Fatal("wallet unconfirmed balance should be zero") } - if wallet.Address == (types.Address{}) { + if wr.Address == (types.Address{}) { t.Fatal("wallet address should be set") } - // Send 1 SC to an address outside our wallet. We manually do this to be in - // control of the miner fees. + // Send 1 SC to an address outside our wallet. sendAmt := types.HastingsPerSiacoin - minerFee := types.NewCurrency64(1) - txn := types.Transaction{ - SiacoinOutputs: []types.SiacoinOutput{ - {Value: sendAmt, Address: types.VoidAddress}, - }, - MinerFees: []types.Currency{minerFee}, - } - toSign, parents, err := b.WalletFund(context.Background(), &txn, txn.SiacoinOutputs[0].Value, false) + _, err = b.SendSiacoins(context.Background(), types.Address{1, 2, 3}, sendAmt, false) tt.OK(err) - err = b.WalletSign(context.Background(), &txn, toSign, types.CoveredFields{WholeTransaction: true}) + + txns, err := b.WalletEvents(context.Background()) + tt.OK(err) + + txns, err = b.WalletPending(context.Background()) tt.OK(err) - tt.OK(b.BroadcastTransaction(context.Background(), append(parents, txn))) + if len(txns) != 1 { + t.Fatalf("expected 1 txn got %v", len(txns)) + } + + var minerFee types.Currency + switch txn := txns[0].Data.(type) { + case wallet.EventV1Transaction: + for _, fee := range txn.Transaction.MinerFees { + minerFee = minerFee.Add(fee) + } + case wallet.EventV2Transaction: + minerFee = txn.MinerFee + default: + t.Fatalf("unexpected event %T", txn) + } // The wallet should still have the same confirmed balance, a lower // spendable balance and a greater unconfirmed balance. tt.Retry(600, 100*time.Millisecond, func() error { updated, err := b.Wallet(context.Background()) tt.OK(err) - if !updated.Confirmed.Equals(wallet.Confirmed) { - return fmt.Errorf("wallet confirmed balance should not have changed: %v %v", updated.Confirmed, wallet.Confirmed) + if !updated.Confirmed.Equals(wr.Confirmed) { + return fmt.Errorf("wr confirmed balance should not have changed: %v %v", updated.Confirmed, wr.Confirmed) } // The diffs of the spendable balance and unconfirmed balance should add up // to the amount of money sent as well as the miner fees used. 
- spendableDiff := wallet.Spendable.Sub(updated.Spendable) + spendableDiff := wr.Spendable.Sub(updated.Spendable) if updated.Unconfirmed.Cmp(spendableDiff) > 0 { t.Fatalf("unconfirmed balance can't be greater than the difference in spendable balance here: \nconfirmed %v (%v) - >%v (%v) \nunconfirmed %v (%v) -> %v (%v) \nspendable %v (%v) -> %v (%v) \nfee %v (%v)", - wallet.Confirmed, wallet.Confirmed.ExactString(), updated.Confirmed, updated.Confirmed.ExactString(), - wallet.Unconfirmed, wallet.Unconfirmed.ExactString(), updated.Unconfirmed, updated.Unconfirmed.ExactString(), - wallet.Spendable, wallet.Spendable.ExactString(), updated.Spendable, updated.Spendable.ExactString(), + wr.Confirmed, wr.Confirmed.ExactString(), updated.Confirmed, updated.Confirmed.ExactString(), + wr.Unconfirmed, wr.Unconfirmed.ExactString(), updated.Unconfirmed, updated.Unconfirmed.ExactString(), + wr.Spendable, wr.Spendable.ExactString(), updated.Spendable, updated.Spendable.ExactString(), minerFee, minerFee.ExactString()) } withdrawnAmt := spendableDiff.Sub(updated.Unconfirmed) @@ -2147,7 +2087,7 @@ func TestMultipartUploads(t *testing.T) { // Start a new multipart upload. objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{GenerateKey: true}) + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") @@ -2158,7 +2098,7 @@ func TestMultipartUploads(t *testing.T) { tt.OK(err) if len(lmu.Uploads) != 1 { t.Fatal("expected 1 upload got", len(lmu.Uploads)) - } else if upload := lmu.Uploads[0]; upload.UploadID != mpr.UploadID || upload.Path != objPath { + } else if upload := lmu.Uploads[0]; upload.UploadID != mpr.UploadID || upload.Key != objPath { t.Fatal("unexpected upload:", upload) } @@ -2481,8 +2421,7 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // start a new multipart upload. 
We upload the parts in reverse order
 	objPath := "/foo"
-	key := object.GenerateEncryptionKey()
-	mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: &key})
+	mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{})
 	tt.OK(err)
 	if mpr.UploadID == "" {
 		t.Fatal("expected non-empty upload ID")
 	}
@@ -2555,34 +2494,51 @@ func TestWalletRedistribute(t *testing.T) {
 	})
 	defer cluster.Shutdown()

-	// redistribute into 5 outputs
-	_, err := cluster.Bus.WalletRedistribute(context.Background(), 5, types.Siacoins(10))
+	// redistribute into 2 outputs of 500KS each
+	numOutputs := 2
+	outputAmt := types.Siacoins(500e3)
+	txnSet, err := cluster.Bus.WalletRedistribute(context.Background(), numOutputs, outputAmt)
 	if err != nil {
 		t.Fatal(err)
+	} else if len(txnSet) == 0 {
+		t.Fatal("nothing happened")
 	}
 	cluster.MineBlocks(1)

-	// assert we have 5 outputs with 10 SC
-	outputs, err := cluster.Bus.WalletOutputs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
+	// assert we have 2 outputs of 500KS
+	txns, err := cluster.Bus.WalletEvents(context.Background())
+	cluster.tt.OK(err)

-	var cnt int
-	for _, output := range outputs {
-		if output.Value.Cmp(types.Siacoins(10)) == 0 {
-			cnt++
+	nOutputs := 0
+	for _, txn := range txns {
+		switch txn := txn.Data.(type) {
+		case wallet.EventV1Transaction:
+			for _, sco := range txn.Transaction.SiacoinOutputs {
+				if sco.Value.Equals(types.Siacoins(500e3)) {
+					nOutputs++
+				}
+			}
+		case wallet.EventV2Transaction:
+			for _, sco := range txn.SiacoinOutputs {
+				if sco.Value.Equals(types.Siacoins(500e3)) {
+					nOutputs++
+				}
+			}
+		case wallet.EventPayout:
+		default:
+			t.Fatalf("unexpected transaction type %T", txn)
 		}
 	}
-	if cnt != 5 {
-		t.Fatalf("expected 5 outputs with 10 SC, got %v", cnt)
+	if nOutputs != numOutputs {
+		t.Fatalf("expected %v outputs of 500KS, got %v", numOutputs, nOutputs)
 	}

-	// assert redistributing into 3 outputs succeeds, used to fail because we
-	// were broadcasting an empty transaction set
-	_, err = cluster.Bus.WalletRedistribute(context.Background(), 3, types.Siacoins(10))
-	if err != nil {
-		t.Fatal(err)
+	// assert redistributing into the same number of outputs succeeds, used to
+	// fail because we were broadcasting an empty transaction set
+	txnSet, err = cluster.Bus.WalletRedistribute(context.Background(), nOutputs, outputAmt)
+	cluster.tt.OK(err)
+	if len(txnSet) != 0 {
+		t.Fatal("txnSet should be empty")
 	}
 }
diff --git a/internal/test/e2e/events_test.go b/internal/test/e2e/events_test.go
index 515bce5a4..c86fb1d10 100644
--- a/internal/test/e2e/events_test.go
+++ b/internal/test/e2e/events_test.go
@@ -26,7 +26,6 @@ func TestEvents(t *testing.T) {
 		api.WebhookContractRenew,
 		api.WebhookContractSetUpdate,
 		api.WebhookHostUpdate,
-		api.WebhookSettingDelete,
 		api.WebhookSettingUpdate,
 	}
@@ -125,10 +124,7 @@ func TestEvents(t *testing.T) {
 	// update settings
 	gs := gp.GougingSettings
 	gs.HostBlockHeightLeeway = 100
-	tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, gs))
-
-	// delete setting
-	tt.OK(b.DeleteSetting(context.Background(), api.SettingRedundancy))
+	tt.OK(b.UpdateGougingSettings(context.Background(), gs))

 	// update host setting
 	h := cluster.hosts[0]
@@ -173,17 +169,7 @@ func TestEvents(t *testing.T) {
 			t.Fatalf("unexpected event %+v", e)
 		}
 	case api.EventSettingUpdate:
-		if e.Key != api.SettingGouging || e.Timestamp.IsZero() {
-			t.Fatalf("unexpected event %+v", e)
-		}
-		var update api.GougingSettings
-		bytes, _ := json.Marshal(e.Update)
-		tt.OK(json.Unmarshal(bytes, &update))
-		if update.HostBlockHeightLeeway != 100 {
-			t.Fatalf("unexpected update %+v", update)
-		}
-	case api.EventSettingDelete:
-		if e.Key != api.SettingRedundancy ||
e.Timestamp.IsZero() { + if e.GougingSettings == nil || e.GougingSettings.HostBlockHeightLeeway != 100 || e.Timestamp.IsZero() { t.Fatalf("unexpected event %+v", e) } } diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 851362489..be24b7186 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -69,7 +69,7 @@ func TestGouging(t *testing.T) { // update the gouging settings to limit the max storage price to 100H gs := test.GougingSettings gs.MaxStoragePrice = types.NewCurrency64(100) - if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + if err := b.UpdateGougingSettings(context.Background(), gs); err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestGouging(t *testing.T) { } // set optimised settings - tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + tt.OK(b.UpdateGougingSettings(context.Background(), resp.Recommendation.GougingSettings)) // evaluate optimised settings resp, err = cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, resp.Recommendation.GougingSettings, test.RedundancySettings) diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index 4dd6c1229..cbbabf053 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -44,8 +44,8 @@ func TestObjectMetadata(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(or.Object.Metadata, opts.Metadata) { - t.Fatal("metadata mismatch", or.Object.Metadata) + if !reflect.DeepEqual(or.Metadata, opts.Metadata) { + t.Fatal("metadata mismatch", or.Metadata) } // get the object from the worker and assert it has the metadata @@ -62,7 +62,7 @@ func TestObjectMetadata(t *testing.T) { // HeadObject retrieves the modtime from a http header so it's not as // accurate as the modtime from the object GET endpoint which returns it in // the body. 
- orModtime, err := time.Parse(http.TimeFormat, or.Object.ModTime.Std().Format(http.TimeFormat)) + orModtime, err := time.Parse(http.TimeFormat, or.ModTime.Std().Format(http.TimeFormat)) if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestObjectMetadata(t *testing.T) { if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ - ContentType: or.Object.ContentType(), + ContentType: or.ContentType(), Etag: gor.Etag, LastModified: api.TimeRFC3339(orModtime), Range: &api.ContentRange{Offset: 1, Length: 1, Size: int64(len(data))}, diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index b049da908..ab03a3339 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -41,8 +41,8 @@ func TestMigrations(t *testing.T) { tt := cluster.tt // create a helper to fetch used hosts - usedHosts := func(path string) map[types.PublicKey]struct{} { - res, _ := b.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + usedHosts := func(key string) map[types.PublicKey]struct{} { + res, _ := b.Object(context.Background(), api.DefaultBucketName, key, api.GetObjectOptions{}) if res.Object == nil { t.Fatal("object not found") } diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 84cce4b21..c612cbe32 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -61,7 +61,7 @@ func TestHostPruning(t *testing.T) { tt.OKAll(a.Trigger(true)) // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err := b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -73,7 +73,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err = b.Hosts(context.Background(), api.HostOptions{}) tt.OK(err) if len(hostss) != 0 { a.Trigger(true) // trigger autopilot diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 3a574ce91..08e1fd8fa 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -85,8 +85,8 @@ func TestS3Basic(t *testing.T) { tt.OK(err) if busObject.Object == nil { t.Fatal("expected object to exist") - } else if api.FormatETag(busObject.Object.ETag) != *uploadInfo.ETag { - t.Fatalf("expected ETag %v, got %v", *uploadInfo.ETag, busObject.Object.ETag) + } else if api.FormatETag(busObject.ETag) != *uploadInfo.ETag { + t.Fatalf("expected ETag %v, got %v", uploadInfo.ETag, busObject.ETag) } _, err = s3.PutObject(&s3aws.PutObjectInput{ @@ -845,9 +845,11 @@ func TestS3SettingsValidate(t *testing.T) { }, } for i, test := range tests { - err := cluster.Bus.UpdateSetting(context.Background(), api.SettingS3Authentication, api.S3AuthenticationSettings{ - V4Keypairs: map[string]string{ - test.id: test.key, + err := cluster.Bus.UpdateS3Settings(context.Background(), api.S3Settings{ + Authentication: api.S3AuthenticationSettings{ + V4Keypairs: map[string]string{ + test.id: test.key, + }, }, }) if err != nil && !test.shouldFail { diff --git a/internal/worker/accounts.go b/internal/worker/accounts.go index 1022075f1..f73c9f529 100644 --- a/internal/worker/accounts.go +++ b/internal/worker/accounts.go @@ -336,7 +336,7 @@ func (a *AccountMgr) refillAccounts() { defer cancel() // refill - err := a.refillAccount(rCtx, c, cs.BlockHeight, 
a.revisionSubmissionBuffer) + refilled, err := a.refillAccount(rCtx, c, cs.BlockHeight, a.revisionSubmissionBuffer) // determine whether to log something shouldLog := true @@ -351,7 +351,7 @@ func (a *AccountMgr) refillAccounts() { if err != nil && shouldLog { a.logger.Error("failed to refill account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) - } else { + } else if refilled { a.logger.Infow("successfully refilled account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) } }(c) @@ -359,7 +359,7 @@ func (a *AccountMgr) refillAccounts() { } } -func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMetadata, bh, revisionSubmissionBuffer uint64) error { +func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMetadata, bh, revisionSubmissionBuffer uint64) (bool, error) { // fetch the account account := a.Account(contract.HostKey) @@ -367,7 +367,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // trying to refill the account would result in the host not returning the // revision and returning an obfuscated error if (bh + revisionSubmissionBuffer) > contract.WindowStart { - return fmt.Errorf("contract %v is too close to the proof window to be revised", contract.ID) + return false, fmt.Errorf("contract %v is too close to the proof window to be revised", contract.ID) } // check if a host is potentially cheating before refilling. @@ -382,7 +382,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet "drift", account.Drift.String(), ) _ = a.alerts.RegisterAlert(a.shutdownCtx, alert) - return fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded) + return false, fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded) } else { _ = a.alerts.DismissAlerts(a.shutdownCtx, alerts.IDForAccount(alertAccountRefillID, account.ID)) } @@ -392,7 +392,7 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // sync the account err := a.syncer.SyncAccount(ctx, contract.ID, contract.HostKey, contract.SiamuxAddr) if err != nil { - return fmt.Errorf("failed to sync account's balance: %w", err) + return false, fmt.Errorf("failed to sync account's balance: %w", err) } // refetch the account after syncing @@ -401,15 +401,15 @@ func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMet // check if refill is needed if account.Balance.Cmp(minBalance) >= 0 { - return nil + return false, nil } // fund the account err := a.funder.FundAccount(ctx, contract.ID, contract.HostKey, maxBalance) if err != nil { - return fmt.Errorf("failed to fund account: %w", err) + return false, fmt.Errorf("failed to fund account: %w", err) } - return nil + return true, nil } // WithSync syncs an accounts balance with the bus. 
To do so, the account is diff --git a/internal/worker/cache.go b/internal/worker/cache.go index dfc749d2a..1f5d28d22 100644 --- a/internal/worker/cache.go +++ b/internal/worker/cache.go @@ -181,11 +181,8 @@ func (c *cache) HandleEvent(event webhooks.Event) (err error) { log = log.With("hk", e.HostKey, "ts", e.Timestamp) c.handleHostUpdate(e) case api.EventSettingUpdate: - log = log.With("key", e.Key, "ts", e.Timestamp) - err = c.handleSettingUpdate(e) - case api.EventSettingDelete: - log = log.With("key", e.Key, "ts", e.Timestamp) - c.handleSettingDelete(e) + log = log.With("gouging", e.GougingSettings != nil, "pinned", e.PinnedSettings != nil, "upload", e.UploadSettings != nil, "ts", e.Timestamp) + c.handleSettingUpdate(e) default: log.Info("unhandled event", e) return @@ -310,52 +307,22 @@ func (c *cache) handleHostUpdate(e api.EventHostUpdate) { c.cache.Set(cacheKeyDownloadContracts, contracts) } -func (c *cache) handleSettingDelete(e api.EventSettingDelete) { - if e.Key == api.SettingGouging || e.Key == api.SettingRedundancy { - c.cache.Invalidate(cacheKeyGougingParams) - } -} - -func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { +func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) { // return early if the cache doesn't have gouging params to update value, found, _ := c.cache.Get(cacheKeyGougingParams) if !found { - return nil + return } - gp := value.(api.GougingParams) - // marshal the updated value - data, err := json.Marshal(e.Update) - if err != nil { - return fmt.Errorf("couldn't marshal the given value, error: %v", err) + // update the cache + gp := value.(api.GougingParams) + if e.GougingSettings != nil { + gp.GougingSettings = *e.GougingSettings } - - // unmarshal into the appropriated setting and update the cache - switch e.Key { - case api.SettingGouging: - var gs api.GougingSettings - if err := json.Unmarshal(data, &gs); err != nil { - return fmt.Errorf("couldn't update gouging settings, invalid request body, %t", e.Update) - } else if err := gs.Validate(); err != nil { - return fmt.Errorf("couldn't update gouging settings, error: %v", err) - } - - gp.GougingSettings = gs - c.cache.Set(cacheKeyGougingParams, gp) - case api.SettingRedundancy: - var rs api.RedundancySettings - if err := json.Unmarshal(data, &rs); err != nil { - return fmt.Errorf("couldn't update redundancy settings, invalid request body, %t", e.Update) - } else if err := rs.Validate(); err != nil { - return fmt.Errorf("couldn't update redundancy settings, error: %v", err) - } - - gp.RedundancySettings = rs - c.cache.Set(cacheKeyGougingParams, gp) - default: + if e.UploadSettings != nil { + gp.RedundancySettings = e.UploadSettings.Redundancy } - - return nil + c.cache.Set(cacheKeyGougingParams, gp) } func contractsEqual(x, y []api.ContractMetadata) bool { diff --git a/internal/worker/cache_test.go b/internal/worker/cache_test.go index 9bc8d682d..0fa3c10d8 100644 --- a/internal/worker/cache_test.go +++ b/internal/worker/cache_test.go @@ -170,7 +170,6 @@ func TestWorkerCache(t *testing.T) { {Module: api.ModuleContract, Event: api.EventRenew, Payload: nil}, {Module: api.ModuleHost, Event: api.EventUpdate, Payload: nil}, {Module: api.ModuleSetting, Event: api.EventUpdate, Payload: nil}, - {Module: api.ModuleSetting, Event: api.EventDelete, Payload: nil}, } { if err := c.HandleEvent(event); err != nil { t.Fatal(err) diff --git a/object/object.go b/object/object.go index 95517619f..1fa4a98ec 100644 --- a/object/object.go +++ b/object/object.go @@ -117,7 +117,7 @@ func 
GenerateEncryptionKey() EncryptionKey { // tagged omitempty to make sure responses where no object is returned remain // clean. type Object struct { - Key EncryptionKey `json:"key,omitempty"` + Key EncryptionKey `json:"encryptionKey,omitempty"` Slabs SlabSlices `json:"slabs,omitempty"` } diff --git a/object/slab.go b/object/slab.go index e52e7bd7b..55d070142 100644 --- a/object/slab.go +++ b/object/slab.go @@ -23,10 +23,10 @@ type Sector struct { // be used for each Slab, and should not be the same key used for the parent // Object. type Slab struct { - Health float64 `json:"health"` - Key EncryptionKey `json:"key"` - MinShards uint8 `json:"minShards"` - Shards []Sector `json:"shards,omitempty"` + Health float64 `json:"health"` + EncryptionKey EncryptionKey `json:"encryptionKey"` + MinShards uint8 `json:"minShards"` + Shards []Sector `json:"shards,omitempty"` } func (s Slab) IsPartial() bool { @@ -36,18 +36,18 @@ func (s Slab) IsPartial() bool { // NewSlab returns a new slab for the shards. func NewSlab(minShards uint8) Slab { return Slab{ - Key: GenerateEncryptionKey(), - MinShards: minShards, + EncryptionKey: GenerateEncryptionKey(), + MinShards: minShards, } } // NewPartialSlab returns a new partial slab. func NewPartialSlab(ec EncryptionKey, minShards uint8) Slab { return Slab{ - Health: 1, - Key: ec, - MinShards: minShards, - Shards: nil, + Health: 1, + EncryptionKey: ec, + MinShards: minShards, + Shards: nil, } } @@ -98,7 +98,7 @@ func (s Slab) Encrypt(shards [][]byte) { wg.Add(1) go func(i int) { nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) + c, _ := chacha20.NewUnauthenticatedCipher(s.EncryptionKey.entropy[:], nonce[:]) c.XORKeyStream(shards[i], shards[i]) wg.Done() }(i) @@ -176,7 +176,7 @@ func (ss SlabSlice) Decrypt(shards [][]byte) { wg.Add(1) go func(i int) { nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) + c, _ := chacha20.NewUnauthenticatedCipher(ss.EncryptionKey.entropy[:], nonce[:]) c.SetCounter(offset) c.XORKeyStream(shards[i], shards[i]) wg.Done() diff --git a/object/slab_test.go b/object/slab_test.go index 1138b7c8b..c4c4a2006 100644 --- a/object/slab_test.go +++ b/object/slab_test.go @@ -72,7 +72,7 @@ func TestReedSolomon(t *testing.T) { func BenchmarkReedSolomon(b *testing.B) { makeSlab := func(m, n uint8) (Slab, []byte, [][]byte) { - return Slab{Key: GenerateEncryptionKey(), MinShards: m, Shards: make([]Sector, n)}, + return Slab{EncryptionKey: GenerateEncryptionKey(), MinShards: m, Shards: make([]Sector, n)}, frand.Bytes(rhpv2.SectorSize * int(m)), make([][]byte, n) } diff --git a/stores/hostdb.go b/stores/hostdb.go index 5111682d1..1df6a30d6 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -17,7 +17,15 @@ var ( // Host returns information about a host. 
func (s *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { - hosts, err := s.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hostKey}, 0, 1) + hosts, err := s.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + AddressContains: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + KeyIn: []types.PublicKey{hostKey}, + Offset: 0, + Limit: 1, + }) if err != nil { return api.Host{}, err } else if len(hosts) == 0 { @@ -48,20 +56,15 @@ func (s *SQLStore) ResetLostSectors(ctx context.Context, hk types.PublicKey) err }) } -func (s *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { +func (s *SQLStore) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { var hosts []api.Host err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - hosts, err = tx.SearchHosts(ctx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) + hosts, err = tx.Hosts(ctx, opts) return }) return hosts, err } -// Hosts returns non-blocked hosts at given offset and limit. -func (s *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]api.Host, error) { - return s.SearchHosts(ctx, "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, offset, limit) -} - func (s *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index d59640d26..155bb2c0a 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -38,7 +38,15 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, 0, -1) + allHosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -63,7 +71,15 @@ func TestSQLHostDB(t *testing.T) { } // Same thing again but with hosts. - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -106,90 +122,8 @@ func TestSQLHostDB(t *testing.T) { } } -// TestSQLHosts tests the Hosts method of the SQLHostDB type. 
-func TestSQLHosts(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { - t.Fatal("unexpected", len(hosts), err) - } - if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if host := hosts[0]; host.PublicKey != hk1 { - t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) - } - if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if host := hosts[0]; host.PublicKey != hk2 { - t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) - } - if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { - t.Fatal("unexpected", len(hosts), err) - } - if _, err := ss.Hosts(ctx, -1, -1); !errors.Is(err, sql.ErrNegativeOffset) { - t.Fatal("unexpected error", err) - } - - // Add a scan for each host. - n := time.Now() - if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - if err := ss.addTestScan(hk2, n.Add(-2*time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - if err := ss.addTestScan(hk3, n.Add(-3*time.Minute), nil, rhpv2.HostSettings{}); err != nil { - t.Fatal(err) - } - - // Fetch all hosts using the HostsForScanning method. - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 3 { - t.Fatal("wrong number of addresses") - } - if hostAddresses[0].PublicKey != hk3 { - t.Fatal("wrong key") - } - if hostAddresses[1].PublicKey != hk2 { - t.Fatal("wrong key") - } - if hostAddresses[2].PublicKey != hk1 { - t.Fatal("wrong key") - } - - // Fetch one host by setting the cutoff exactly to hk2. - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 1 { - t.Fatal("wrong number of addresses") - } - - // Fetch no hosts. - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) - if err != nil { - t.Fatal(err) - } - if len(hostAddresses) != 0 { - t.Fatal("wrong number of addresses") - } -} - -// TestSearchHosts is a unit test for SearchHosts. -func TestSearchHosts(t *testing.T) { +// TestHosts is a unit test for the Hosts method of the SQLHostDB type. 
+func TestHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() @@ -205,7 +139,15 @@ func TestSearchHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // search all hosts - his, err := ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err := ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -213,19 +155,43 @@ func TestSearchHosts(t *testing.T) { } // assert offset & limit are taken into account - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: 1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { t.Fatal("unexpected") } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 2) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 1, + Limit: 2, + }) if err != nil { t.Fatal(err) } else if len(his) != 2 { t.Fatal("unexpected") } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 3, 1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 3, + Limit: 1, + }) if err != nil { t.Fatal(err) } else if len(his) != 0 { @@ -233,16 +199,48 @@ func TestSearchHosts(t *testing.T) { } // assert address and key filters are taken into account - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1001", nil, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1001", + KeyIn: nil, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 2 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: []types.PublicKey{hk2, hk3}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 2 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1002", + KeyIn: []types.PublicKey{hk2, hk3}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 1 { 
t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "com:1002", []types.PublicKey{hk1}, 0, -1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "com:1002", + KeyIn: []types.PublicKey{hk1}, + Offset: 0, + Limit: -1, + }); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } @@ -251,7 +249,15 @@ func TestSearchHosts(t *testing.T) { if err != nil { t.Fatal(err) } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 2 { @@ -259,7 +265,15 @@ func TestSearchHosts(t *testing.T) { } else if his[0].PublicKey != (types.PublicKey{2}) || his[1].PublicKey != (types.PublicKey{3}) { t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) } - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeBlocked, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 1 { @@ -286,19 +300,19 @@ func TestSearchHosts(t *testing.T) { // add host checks, h1 gets ap1 and h2 gets both h1c := newTestHostCheck() - h1c.Score.Age = .1 + h1c.ScoreBreakdown.Age = .1 err = ss.UpdateHostCheck(context.Background(), ap1, hk1, h1c) if err != nil { t.Fatal(err) } h2c1 := newTestHostCheck() - h2c1.Score.Age = .21 + h2c1.ScoreBreakdown.Age = .21 err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1) if err != nil { t.Fatal(err) } h2c2 := newTestHostCheck() - h2c2.Score.Age = .22 + h2c2.ScoreBreakdown.Age = .22 err = ss.UpdateHostCheck(context.Background(), ap2, hk2, h2c2) if err != nil { t.Fatal(err) @@ -314,7 +328,15 @@ func TestSearchHosts(t *testing.T) { } // fetch all hosts - his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -331,7 +353,15 @@ func TestSearchHosts(t *testing.T) { } // assert autopilot filter is taken into account - his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + his, err = ss.Hosts(context.Background(), api.HostOptions{ + AutopilotID: ap1, + FilterMode: api.HostFilterModeAll, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } else if len(his) != 3 { @@ -340,7 +370,7 @@ func TestSearchHosts(t *testing.T) { // assert h1 and h2 have the expected checks if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { - t.Fatal("unexpected", c1, ok) + t.Fatal("unexpected", c1, ok, his[0]) } else if c2, ok := 
his[1].Checks[ap1]; !ok || c2 != h2c1 {
 		t.Fatal("unexpected", c2, ok)
 	} else if _, ok := his[1].Checks[ap2]; ok {
@@ -348,12 +378,20 @@
 	}

 	// assert usability filter is taken into account
-	h2c1.Usability.RedundantIP = true
+	h2c1.UsabilityBreakdown.RedundantIP = true
 	err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1)
 	if err != nil {
 		t.Fatal(err)
 	}
-	his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1)
+	his, err = ss.Hosts(context.Background(), api.HostOptions{
+		AutopilotID:     ap1,
+		FilterMode:      api.HostFilterModeAll,
+		UsabilityMode:   api.UsabilityFilterModeUsable,
+		AddressContains: "",
+		KeyIn:           nil,
+		Offset:          0,
+		Limit:           -1,
+	})
 	if err != nil {
 		t.Fatal(err)
 	} else if len(his) != 1 {
@@ -365,7 +403,15 @@
 		t.Fatal("unexpected", c1, ok)
 	}

-	his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1)
+	his, err = ss.Hosts(context.Background(), api.HostOptions{
+		AutopilotID:     ap1,
+		FilterMode:      api.HostFilterModeAll,
+		UsabilityMode:   api.UsabilityFilterModeUnusable,
+		AddressContains: "",
+		KeyIn:           nil,
+		Offset:          0,
+		Limit:           -1,
+	})
 	if err != nil {
 		t.Fatal(err)
 	} else if len(his) != 1 {
@@ -681,7 +727,15 @@ func TestSQLHostAllowlist(t *testing.T) {
 	numHosts := func() int {
 		t.Helper()
-		hosts, err := ss.Hosts(ctx, 0, -1)
+		hosts, err := ss.Hosts(ctx, api.HostOptions{
+			AutopilotID:     "",
+			FilterMode:      api.HostFilterModeAllowed,
+			UsabilityMode:   api.UsabilityFilterModeAll,
+			AddressContains: "",
+			KeyIn:           nil,
+			Offset:          0,
+			Limit:           -1,
+		})
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -759,23 +813,47 @@
 		t.Fatal(err)
 	}

-	assertSearch := func(total, allowed, blocked int) error {
+	assertHosts := func(total, allowed, blocked int) error {
 		t.Helper()
-		hosts, err := ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1)
+		hosts, err := ss.Hosts(context.Background(), api.HostOptions{
+			AutopilotID:     "",
+			FilterMode:      api.HostFilterModeAll,
+			UsabilityMode:   api.UsabilityFilterModeAll,
+			AddressContains: "",
+			KeyIn:           nil,
+			Offset:          0,
+			Limit:           -1,
+		})
 		if err != nil {
 			return err
 		}
 		if len(hosts) != total {
 			return fmt.Errorf("invalid number of hosts: %v", len(hosts))
 		}
-		hosts, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1)
+		hosts, err = ss.Hosts(context.Background(), api.HostOptions{
+			AutopilotID:     "",
+			FilterMode:      api.HostFilterModeAllowed,
+			UsabilityMode:   api.UsabilityFilterModeAll,
+			AddressContains: "",
+			KeyIn:           nil,
+			Offset:          0,
+			Limit:           -1,
+		})
 		if err != nil {
 			return err
 		}
 		if len(hosts) != allowed {
 			return fmt.Errorf("invalid number of hosts: %v", len(hosts))
 		}
-		hosts, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1)
+		hosts, err = ss.Hosts(context.Background(), api.HostOptions{
+			AutopilotID:     "",
+			FilterMode:      api.HostFilterModeBlocked,
+			UsabilityMode:   api.UsabilityFilterModeAll,
+			AddressContains: "",
+			KeyIn:           nil,
+			Offset:          0,
+			Limit:           -1,
+		})
 		if err != nil {
 			return err
 		}
@@ -787,7 +865,7 @@ func TestSQLHostAllowlist(t *testing.T) {

 	// Search for hosts using different modes. Should have 3 hosts in total, 1
 	// allowed one and 2 blocked ones.
- if err := assertSearch(3, 1, 2); err != nil { + if err := assertHosts(3, 1, 2); err != nil { t.Fatal(err) } @@ -807,7 +885,7 @@ func TestSQLHostAllowlist(t *testing.T) { // Search for hosts using different modes. Should have 2 hosts in total, 0 // allowed ones and 2 blocked ones. - if err := assertSearch(2, 0, 2); err != nil { + if err := assertHosts(2, 0, 2); err != nil { t.Fatal(err) } @@ -819,7 +897,7 @@ func TestSQLHostAllowlist(t *testing.T) { // Search for hosts using different modes. Should have 2 hosts in total, 2 // allowed ones and 0 blocked ones. - if err := assertSearch(2, 2, 0); err != nil { + if err := assertHosts(2, 2, 0); err != nil { t.Fatal(err) } @@ -849,7 +927,15 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostOptions{ + AutopilotID: "", + FilterMode: api.HostFilterModeAllowed, + UsabilityMode: api.UsabilityFilterModeAll, + AddressContains: "", + KeyIn: nil, + Offset: 0, + Limit: -1, + }) if err != nil { t.Fatal(err) } @@ -1121,14 +1207,14 @@ func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSett func newTestHostCheck() api.HostCheck { return api.HostCheck{ - Gouging: api.HostGougingBreakdown{ + GougingBreakdown: api.HostGougingBreakdown{ ContractErr: "foo", DownloadErr: "bar", GougingErr: "baz", PruneErr: "qux", UploadErr: "quuz", }, - Score: api.HostScoreBreakdown{ + ScoreBreakdown: api.HostScoreBreakdown{ Age: .1, Collateral: .2, Interactions: .3, @@ -1137,7 +1223,7 @@ func newTestHostCheck() api.HostCheck { Version: .6, Prices: .7, }, - Usability: api.HostUsabilityBreakdown{ + UsabilityBreakdown: api.HostUsabilityBreakdown{ Blocked: false, Offline: false, LowScore: false, @@ -1176,18 +1262,6 @@ func (s *testSQLStore) addTestHosts(n int) (keys []types.PublicKey, err error) { return } -// addTestScan adds a host scan to the database. -func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, settings rhpv2.HostSettings) error { - return s.RecordHostScans(context.Background(), []api.HostScan{ - { - HostKey: hk, - Settings: settings, - Success: err == nil, - Timestamp: t, - }, - }) -} - // announceHost adds a host announcement to the database. 
func (s *testSQLStore) announceHost(hk types.PublicKey, na string) error { return s.db.Transaction(context.Background(), func(tx sql.DatabaseTx) error { diff --git a/stores/metadata.go b/stores/metadata.go index d1c5ab557..a1912fa1e 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -125,7 +125,7 @@ func (s *SQLStore) AddRenewal(ctx context.Context, c api.ContractMetadata) error // reinsert renewed contract renewed.ArchivalReason = api.ContractArchivalReasonRenewed renewed.RenewedTo = c.ID - return tx.InsertContract(ctx, renewed) + return tx.PutContract(ctx, renewed) }) } @@ -229,12 +229,6 @@ func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (c return cs, err } -func (s *SQLStore) InsertContract(ctx context.Context, c api.ContractMetadata) error { - return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.InsertContract(ctx, c) - }) -} - func (s *SQLStore) PutContract(ctx context.Context, c api.ContractMetadata) error { return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { return tx.PutContract(ctx, c) @@ -318,25 +312,9 @@ func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileCo return } -func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) (objects []api.ObjectMetadata, err error) { - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - objects, err = tx.SearchObjects(ctx, bucket, substring, offset, limit) - return err - }) - return -} - -func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) (metadata []api.ObjectMetadata, hasMore bool, err error) { - err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - metadata, hasMore, err = tx.ObjectEntries(ctx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) - return err - }) - return -} - -func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err error) { +func (s *SQLStore) Object(ctx context.Context, bucket, key string) (obj api.Object, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - obj, err = tx.Object(ctx, bucket, path) + obj, err = tx.Object(ctx, bucket, key) return err }) return @@ -482,7 +460,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo return } -func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { +func (s *SQLStore) UpdateObject(ctx context.Context, bucket, key, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { // Sanity check input. for _, s := range o.Slabs { for i, shard := range s.Shards { @@ -507,19 +485,19 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it var err error - prune, err = tx.DeleteObject(ctx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, key) if err != nil { return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } // create the dir - dirID, err := tx.MakeDirsForPath(ctx, path) + dirID, err := tx.MakeDirsForPath(ctx, key) if err != nil { - return fmt.Errorf("failed to create directories for path '%s': %w", path, err) + return fmt.Errorf("failed to create directories for key '%s': %w", key, err) } // Insert a new object. 
- err = tx.InsertObject(ctx, bucket, path, contractSet, dirID, o, mimeType, eTag, metadata) + err = tx.InsertObject(ctx, bucket, key, contractSet, dirID, o, mimeType, eTag, metadata) if err != nil { return fmt.Errorf("failed to insert object: %w", err) } @@ -534,16 +512,16 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return nil } -func (s *SQLStore) RemoveObject(ctx context.Context, bucket, path string) error { +func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { var prune bool err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { - prune, err = tx.DeleteObject(ctx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, key) return }) if err != nil { return fmt.Errorf("RemoveObject: failed to delete object: %w", err) } else if !prune { - return fmt.Errorf("%w: key: %s", api.ErrObjectNotFound, path) + return fmt.Errorf("%w: key: %s", api.ErrObjectNotFound, key) } s.triggerSlabPruning() return nil @@ -650,9 +628,9 @@ func (s *SQLStore) UnhealthySlabs(ctx context.Context, healthCutoff float64, set } // ObjectMetadata returns an object's metadata -func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (obj api.Object, err error) { +func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, key string) (obj api.Object, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - obj, err = tx.ObjectMetadata(ctx, bucket, path) + obj, err = tx.ObjectMetadata(ctx, bucket, key) return err }) return @@ -806,12 +784,9 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []types } } -// TODO: we can use ObjectEntries instead of ListObject if we want to use '/' as -// a delimiter for now (see backend.go) but it would be interesting to have -// arbitrary 'delim' support in ListObjects. 
-func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { +func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - resp, err = tx.ListObjects(ctx, bucket, prefix, sortBy, sortDir, marker, limit) + resp, err = tx.ListObjects(ctx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) return err }) return diff --git a/stores/metadata_test.go b/stores/metadata_test.go index bc67f5e0b..f30d06ec3 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -42,10 +42,10 @@ func (s *testSQLStore) InsertSlab(slab object.Slab) { } } -func (s *SQLStore) RemoveObjectBlocking(ctx context.Context, bucket, path string) error { +func (s *SQLStore) RemoveObjectBlocking(ctx context.Context, bucket, key string) error { ts := time.Now() time.Sleep(time.Millisecond) - if err := s.RemoveObject(ctx, bucket, path); err != nil { + if err := s.RemoveObject(ctx, bucket, key); err != nil { return err } return s.waitForPruneLoop(ts) @@ -136,9 +136,9 @@ func TestPrunableContractRoots(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hks[0], fcids[0], types.Hash256{byte(i)}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hks[0], fcids[0], types.Hash256{byte(i)}), }, }, }, @@ -216,20 +216,20 @@ func TestObjectBasic(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -298,20 +298,20 @@ func TestObjectMetadata(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1.0, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1.0, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -399,7 +399,7 @@ func TestSQLContractStore(t *testing.T) { Uploads: types.NewCurrency64(6), }, } - if err := ss.InsertContract(context.Background(), c); err != nil { + if err := ss.PutContract(context.Background(), c); err != nil { t.Fatal(err) } @@ -502,9 +502,9 @@ func TestContractRoots(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hks[0], fcids[0], types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + 
Shards: newTestShards(hks[0], fcids[0], types.Hash256{1}), }, }, }, @@ -685,20 +685,20 @@ func TestSQLMetadataStore(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Health: 1, - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Health: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, Offset: 10, Length: 100, }, { Slab: object.Slab{ - Health: 1, - Key: object.GenerateEncryptionKey(), - MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Health: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), }, Offset: 20, Length: 200, @@ -725,15 +725,15 @@ func TestSQLMetadataStore(t *testing.T) { } obj.ModTime = api.TimeRFC3339{} - obj1Slab0Key := obj1.Slabs[0].Key - obj1Slab1Key := obj1.Slabs[1].Key + obj1Slab0Key := obj1.Slabs[0].EncryptionKey + obj1Slab1Key := obj1.Slabs[1].EncryptionKey expectedObj := api.Object{ ObjectMetadata: api.ObjectMetadata{ ETag: testETag, Health: 1, ModTime: api.TimeRFC3339{}, - Name: objID, + Key: objID, Size: obj1.TotalSize(), MimeType: testMimeType, }, @@ -745,9 +745,9 @@ func TestSQLMetadataStore(t *testing.T) { Offset: 10, Length: 100, Slab: object.Slab{ - Health: 1, - Key: obj1Slab0Key, - MinShards: 1, + Health: 1, + EncryptionKey: obj1Slab0Key, + MinShards: 1, Shards: []object.Sector{ { LatestHost: hk1, @@ -763,9 +763,9 @@ func TestSQLMetadataStore(t *testing.T) { Offset: 20, Length: 200, Slab: object.Slab{ - Health: 1, - Key: obj1Slab1Key, - MinShards: 2, + Health: 1, + EncryptionKey: obj1Slab1Key, + MinShards: 2, Shards: []object.Sector{ { LatestHost: hk2, @@ -816,9 +816,9 @@ func TestSQLMetadataStore(t *testing.T) { } expectedObjSlab1 := object.Slab{ - Health: 1, - Key: obj1Slab0Key, - MinShards: 1, + Health: 1, + EncryptionKey: obj1Slab0Key, + MinShards: 1, Shards: []object.Sector{ { Contracts: map[types.PublicKey][]types.FileContractID{ @@ -839,9 +839,9 @@ func TestSQLMetadataStore(t *testing.T) { } expectedObjSlab2 := object.Slab{ - Health: 1, - Key: obj1Slab1Key, - MinShards: 2, + Health: 1, + EncryptionKey: obj1Slab1Key, + MinShards: 2, Shards: []object.Sector{ { Contracts: map[types.PublicKey][]types.FileContractID{ @@ -969,8 +969,8 @@ func TestObjectHealth(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{1}), newTestShard(hks[1], fcids[1], types.Hash256{2}), @@ -981,8 +981,8 @@ func TestObjectHealth(t *testing.T) { }, { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hks[1], fcids[1], types.Hash256{5}), newTestShard(hks[2], fcids[2], types.Hash256{6}), @@ -1029,7 +1029,8 @@ func TestObjectHealth(t *testing.T) { } // assert health is returned correctly by ObjectEntries - entries, _, err := ss.ObjectEntries(context.Background(), api.DefaultBucketName, "/", "", "", "", "", 0, -1) + resp, err := ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "", "", "", "", "", -1) + entries := resp.Objects if err != nil { t.Fatal(err) } else if len(entries) != 1 { @@ -1039,10 +1040,10 @@ func TestObjectHealth(t *testing.T) { } // assert health is returned correctly by SearchObject - 
entries, err = ss.SearchObjects(context.Background(), api.DefaultBucketName, "foo", 0, -1)
+	resp, err = ss.ListObjects(context.Background(), api.DefaultBucketName, "/", "foo", "", "", "", "", -1)
 	if err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("wrong number of entries", len(entries))
 	} else if entries[0].Health != expectedHealth {
 		t.Fatal("wrong health", entries[0].Health)
@@ -1080,13 +1081,14 @@
 	}
 }

-// TestObjectEntries is a test for the ObjectEntries method.
-func TestObjectEntries(t *testing.T) {
+// TestListObjectsWithDelimiterSlash is a test for the
+// ListObjects method with '/' as the delimiter.
+func TestListObjectsWithDelimiterSlash(t *testing.T) {
 	ss := newTestSQLStore(t, defaultTestSQLStoreConfig)
 	defer ss.Close()

 	objects := []struct {
-		path string
+		key  string
 		size int64
 	}{
 		{"/foo/bar", 1},
@@ -1106,7 +1108,7 @@
 		obj := newTestObject(frand.Intn(9) + 1)
 		obj.Slabs = obj.Slabs[:1]
 		obj.Slabs[0].Length = uint32(o.size)
-		_, err := ss.addTestObject(o.path, obj)
+		_, err := ss.addTestObject(o.key, obj)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -1118,13 +1120,13 @@
 		t.Helper()
 		for i := range entries {
 			// assert mod time
-			if !strings.HasSuffix(entries[i].Name, "/") && entries[i].ModTime.IsZero() {
+			if !strings.HasSuffix(entries[i].Key, "/") && entries[i].ModTime.IsZero() {
 				t.Fatal("mod time should be set")
 			}
 			entries[i].ModTime = api.TimeRFC3339{}

 			// assert mime type
-			isDir := strings.HasSuffix(entries[i].Name, "/")
+			isDir := strings.HasSuffix(entries[i].Key, "/")
 			if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType != testMimeType) {
 				t.Fatal("unexpected mime type", entries[i].MimeType)
 			}
@@ -1158,43 +1160,47 @@
 		sortDir string
 		want    []api.ObjectMetadata
 	}{
-		{"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}},
-		{"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}},
-		{"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}},
-		{"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}},
-		{"/fileś/", "", "", "", []api.ObjectMetadata{{Name: "/fileś/śpecial", Size: 6, Health: 1}}},
-
-		{"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}},
-		{"/", "F", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}},
+		{"/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileś/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/gab/", Size: 5, Health: 1}}},
+		{"/foo/", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/", Size: 7, Health: .5}}},
+		{"/foo/baz/", "", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}},
+		{"/gab/", "", "", "", []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}}},
+		{"/fileś/", "", "", "", []api.ObjectMetadata{{Key: "/fileś/śpecial", Size: 6, Health: 1}}},
+
+		{"/", "f", "", "",
[]api.ObjectMetadata{{Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, + {"/", "F", "", "", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}}}, {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, - {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, + {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: .75}}}, {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, - {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, + {"/", "", "name", "ASC", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "name", "DESC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 7, Health: 1}}}, - {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "health", "ASC", []api.ObjectMetadata{{Key: "/foo/", Size: 10, Health: .5}, {Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "health", "DESC", []api.ObjectMetadata{{Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, - {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileÅ›/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "size", "DESC", []api.ObjectMetadata{{Key: "/foo/", Size: 10, Health: .5}, {Key: "/FOO/", Size: 7, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "size", "ASC", []api.ObjectMetadata{{Key: "/gab/", Size: 5, Health: 1}, {Key: "/fileÅ›/", Size: 6, Health: 1}, {Key: "/FOO/", Size: 7, Health: 1}, {Key: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { - got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) if err != nil { t.Fatal(err) } + got := resp.Objects assertMetadata(got) if !(len(got) == 0 && len(test.want) == 0) && !reflect.DeepEqual(got, test.want) { t.Fatalf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) } + var marker string for offset := 0; offset < len(test.want); offset++ { - got, hasMore, err := 
ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", offset, 1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, marker, 1) if err != nil { t.Fatal(err) } + marker = resp.NextMarker + got := resp.Objects assertMetadata(got) if len(got) != 1 || got[0] != test.want[offset] { @@ -1202,8 +1208,8 @@ func TestObjectEntries(t *testing.T) { } moreRemaining := len(test.want)-offset-1 > 0 - if hasMore != moreRemaining { - t.Fatalf("invalid value for hasMore (%t) at offset (%d) test (%+v)", hasMore, offset, test) + if resp.HasMore != moreRemaining { + t.Fatalf("invalid value for hasMore (%t) at offset (%d) test (%+v)", resp.HasMore, offset, test) } // make sure we stay within bounds @@ -1211,30 +1217,31 @@ func TestObjectEntries(t *testing.T) { continue } - got, hasMore, err = ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, test.want[offset].Name, 0, 1) + resp, err = ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, test.want[offset].Key, 1) if err != nil { t.Fatal(err) } + got = resp.Objects assertMetadata(got) if len(got) != 1 || got[0] != test.want[offset+1] { - t.Fatalf("\noffset: %v\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", offset+1, test.path, test.prefix, test.want[offset].Name, got, test.want[offset+1]) + t.Fatalf("\noffset: %v\nlist: %v\nprefix: %v\nmarker: %v\ngot: %v\nwant: %v", offset+1, test.path, test.prefix, test.want[offset].Key, got, test.want[offset+1]) } moreRemaining = len(test.want)-offset-2 > 0 - if hasMore != moreRemaining { - t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", hasMore, test.want[offset].Name, test) + if resp.HasMore != moreRemaining { + t.Fatalf("invalid value for hasMore (%t) at marker (%s) test (%+v)", resp.HasMore, test.want[offset].Key, test) } } } } -func TestObjectEntriesExplicitDir(t *testing.T) { +func TestListObjectsExplicitDir(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { - path string + key string size int64 }{ {"/dir/", 0}, // empty dir - created first @@ -1247,7 +1254,7 @@ func TestObjectEntriesExplicitDir(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - _, err := ss.addTestObject(o.path, obj) + _, err := ss.addTestObject(o.key, obj) if err != nil { t.Fatal(err) } @@ -1271,31 +1278,32 @@ func TestObjectEntriesExplicitDir(t *testing.T) { want []api.ObjectMetadata }{ {"/", "", "", "", []api.ObjectMetadata{ - {Name: "/dir/", Size: 1, Health: 0.5}, - {ETag: "d34db33f", Name: "/dir2/", Size: 2, Health: 1, MimeType: testMimeType}, // has MimeType and ETag since it's a file + {Key: "/dir/", Size: 1, Health: 0.5}, + {ETag: "d34db33f", Key: "/dir2/", Size: 2, Health: 1, MimeType: testMimeType}, // has MimeType and ETag since it's a file }}, - {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Name: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, + {"/dir/", "", "", "", []api.ObjectMetadata{{ETag: "d34db33f", Key: "/dir/file", Size: 1, Health: 0.5, MimeType: testMimeType}}}, } for _, test := range tests { - got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) + got, err := ss.ListObjects(ctx, api.DefaultBucketName, test.path+test.prefix, "", "/", test.sortBy, test.sortDir, "", -1) if err != nil 
{ t.Fatal(err) } - for i := range got { - got[i].ModTime = api.TimeRFC3339{} // ignore time for comparison + for i := range got.Objects { + got.Objects[i].ModTime = api.TimeRFC3339{} // ignore time for comparison } - if !reflect.DeepEqual(got, test.want) { + if !reflect.DeepEqual(got.Objects, test.want) { t.Fatalf("\nlist: %v\nprefix: %v\ngot: %v\nwant: %v", test.path, test.prefix, got, test.want) } } } -// TestSearchObjects is a test for the SearchObjects method. -func TestSearchObjects(t *testing.T) { +// TestListObjectsSubstring is a test for the ListObjects fuzzy +// search via the "substring" argument. +func TestListObjectsSubstring(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() objects := []struct { - path string + key string size int64 }{ {"/foo/bar", 1}, @@ -1310,14 +1318,14 @@ func TestSearchObjects(t *testing.T) { obj := newTestObject(frand.Intn(9) + 1) obj.Slabs = obj.Slabs[:1] obj.Slabs[0].Length = uint32(o.size) - if _, err := ss.addTestObject(o.path, obj); err != nil { + if _, err := ss.addTestObject(o.key, obj); err != nil { t.Fatal(err) } } metadataEquals := func(got api.ObjectMetadata, want api.ObjectMetadata) bool { t.Helper() - return got.Name == want.Name && + return got.Key == want.Key && got.Size == want.Size && got.Health == want.Health } @@ -1335,27 +1343,31 @@ func TestSearchObjects(t *testing.T) { } tests := []struct { - path string + key string want []api.ObjectMetadata }{ - {"/", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/foo/b", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"o/baz/quu", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, - {"uu", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, + {"/foo/b", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"o/baz/quu", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"uu", []api.ObjectMetadata{{Key: "/foo/baz/quux", Size: 3, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}}, } for _, test := range tests { - got, err := ss.SearchObjects(ctx, api.DefaultBucketName, test.path, 0, -1) + resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.key, "", "", "", "", -1) if err != nil { t.Fatal(err) } + got := resp.Objects assertEqual(got, test.want) + var marker string for offset := 0; offset < len(test.want); offset++ { - if got, err := ss.SearchObjects(ctx, api.DefaultBucketName, test.path, offset, 1); err != nil { + if resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", test.key, "", "", "", marker, 
1); err != nil { t.Fatal(err) - } else if len(got) != 1 { - t.Errorf("\nkey: %v unexpected number of objects, %d != 1", test.path, len(got)) + } else if got := resp.Objects; len(got) != 1 { + t.Errorf("\nkey: %v unexpected number of objects, %d != 1", test.key, len(got)) } else if !metadataEquals(got[0], test.want[offset]) { - t.Errorf("\nkey: %v\ngot: %v\nwant: %v", test.path, got, test.want[offset]) + t.Errorf("\nkey: %v\ngot: %v\nwant: %v", test.key, got, test.want[offset]) + } else { + marker = resp.NextMarker } } } @@ -1394,8 +1406,8 @@ func TestUnhealthySlabs(t *testing.T) { // good slab { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk2, fcid2, types.Hash256{2}), @@ -1406,8 +1418,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - hk4 is bad (1/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{4}), newTestShard(hk2, fcid2, types.Hash256{5}), @@ -1418,8 +1430,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - hk4 is bad (2/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{7}), newTestShard(hk4, fcid4, types.Hash256{8}), @@ -1430,8 +1442,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - hk5 is deleted (1/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{10}), newTestShard(hk2, fcid2, types.Hash256{11}), @@ -1442,8 +1454,8 @@ func TestUnhealthySlabs(t *testing.T) { // unhealthy slab - h1 is reused { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{13}), newTestShard(hk1, fcid4, types.Hash256{14}), @@ -1454,8 +1466,8 @@ func TestUnhealthySlabs(t *testing.T) { // lost slab - no good pieces (0/3) { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{16}), newTestShard(hk2, fcid2, types.Hash256{17}), @@ -1488,10 +1500,10 @@ func TestUnhealthySlabs(t *testing.T) { } expected := []api.UnhealthySlab{ - {Key: obj.Slabs[2].Key, Health: 0}, - {Key: obj.Slabs[4].Key, Health: 0}, - {Key: obj.Slabs[1].Key, Health: 0.5}, - {Key: obj.Slabs[3].Key, Health: 0.5}, + {EncryptionKey: obj.Slabs[2].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[4].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[1].EncryptionKey, Health: 0.5}, + {EncryptionKey: obj.Slabs[3].EncryptionKey, Health: 0.5}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -1509,8 +1521,8 @@ func TestUnhealthySlabs(t *testing.T) { } expected = []api.UnhealthySlab{ - {Key: obj.Slabs[2].Key, Health: 0}, - {Key: obj.Slabs[4].Key, Health: 0}, + {EncryptionKey: obj.Slabs[2].EncryptionKey, Health: 0}, + {EncryptionKey: obj.Slabs[4].EncryptionKey, Health: 0}, } if !reflect.DeepEqual(slabs, expected) { 
t.Fatal("slabs are not returned in the correct order", slabs, expected) @@ -1559,8 +1571,8 @@ func TestUnhealthySlabsNegHealth(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk1, fcid1, types.Hash256{2}), @@ -1618,9 +1630,9 @@ func TestUnhealthySlabsNoContracts(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, }, }, @@ -1699,16 +1711,16 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { // hk1 is good so this slab should have full health. { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), }, }, // hk4 is bad so this slab should have no health. { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 2, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 2, Shards: []object.Sector{ newTestShard(hk2, fcid2, types.Hash256{2}), newTestShard(hk3, fcid3, types.Hash256{4}), @@ -1734,7 +1746,7 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { } expected := []api.UnhealthySlab{ - {Key: obj.Slabs[1].Slab.Key, Health: -1}, + {EncryptionKey: obj.Slabs[1].Slab.EncryptionKey, Health: -1}, } if !reflect.DeepEqual(slabs, expected) { t.Fatal("slabs are not returned in the correct order") @@ -1826,8 +1838,8 @@ func TestUpdateSlab(t *testing.T) { Slabs: []object.SlabSlice{ { Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), - MinShards: 1, + EncryptionKey: object.GenerateEncryptionKey(), + MinShards: 1, Shards: []object.Sector{ newTestShard(hk1, fcid1, types.Hash256{1}), newTestShard(hk2, fcid2, types.Hash256{2}), @@ -1842,7 +1854,7 @@ func TestUpdateSlab(t *testing.T) { } // extract the slab key - key, err := obj.Slabs[0].Key.MarshalBinary() + key, err := obj.Slabs[0].EncryptionKey.MarshalBinary() if err != nil { t.Fatal(err) } @@ -1850,7 +1862,7 @@ func TestUpdateSlab(t *testing.T) { // helper to fetch a slab from the database fetchSlab := func() (slab object.Slab) { t.Helper() - if slab, err = ss.Slab(ctx, obj.Slabs[0].Key); err != nil { + if slab, err = ss.Slab(ctx, obj.Slabs[0].EncryptionKey); err != nil { t.Fatal(err) } return @@ -1964,8 +1976,8 @@ func TestUpdateSlab(t *testing.T) { t.Fatal(err) } else if len(obj.Slabs) != 1 { t.Fatalf("unexpected number of slabs, %v != 1", len(obj.Slabs)) - } else if obj.Slabs[0].Key.String() != updated.Key.String() { - t.Fatalf("unexpected slab, %v != %v", obj.Slabs[0].Key, updated.Key) + } else if obj.Slabs[0].EncryptionKey.String() != updated.EncryptionKey.String() { + t.Fatalf("unexpected slab, %v != %v", obj.Slabs[0].EncryptionKey, updated.EncryptionKey) } // update the slab to change its contract set. 
@@ -1996,9 +2008,9 @@ func newTestObject(slabs int) object.Object {
 		length := offset + uint32(frand.Uint64n(1<<22))
 		obj.Slabs[i] = object.SlabSlice{
 			Slab: object.Slab{
-				Key:       object.GenerateEncryptionKey(),
-				MinShards: n,
-				Shards:    make([]object.Sector, n*2),
+				EncryptionKey: object.GenerateEncryptionKey(),
+				MinShards:     n,
+				Shards:        make([]object.Sector, n*2),
 			},
 			Offset: offset,
 			Length: length,
@@ -2178,18 +2190,18 @@ func TestRenameObjects(t *testing.T) {
 	}

 	// Assert that number of objects matches.
-	objs, err := ss.SearchObjects(ctx, api.DefaultBucketName, "/", 0, 100)
+	resp, err := ss.ListObjects(ctx, api.DefaultBucketName, "", "/", "", "", "", "", 100)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if len(objs) != len(objectsAfter) {
-		t.Fatal("unexpected number of objects", len(objs), len(objectsAfter))
+	if len(resp.Objects) != len(objectsAfter) {
+		t.Fatal("unexpected number of objects", len(resp.Objects), len(objectsAfter))
 	}

 	// Assert paths are correct.
-	for _, obj := range objs {
-		if _, exists := objectsAfterMap[obj.Name]; !exists {
-			t.Fatal("unexpected path", obj.Name)
+	for _, obj := range resp.Objects {
+		if _, exists := objectsAfterMap[obj.Key]; !exists {
+			t.Fatal("unexpected path", obj.Key)
 		}
 	}

@@ -2432,7 +2444,7 @@ func TestPartialSlab(t *testing.T) {
 	} else if bufferSize != rhpv2.SectorSize {
 		t.Fatal("unexpected buffer size", bufferSize)
 	}
-	data, err := ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length)
+	data, err := ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length)
 	if err != nil {
 		t.Fatal(err)
 	} else if !bytes.Equal(data, slab1Data) {
@@ -2457,7 +2469,7 @@ func TestPartialSlab(t *testing.T) {
 		return
 	}
-	buffer := fetchBuffer(slabs[0].Key)
+	buffer := fetchBuffer(slabs[0].EncryptionKey)
 	if buffer.Filename == "" {
 		t.Fatal("empty filename")
 	}
@@ -2471,9 +2483,9 @@ func TestPartialSlab(t *testing.T) {
 		Slabs: []object.SlabSlice{
 			{
 				Slab: object.Slab{
-					Health:    1.0,
-					Key:       object.GenerateEncryptionKey(),
-					MinShards: 1,
+					Health:        1.0,
+					EncryptionKey: object.GenerateEncryptionKey(),
+					MinShards:     1,
 					Shards: []object.Sector{
 						newTestShard(hk1, fcid1, frand.Entropy256()),
 						newTestShard(hk2, fcid2, frand.Entropy256()),
@@ -2509,7 +2521,7 @@ func TestPartialSlab(t *testing.T) {
 	} else if bufferSize != rhpv2.SectorSize {
 		t.Fatal("unexpected buffer size", bufferSize)
 	}
-	data, err = ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length)
+	data, err = ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length)
 	if err != nil {
 		t.Fatal(err)
 	} else if !bytes.Equal(data, slab2Data) {
@@ -2544,16 +2556,16 @@ func TestPartialSlab(t *testing.T) {
 	if bufferSize != 2*rhpv2.SectorSize {
 		t.Fatal("unexpected buffer size", bufferSize)
 	}
-	if data1, err := ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length); err != nil {
+	if data1, err := ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length); err != nil {
 		t.Fatal(err)
-	} else if data2, err := ss.FetchPartialSlab(ctx, slabs[1].Key, slabs[1].Offset, slabs[1].Length); err != nil {
+	} else if data2, err := ss.FetchPartialSlab(ctx, slabs[1].EncryptionKey, slabs[1].Offset, slabs[1].Length); err != nil {
 		t.Fatal(err)
 	} else if !bytes.Equal(slab3Data, append(data1, data2...)) {
 		t.Fatal("wrong data")
 	}
 	assertBuffer(buffer1Name, rhpv2.SectorSize, true, false)

-	buffer = fetchBuffer(slabs[1].Key)
+	buffer = fetchBuffer(slabs[1].EncryptionKey)
 	buffer2Name := buffer.Filename
 	assertBuffer(buffer2Name, 1, false, false)

@@ -2578,7 +2590,7 @@ func TestPartialSlab(t *testing.T) {
 	assertBuffer(buffer1Name, rhpv2.SectorSize, true, true)
 	assertBuffer(buffer2Name, 1, false, false)

-	buffer = fetchBuffer(packedSlabs[0].Key)
+	buffer = fetchBuffer(packedSlabs[0].EncryptionKey)
 	if buffer.ID != packedSlabs[0].BufferID {
 		t.Fatalf("wrong buffer id, %v != %v", buffer.ID, packedSlabs[0].BufferID)
 	}
@@ -2597,13 +2609,13 @@ func TestPartialSlab(t *testing.T) {
 		t.Fatal(err)
 	}

-	buffer = fetchBuffer(packedSlabs[0].Key)
+	buffer = fetchBuffer(packedSlabs[0].EncryptionKey)
 	if buffer != (bufferedSlab{}) {
 		t.Fatal("shouldn't be able to find buffer", err)
 	}
 	assertBuffer(buffer2Name, 1, false, false)

-	_, err = ss.FetchPartialSlab(ctx, slabs[0].Key, slabs[0].Offset, slabs[0].Length)
+	_, err = ss.FetchPartialSlab(ctx, slabs[0].EncryptionKey, slabs[0].Offset, slabs[0].Length)
 	if !errors.Is(err, api.ErrObjectNotFound) {
 		t.Fatal("expected ErrObjectNotFound", err)
 	}
@@ -2719,9 +2731,9 @@ func TestContractSizes(t *testing.T) {
 			Slabs: []object.SlabSlice{
 				{
 					Slab: object.Slab{
-						Key:       object.GenerateEncryptionKey(),
-						MinShards: 1,
-						Shards:    newTestShards(hks[i], fcids[i], types.Hash256{byte(i)}),
+						EncryptionKey: object.GenerateEncryptionKey(),
+						MinShards:     1,
+						Shards:        newTestShards(hks[i], fcids[i], types.Hash256{byte(i)}),
 					},
 				},
 			},
@@ -2822,10 +2834,10 @@ func TestObjectsBySlabKey(t *testing.T) {
 	// create a slab.
 	slab := object.Slab{
-		Health:    1.0,
-		Key:       object.GenerateEncryptionKey(),
-		MinShards: 1,
-		Shards:    newTestShards(hk1, fcid1, types.Hash256{1}),
+		Health:        1.0,
+		EncryptionKey: object.GenerateEncryptionKey(),
+		MinShards:     1,
+		Shards:        newTestShards(hk1, fcid1, types.Hash256{1}),
 	}

 	// Add 3 objects that all reference the slab.
@@ -2847,13 +2859,13 @@ func TestObjectsBySlabKey(t *testing.T) {
 	}

 	// Fetch the objects by slab.
-	objs, err := ss.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, slab.Key)
+	objs, err := ss.ObjectsBySlabKey(context.Background(), api.DefaultBucketName, slab.EncryptionKey)
 	if err != nil {
 		t.Fatal(err)
 	}
 	for i, name := range []string{"obj1", "obj2", "obj3"} {
-		if objs[i].Name != name {
-			t.Fatal("unexpected object name", objs[i].Name, name)
+		if objs[i].Key != name {
+			t.Fatal("unexpected object name", objs[i].Key, name)
 		}
 		if objs[i].Size != int64(i)+1 {
 			t.Fatal("unexpected object size", objs[i].Size, i+1)
@@ -2955,30 +2967,30 @@ func TestBucketObjects(t *testing.T) {
 	}

 	// List the objects in the buckets.
-	if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil {
+	if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 1 entry", len(entries))
 	} else if entries[0].Size != 1 {
 		t.Fatal("unexpected size", entries[0].Size)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 1 entry", len(entries))
 	} else if entries[0].Size != 2 {
 		t.Fatal("unexpected size", entries[0].Size)
 	}

 	// Search the objects in the buckets.
-	if objects, err := ss.SearchObjects(context.Background(), b1, "", 0, -1); err != nil {
+	if resp, err := ss.ListObjects(context.Background(), b1, "", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(objects) != 2 {
+	} else if objects := resp.Objects; len(objects) != 2 {
 		t.Fatal("expected 2 objects", len(objects))
 	} else if objects[0].Size != 3 || objects[1].Size != 1 {
 		t.Fatal("unexpected size", objects[0].Size, objects[1].Size)
-	} else if objects, err := ss.SearchObjects(context.Background(), b2, "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(objects) != 2 {
+	} else if objects := resp.Objects; len(objects) != 2 {
 		t.Fatal("expected 2 objects", len(objects))
 	} else if objects[0].Size != 4 || objects[1].Size != 2 {
 		t.Fatal("unexpected size", objects[0].Size, objects[1].Size)
@@ -2987,35 +2999,35 @@ func TestBucketObjects(t *testing.T) {
 	// Rename object foo/bar in bucket 1 to foo/baz but not in bucket 2.
 	if err := ss.RenameObjectBlocking(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 2 entries", len(entries))
-	} else if entries[0].Name != "/foo/baz" {
-		t.Fatal("unexpected name", entries[0].Name)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if entries[0].Key != "/foo/baz" {
+		t.Fatal("unexpected name", entries[0].Key)
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 2 entries", len(entries))
-	} else if entries[0].Name != "/foo/bar" {
-		t.Fatal("unexpected name", entries[0].Name)
+	} else if entries[0].Key != "/foo/bar" {
+		t.Fatal("unexpected name", entries[0].Key)
 	}

 	// Rename foo/bar in bucket 2 using the batch rename.
 	if err := ss.RenameObjectsBlocking(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 2 entries", len(entries))
-	} else if entries[0].Name != "/foo/baz" {
-		t.Fatal("unexpected name", entries[0].Name)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if entries[0].Key != "/foo/baz" {
+		t.Fatal("unexpected name", entries[0].Key)
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 2 entries", len(entries))
-	} else if entries[0].Name != "/foo/bam" {
-		t.Fatal("unexpected name", entries[0].Name)
+	} else if entries[0].Key != "/foo/bam" {
+		t.Fatal("unexpected name", entries[0].Key)
 	}

 	// Delete foo/baz in bucket 1 but first try bucket 2 since that should fail.
@@ -3023,30 +3035,30 @@ func TestBucketObjects(t *testing.T) {
 		t.Fatal(err)
 	} else if err := ss.RemoveObjectBlocking(context.Background(), b1, "/foo/baz"); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b1, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) > 0 {
+	} else if entries := resp.Objects; len(entries) > 0 {
 		t.Fatal("expected 0 entries", len(entries))
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/foo/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "/foo/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 1 entry", len(entries))
 	}

 	// Delete all files in bucket 2.
-	if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/", "", "", "", "", 0, -1); err != nil {
+	if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 2 {
+	} else if entries := resp.Objects; len(entries) != 2 {
 		t.Fatal("expected 2 entries", len(entries))
 	} else if err := ss.RemoveObjectsBlocking(context.Background(), b2, "/"); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b2, "/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b2, "/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 0 {
+	} else if entries := resp.Objects; len(entries) != 0 {
 		t.Fatal("expected 0 entries", len(entries))
-	} else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(context.Background(), b1, "/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 1 entry", len(entries))
 	}

@@ -3062,11 +3074,11 @@ func TestBucketObjects(t *testing.T) {
 	// See if we can fetch the object by slab.
 	if obj, err := ss.Object(context.Background(), b1, "/bar"); err != nil {
 		t.Fatal(err)
-	} else if objects, err := ss.ObjectsBySlabKey(context.Background(), b1, obj.Slabs[0].Key); err != nil {
+	} else if objects, err := ss.ObjectsBySlabKey(context.Background(), b1, obj.Slabs[0].EncryptionKey); err != nil {
 		t.Fatal(err)
 	} else if len(objects) != 1 {
 		t.Fatal("expected 1 object", len(objects))
-	} else if objects, err := ss.ObjectsBySlabKey(context.Background(), b2, obj.Slabs[0].Key); err != nil {
+	} else if objects, err := ss.ObjectsBySlabKey(context.Background(), b2, obj.Slabs[0].EncryptionKey); err != nil {
 		t.Fatal(err)
 	} else if len(objects) != 0 {
 		t.Fatal("expected 0 objects", len(objects))
@@ -3095,12 +3107,12 @@ func TestCopyObject(t *testing.T) {
 	// Copy it within the same bucket.
 	if om, err := ss.CopyObject(ctx, "src", "src", "/foo", "/bar", "", nil); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(ctx, "src", "/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(ctx, "src", "/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 2 {
+	} else if entries := resp.Objects; len(entries) != 2 {
 		t.Fatal("expected 2 entries", len(entries))
-	} else if entries[0].Name != "/bar" || entries[1].Name != "/foo" {
-		t.Fatal("unexpected names", entries[0].Name, entries[1].Name)
+	} else if entries[0].Key != "/bar" || entries[1].Key != "/foo" {
+		t.Fatal("unexpected names", entries[0].Key, entries[1].Key)
 	} else if om.ModTime.IsZero() {
 		t.Fatal("expected mod time to be set")
 	}

 	// Copy it cross buckets.
 	if om, err := ss.CopyObject(ctx, "src", "dst", "/foo", "/bar", "", nil); err != nil {
 		t.Fatal(err)
-	} else if entries, _, err := ss.ObjectEntries(ctx, "dst", "/", "", "", "", "", 0, -1); err != nil {
+	} else if resp, err := ss.ListObjects(ctx, "dst", "/", "", "", "", "", "", -1); err != nil {
 		t.Fatal(err)
-	} else if len(entries) != 1 {
+	} else if entries := resp.Objects; len(entries) != 1 {
 		t.Fatal("expected 1 entry", len(entries))
-	} else if entries[0].Name != "/bar" {
-		t.Fatal("unexpected names", entries[0].Name, entries[1].Name)
+	} else if entries[0].Key != "/bar" {
+		t.Fatal("unexpected names", entries[0].Key, entries[1].Key)
 	} else if om.ModTime.IsZero() {
 		t.Fatal("expected mod time to be set")
 	}
@@ -3183,11 +3195,11 @@ func TestMarkSlabUploadedAfterRenew(t *testing.T) {
 	}
 }

-func TestListObjects(t *testing.T) {
+func TestListObjectsNoDelimiter(t *testing.T) {
 	ss := newTestSQLStore(t, defaultTestSQLStoreConfig)
 	defer ss.Close()

 	objects := []struct {
-		path string
+		key  string
 		size int64
 	}{
 		{"/foo/bar", 1},
@@ -3201,7 +3213,7 @@ func TestListObjects(t *testing.T) {
 	// assert mod time & clear it afterwards so we can compare
 	assertModTime := func(entries []api.ObjectMetadata) {
 		for i := range entries {
-			if !strings.HasSuffix(entries[i].Name, "/") && entries[i].ModTime.IsZero() {
+			if !strings.HasSuffix(entries[i].Key, "/") && entries[i].ModTime.IsZero() {
 				t.Fatal("mod time should be set")
 			}
 			entries[i].ModTime = api.TimeRFC3339{}
@@ -3213,7 +3225,7 @@ func TestListObjects(t *testing.T) {
 		obj := newTestObject(frand.Intn(9) + 1)
 		obj.Slabs = obj.Slabs[:1]
 		obj.Slabs[0].Length = uint32(o.size)
-		if _, err := ss.addTestObject(o.path, obj); err != nil {
+		if _, err := ss.addTestObject(o.key, obj); err != nil {
 			t.Fatal(err)
 		}
 	}
@@ -3238,16 +3250,16 @@ func TestListObjects(t *testing.T) {
 		marker  string
 		want    []api.ObjectMetadata
 	}{
-		{"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/gab/guub", Size: 5, Health: 1}}},
-		{"/", "", "ASC", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/gab/guub", Size: 5, Health: 1}}},
-		{"/", "", "DESC", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}},
-		{"/", "health", "ASC", "", []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}},
-		{"/", "health", "DESC", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}},
-		{"/foo/b", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}},
+		{"/", "", "", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/gab/guub", Size: 5, Health: 1}}},
+		{"/", "", "ASC", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/gab/guub", Size: 5, Health: 1}}},
+		{"/", "", "DESC", "", []api.ObjectMetadata{{Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/FOO/bar", Size: 6, Health: 1}}},
+		{"/", "health", "ASC", "", []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}}},
+		{"/", "health", "DESC", "", []api.ObjectMetadata{{Key: "/FOO/bar", Size: 6, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/gab/guub", Size: 5, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}},
+		{"/foo/b", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}},
 		{"o/baz/quu", "", "", "", []api.ObjectMetadata{}},
-		{"/foo", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}},
-		{"/foo", "size", "ASC", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}},
-		{"/foo", "size", "DESC", "", []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}},
+		{"/foo", "", "", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}},
+		{"/foo", "size", "ASC", "", []api.ObjectMetadata{{Key: "/foo/bar", Size: 1, Health: 1}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/baz/quuz", Size: 4, Health: .5}}},
+		{"/foo", "size", "DESC", "", []api.ObjectMetadata{{Key: "/foo/baz/quuz", Size: 4, Health: .5}, {Key: "/foo/baz/quux", Size: 3, Health: .75}, {Key: "/foo/bat", Size: 2, Health: 1}, {Key: "/foo/bar", Size: 1, Health: 1}}},
 	}
 	// set common fields
 	for i := range tests {
@@ -3257,7 +3269,7 @@ func TestListObjects(t *testing.T) {
 		}
 	}
 	for _, test := range tests {
-		res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, test.sortBy, test.sortDir, "", -1)
+		res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", "", test.sortBy, test.sortDir, "", -1)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -3272,7 +3284,7 @@ func TestListObjects(t *testing.T) {
 		if len(res.Objects) > 0 {
 			marker := ""
 			for offset := 0; offset < len(test.want); offset++ {
-				res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, test.sortBy, test.sortDir, marker, 1)
+				res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, "", "", test.sortBy, test.sortDir, marker, 1)
 				if err != nil {
 					t.Fatal(err)
 				}
@@ -3283,8 +3295,8 @@ func TestListObjects(t *testing.T) {
 				got := res.Objects
 				if len(got) != 1 {
 					t.Fatalf("expected 1 object, got %v", len(got))
-				} else if got[0].Name != test.want[offset].Name {
-					t.Fatalf("expected %v, got %v, offset %v, marker %v", test.want[offset].Name, got[0].Name, offset, marker)
+				} else if got[0].Key != test.want[offset].Key {
+					t.Fatalf("expected %v, got %v, offset %v, marker %v", test.want[offset].Key, got[0].Key, offset, marker)
 				}
 				marker = res.NextMarker
 			}
@@ -3312,8 +3324,8 @@ func TestDeleteHostSector(t *testing.T) {
 	// create a healthy slab with one sector that is uploaded to all contracts.
 	root := types.Hash256{1, 2, 3}
 	ss.InsertSlab(object.Slab{
-		Key:       object.GenerateEncryptionKey(),
-		MinShards: 1,
+		EncryptionKey: object.GenerateEncryptionKey(),
+		MinShards:     1,
 		Shards: []object.Sector{
 			{
 				Contracts: map[types.PublicKey][]types.FileContractID{
@@ -3480,9 +3492,9 @@ func TestUpdateSlabSanityChecks(t *testing.T) {
 		shards = append(shards, newTestShard(hks[i], contracts[i].ID, types.Hash256{byte(i + 1)}))
 	}
 	slab := object.Slab{
-		Key:    object.GenerateEncryptionKey(),
-		Shards: shards,
-		Health: 1,
+		EncryptionKey: object.GenerateEncryptionKey(),
+		Shards:        shards,
+		Health:        1,
 	}

 	// set slab.
@@ -3495,7 +3507,7 @@ func TestUpdateSlabSanityChecks(t *testing.T) {
 	}

 	// verify slab.
-	rSlab, err := ss.Slab(context.Background(), slab.Key)
+	rSlab, err := ss.Slab(context.Background(), slab.EncryptionKey)
 	if err != nil {
 		t.Fatal(err)
 	} else if !reflect.DeepEqual(slab, rSlab) {
@@ -3504,8 +3516,8 @@ func TestUpdateSlabSanityChecks(t *testing.T) {
 	}

 	// change the length to fail the update.
 	if err := ss.UpdateSlab(context.Background(), object.Slab{
-		Key:    slab.Key,
-		Shards: shards[:len(shards)-1],
+		EncryptionKey: slab.EncryptionKey,
+		Shards:        shards[:len(shards)-1],
 	}, testContractSet); !errors.Is(err, isql.ErrInvalidNumberOfShards) {
 		t.Fatal(err)
 	}
@@ -3517,8 +3529,8 @@ func TestUpdateSlabSanityChecks(t *testing.T) {
 		reversedShards[i], reversedShards[j] = reversedShards[j], reversedShards[i]
 	}
 	reversedSlab := object.Slab{
-		Key:    slab.Key,
-		Shards: reversedShards,
+		EncryptionKey: slab.EncryptionKey,
+		Shards:        reversedShards,
 	}
 	if err := ss.UpdateSlab(context.Background(), reversedSlab, testContractSet); !errors.Is(err, isql.ErrShardRootChanged) {
 		t.Fatal(err)
@@ -3572,7 +3584,7 @@ func TestSlabHealthInvalidation(t *testing.T) {
 	_, err = ss.addTestObject("o1", object.Object{
 		Key: object.GenerateEncryptionKey(),
 		Slabs: []object.SlabSlice{{Slab: object.Slab{
-			Key: s1,
+			EncryptionKey: s1,
 			Shards: []object.Sector{
 				newTestShard(hks[0], fcids[0], types.Hash256{0}),
 				newTestShard(hks[1], fcids[1], types.Hash256{1}),
@@ -3588,7 +3600,7 @@ func TestSlabHealthInvalidation(t *testing.T) {
 	err = ss.UpdateObject(context.Background(), api.DefaultBucketName, "o2", testContractSet, testETag, testMimeType, testMetadata, object.Object{
 		Key: object.GenerateEncryptionKey(),
 		Slabs: []object.SlabSlice{{Slab: object.Slab{
-			Key: s2,
+			EncryptionKey: s2,
 			Shards: []object.Sector{
 				newTestShard(hks[2], fcids[2], types.Hash256{2}),
 				newTestShard(hks[3], fcids[3], types.Hash256{3}),
@@ -3718,8 +3730,8 @@ func TestRefreshHealth(t *testing.T) {
 	if added, err := ss.addTestObject(o1, object.Object{
 		Key: object.GenerateEncryptionKey(),
 		Slabs: []object.SlabSlice{{Slab: object.Slab{
-			MinShards: 2,
-			Key:       object.GenerateEncryptionKey(),
+			MinShards:     2,
+			EncryptionKey: object.GenerateEncryptionKey(),
 			Shards: []object.Sector{
 				newTestShard(hks[0], fcids[0], types.Hash256{0}),
 				newTestShard(hks[1], fcids[1], types.Hash256{1}),
@@ -3737,8 +3749,8 @@ func TestRefreshHealth(t *testing.T) {
 	if added, err := ss.addTestObject(o2, object.Object{
 		Key: object.GenerateEncryptionKey(),
 		Slabs: []object.SlabSlice{{Slab: object.Slab{
-			MinShards: 2,
-			Key:       object.GenerateEncryptionKey(),
+			MinShards:     2,
+			EncryptionKey: object.GenerateEncryptionKey(),
 			Shards: []object.Sector{
 				newTestShard(hks[4], fcids[4], types.Hash256{4}),
 				newTestShard(hks[5], fcids[5], types.Hash256{5}),
@@ -3946,8 +3958,8 @@ func TestUpdateObjectReuseSlab(t *testing.T) {
 			Offset: 0,
 			Length: uint32(minShards) * rhpv2.SectorSize,
 			Slab: object.Slab{
-				Key:       object.GenerateEncryptionKey(),
-				MinShards: uint8(minShards),
+				EncryptionKey: object.GenerateEncryptionKey(),
+				MinShards:     uint8(minShards),
 			},
 		})
 	}
@@ -4098,7 +4110,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) {
 		t.Fatal("invalid minShards", slab.MinShards)
 	} else if slab.TotalShards != uint8(totalShards) {
 		t.Fatal("invalid totalShards", slab.TotalShards)
-	} else if slab.Key.String() != obj.Slabs[i].Key.String() {
+	} else if slab.Key.String() != obj.Slabs[i].EncryptionKey.String() {
 		t.Fatal("wrong key")
 	}
@@ -4128,8 +4140,8 @@ func TestUpdateObjectReuseSlab(t *testing.T) {
 		Offset: 0,
 		Length: uint32(minShards) * rhpv2.SectorSize,
 		Slab: object.Slab{
-			Key:       object.GenerateEncryptionKey(),
-			MinShards: uint8(minShards),
+			EncryptionKey: object.GenerateEncryptionKey(),
+			MinShards:     uint8(minShards),
 		},
 	})
 	// 30 shards each
@@ -4201,7 +4213,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) {
 		t.Fatal("invalid minShards", slab2.MinShards)
 	} else if slab2.TotalShards != uint8(totalShards) {
 		t.Fatal("invalid totalShards", slab2.TotalShards)
-	} else if slab2.Key.String() != obj2.Slabs[0].Key.String() {
+	} else if slab2.Key.String() != obj2.Slabs[0].EncryptionKey.String() {
 		t.Fatal("wrong key")
 	}
@@ -4303,20 +4315,20 @@ func TestUpdateObjectParallel(t *testing.T) {
 		Slabs: []object.SlabSlice{
 			{
 				Slab: object.Slab{
-					Health:    1.0,
-					Key:       object.GenerateEncryptionKey(),
-					MinShards: 1,
-					Shards:    newTestShards(hk1, fcid1, frand.Entropy256()),
+					Health:        1.0,
+					EncryptionKey: object.GenerateEncryptionKey(),
+					MinShards:     1,
+					Shards:        newTestShards(hk1, fcid1, frand.Entropy256()),
 				},
 				Offset: 10,
 				Length: 100,
 			},
 			{
 				Slab: object.Slab{
-					Health:    1.0,
-					Key:       object.GenerateEncryptionKey(),
-					MinShards: 2,
-					Shards:    newTestShards(hk2, fcid2, frand.Entropy256()),
+					Health:        1.0,
+					EncryptionKey: object.GenerateEncryptionKey(),
+					MinShards:     2,
+					Shards:        newTestShards(hk2, fcid2, frand.Entropy256()),
 				},
 				Offset: 20,
 				Length: 200,
diff --git a/stores/multipart.go b/stores/multipart.go
index ec987619f..532942303 100644
--- a/stores/multipart.go
+++ b/stores/multipart.go
@@ -10,10 +10,10 @@ import (
 	sql "go.sia.tech/renterd/stores/sql"
 )

-func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) {
+func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) {
 	var uploadID string
 	err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) (err error) {
-		uploadID, err = tx.InsertMultipartUpload(ctx, bucket, path, ec, mimeType, metadata)
+		uploadID, err = tx.InsertMultipartUpload(ctx, bucket, key, ec, mimeType, metadata)
 		return
 	})
 	if err != nil {
@@ -24,9 +24,9 @@ func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path strin
 	}, err
 }

-func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
+func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, key, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
 	return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		return tx.AddMultipartPart(ctx, bucket, path, contractSet, eTag, uploadID, partNumber, slices)
+		return tx.AddMultipartPart(ctx, bucket, key, contractSet, eTag, uploadID, partNumber, slices)
 	})
 }

@@ -54,9 +54,9 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri
 	return resp, err
 }

-func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error {
+func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error {
 	err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		return tx.AbortMultipartUpload(ctx, bucket, path, uploadID)
+		return tx.AbortMultipartUpload(ctx, bucket, key, uploadID)
 	})
 	if err != nil {
 		return err
@@ -65,7 +65,7 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string
 	return nil
 }

-func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path string, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) {
+func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, key string, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) {
 	// Sanity check input parts.
 	if !sort.SliceIsSorted(parts, func(i, j int) bool {
 		return parts[i].PartNumber < parts[j].PartNumber
@@ -82,13 +82,13 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str
 	var prune bool
 	err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
 		// Delete potentially existing object.
-		prune, err = tx.DeleteObject(ctx, bucket, path)
+		prune, err = tx.DeleteObject(ctx, bucket, key)
 		if err != nil {
 			return fmt.Errorf("failed to delete object: %w", err)
 		}

 		// Complete upload
-		eTag, err = tx.CompleteMultipartUpload(ctx, bucket, path, uploadID, parts, opts)
+		eTag, err = tx.CompleteMultipartUpload(ctx, bucket, key, uploadID, parts, opts)
 		if err != nil {
 			return fmt.Errorf("failed to complete multipart upload: %w", err)
 		}
diff --git a/stores/settings.go b/stores/settings.go
new file mode 100644
index 000000000..471c013fb
--- /dev/null
+++ b/stores/settings.go
@@ -0,0 +1,107 @@
+package stores
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"go.sia.tech/renterd/api"
+	sql "go.sia.tech/renterd/stores/sql"
+)
+
+const (
+	SettingGouging = "gouging"
+	SettingPinned  = "pinned"
+	SettingS3      = "s3"
+	SettingUpload  = "upload"
+)
+
+func (s *SQLStore) GougingSettings(ctx context.Context) (gs api.GougingSettings, err error) {
+	err = s.fetchSetting(ctx, SettingGouging, &gs)
+	return
+}
+
+func (s *SQLStore) UpdateGougingSettings(ctx context.Context, gs api.GougingSettings) error {
+	return s.updateSetting(ctx, SettingGouging, gs)
+}
+
+func (s *SQLStore) PinnedSettings(ctx context.Context) (ps api.PinnedSettings, err error) {
+	err = s.fetchSetting(ctx, SettingPinned, &ps)
+	return
+}
+
+func (s *SQLStore) UpdatePinnedSettings(ctx context.Context, ps api.PinnedSettings) error {
+	return s.updateSetting(ctx, SettingPinned, ps)
+}
+
+func (s *SQLStore) UploadSettings(ctx context.Context) (us api.UploadSettings, err error) {
+	err = s.fetchSetting(ctx, SettingUpload, &us)
+	return
+}
+
+func (s *SQLStore) UpdateUploadSettings(ctx context.Context, us api.UploadSettings) error {
+	return s.updateSetting(ctx, SettingUpload, us)
+}
+
+func (s *SQLStore) S3Settings(ctx context.Context) (ss api.S3Settings, err error) {
+	err = s.fetchSetting(ctx, SettingS3, &ss)
+	return
+}
+
+func (s *SQLStore) UpdateS3Settings(ctx context.Context, ss api.S3Settings) error {
+	return s.updateSetting(ctx, SettingS3, ss)
+}
+
+func (s *SQLStore) fetchSetting(ctx context.Context, key string, out interface{}) error {
+	s.settingsMu.Lock()
+	defer s.settingsMu.Unlock()
+
+	// fetch setting from cache
+	value, ok := s.settings[key]
+	if ok {
+		_ = json.Unmarshal([]byte(value), &out) // cached values are always valid json
+		return nil
+	}
+
+	// fetch setting from database
+	var err error
+	if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
+		value, err = tx.Setting(ctx, key)
+		return err
+	}); err != nil {
+		return err
+	}
+
+	// unmarshal setting
+	if err := json.Unmarshal([]byte(value), &out); err != nil {
+		return fmt.Errorf("failed to unmarshal setting '%s', err: %v", key, err)
+	}
+
+	// update cache
+	s.settings[key] = value
+
+	return nil
+}
+
+func (s *SQLStore) updateSetting(ctx context.Context, key string, value any) error {
+	s.settingsMu.Lock()
+	defer s.settingsMu.Unlock()
+
+	// marshal the value
+	b, err := json.Marshal(value)
+	if err != nil {
+		return fmt.Errorf("couldn't marshal the given value, error: %v", err)
+	}
+
+	// update db first
+	err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
+		return tx.UpdateSetting(ctx, key, string(b))
+	})
+	if err != nil {
+		return err
+	}
+
+	// update cache second
+	s.settings[key] = string(b)
+	return nil
+}
diff --git a/stores/settingsdb.go b/stores/settingsdb.go
deleted file mode 100644
index 7a895108c..000000000
--- a/stores/settingsdb.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package stores
-
-import (
-	"context"
-	"fmt"
-
-	sql "go.sia.tech/renterd/stores/sql"
-)
-
-// DeleteSetting implements the bus.SettingStore interface.
-func (s *SQLStore) DeleteSetting(ctx context.Context, key string) error {
-	s.settingsMu.Lock()
-	defer s.settingsMu.Unlock()
-
-	// delete from database first
-	if err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		return tx.DeleteSettings(ctx, key)
-	}); err != nil {
-		return err
-	}
-
-	// delete from cache
-	delete(s.settings, key)
-	return nil
-}
-
-// Setting implements the bus.SettingStore interface.
-func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) {
-	// Check cache first.
-	s.settingsMu.Lock()
-	defer s.settingsMu.Unlock()
-	value, ok := s.settings[key]
-	if ok {
-		return value, nil
-	}
-
-	// Check database.
-	var err error
-	err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		value, err = tx.Setting(ctx, key)
-		return err
-	})
-	if err != nil {
-		return "", fmt.Errorf("failed to fetch setting from db: %w", err)
-	}
-	s.settings[key] = value
-	return value, nil
-}
-
-// Settings implements the bus.SettingStore interface.
-func (s *SQLStore) Settings(ctx context.Context) (settings []string, err error) {
-	err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		settings, err = tx.Settings(ctx)
-		return err
-	})
-	return
-}
-
-// UpdateSetting implements the bus.SettingStore interface.
-func (s *SQLStore) UpdateSetting(ctx context.Context, key, value string) error {
-	// update db first
-	s.settingsMu.Lock()
-	defer s.settingsMu.Unlock()
-
-	err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error {
-		return tx.UpdateSetting(ctx, key, value)
-	})
-	if err != nil {
-		return err
-	}
-
-	// update cache second
-	s.settings[key] = value
-	return nil
-}
diff --git a/stores/settingsdb_test.go b/stores/settingsdb_test.go
deleted file mode 100644
index cf2582579..000000000
--- a/stores/settingsdb_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package stores
-
-import (
-	"context"
-	"errors"
-	"testing"
-
-	"go.sia.tech/renterd/api"
-)
-
-// TestSQLSettingStore tests the bus.SettingStore methods on the SQLSettingStore.
-func TestSQLSettingStore(t *testing.T) {
-	ss := newTestSQLStore(t, defaultTestSQLStoreConfig)
-	defer ss.Close()
-
-	// assert there are no settings
-	ctx := context.Background()
-	if keys, err := ss.Settings(ctx); err != nil {
-		t.Fatal(err)
-	} else if len(keys) != 0 {
-		t.Fatalf("unexpected number of settings, %v != 0", len(keys))
-	}
-
-	// add a setting
-	if err := ss.UpdateSetting(ctx, "foo", "bar"); err != nil {
-		t.Fatal(err)
-	}
-
-	// assert it's returned
-	if keys, err := ss.Settings(ctx); err != nil {
-		t.Fatal(err)
-	} else if len(keys) != 1 {
-		t.Fatalf("unexpected number of settings, %v != 1", len(keys))
-	} else if keys[0] != "foo" {
-		t.Fatalf("unexpected key, %s != 'foo'", keys[0])
-	}
-
-	// assert we can query the setting by key
-	if value, err := ss.Setting(ctx, "foo"); err != nil {
-		t.Fatal(err)
-	} else if value != "bar" {
-		t.Fatalf("unexpected value, %s != 'bar'", value)
-	}
-
-	// assert we can update the setting
-	if err := ss.UpdateSetting(ctx, "foo", "barbaz"); err != nil {
-		t.Fatal(err)
-	} else if value, err := ss.Setting(ctx, "foo"); err != nil {
-		t.Fatal(err)
-	} else if value != "barbaz" {
-		t.Fatalf("unexpected value, %s != 'barbaz'", value)
-	}
-
-	// delete the setting
-	if err := ss.DeleteSetting(ctx, "foo"); err != nil {
-		t.Fatal(err)
-	} else if _, err := ss.Setting(ctx, "foo"); !errors.Is(err, api.ErrSettingNotFound) {
-		t.Fatal("should fail with api.ErrSettingNotFound", err)
-	} else if keys, err := ss.Settings(ctx); err != nil {
-		t.Fatal(err)
-	} else if len(keys) != 0 {
-		t.Fatalf("unexpected number of settings, %v != 0", len(keys))
-	}
-}
diff --git a/stores/slabbuffer.go b/stores/slabbuffer.go
index 5e8a542b8..7e61480e1 100644
--- a/stores/slabbuffer.go
+++ b/stores/slabbuffer.go
@@ -334,9 +334,9 @@ func (mgr *SlabBufferManager) SlabsForUpload(ctx context.Context, lockingDuratio
 			return nil, err
 		}
 		slabs = append(slabs, api.PackedSlab{
-			BufferID: buffer.dbID,
-			Data:     data,
-			Key:      buffer.slabKey,
+			BufferID:      buffer.dbID,
+			Data:          data,
+			EncryptionKey: buffer.slabKey,
 		})
 		if len(slabs) == limit {
 			break
diff --git a/stores/sql.go b/stores/sql.go
index 50533768d..14216ce32 100644
--- a/stores/sql.go
+++ b/stores/sql.go
@@ -31,6 +31,10 @@ type (
 		LongTxDuration time.Duration
 	}

+	Explorer interface {
+		Enabled() bool
+	}
+
 	// SQLStore is a helper type for interacting with a SQL-based backend.
 	SQLStore struct {
 		alerts alerts.Alerter
diff --git a/stores/sql/database.go b/stores/sql/database.go
index c23089120..876d9786f 100644
--- a/stores/sql/database.go
+++ b/stores/sql/database.go
@@ -150,14 +150,21 @@ type (
 		// prefix and returns 'true' if any object was deleted.
 		DeleteObjects(ctx context.Context, bucket, prefix string, limit int64) (bool, error)

-		// DeleteSettings deletes the settings with the given key.
-		DeleteSettings(ctx context.Context, key string) error
+		// DeleteSetting deletes the setting with the given key.
+		DeleteSetting(ctx context.Context, key string) error

 		// DeleteWebhook deletes the webhook with the matching module, event and
 		// URL of the provided webhook. If the webhook doesn't exist,
 		// webhooks.ErrWebhookNotFound is returned.
 		DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error

+		// Hosts returns a list of hosts that match the provided filters
+		Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error)
+
+		// HostsForScanning returns a list of hosts to scan which haven't been
+		// scanned since at least maxLastScan.
+		HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error)
+
 		// HostAllowlist returns the list of public keys of hosts on the
 		// allowlist.
 		HostAllowlist(ctx context.Context) ([]types.PublicKey, error)
@@ -171,12 +178,9 @@ type (
 		// that was created.
 		InsertBufferedSlab(ctx context.Context, fileName string, contractSetID int64, ec object.EncryptionKey, minShards, totalShards uint8) (int64, error)

-		// InsertContract creates a new contract with the given metadata.
-		InsertContract(ctx context.Context, c api.ContractMetadata) error
-
 		// InsertMultipartUpload creates a new multipart upload and returns a
 		// unique upload ID.
-		InsertMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error)
+		InsertMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error)

 		// InsertObject inserts a new object into the database.
 		InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error
@@ -185,15 +189,11 @@ type (
 		// are associated with any of the provided contracts.
 		InvalidateSlabHealthByFCID(ctx context.Context, fcids []types.FileContractID, limit int64) (int64, error)

-		// HostsForScanning returns a list of hosts to scan which haven't been
-		// scanned since at least maxLastScan.
-		HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error)
-
 		// ListBuckets returns a list of all buckets in the database.
 		ListBuckets(ctx context.Context) ([]api.Bucket, error)

 		// ListObjects returns a list of objects from the given bucket.
-		ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error)
+		ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error)

 		// MakeDirsForPath creates all directories for a given object's path.
 		MakeDirsForPath(ctx context.Context, path string) (int64, error)
@@ -217,9 +217,6 @@ type (
 		// Object returns an object from the database.
 		Object(ctx context.Context, bucket, key string) (api.Object, error)

-		// ObjectEntries queries the database for objects in a given dir.
-		ObjectEntries(ctx context.Context, bucket, key, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error)
-
 		// ObjectMetadata returns an object's metadata.
 		ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error)
@@ -312,24 +309,9 @@ type (
 		// existing ones.
 		SaveAccounts(ctx context.Context, accounts []api.Account) error

-		// SearchHosts returns a list of hosts that match the provided filters
-		SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error)
-
-		// SearchObjects returns a list of objects that contain the provided
-		// substring.
-		SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error)
-
-		// UpdateContractSet adds/removes the provided contract ids to/from
-		// the contract set. The contract set is created in the process if
-		// it doesn't exist already.
-		UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error
-
 		// Setting returns the setting with the given key from the database.
 		Setting(ctx context.Context, key string) (string, error)

-		// Settings returns all available settings from the database.
-		Settings(ctx context.Context) ([]string, error)
-
 		// Slab returns the slab with the given ID or api.ErrSlabNotFound.
 		Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error)
@@ -358,6 +340,11 @@ type (
 		// UpdateContract sets the given metadata on the contract with given fcid.
 		UpdateContract(ctx context.Context, fcid types.FileContractID, c api.ContractMetadata) error

+		// UpdateContractSet adds/removes the provided contract ids to/from
+		// the contract set. The contract set is created in the process if
+		// it doesn't exist already.
+		UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error
+
 		// UpdateHostAllowlistEntries updates the allowlist in the database
 		UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error
diff --git a/stores/sql/main.go b/stores/sql/main.go
index d2305f1f3..d2b3aead3 100644
--- a/stores/sql/main.go
+++ b/stores/sql/main.go
@@ -29,7 +29,9 @@ import (
 )

 var (
-	ErrNegativeOffset = errors.New("offset can not be negative")
+	ErrNegativeOffset     = errors.New("offset can not be negative")
+	ErrMissingAutopilotID = errors.New("missing autopilot id")
+	ErrSettingNotFound    = errors.New("setting not found")
 )

 // helper types
@@ -417,7 +419,7 @@ func CopyObject(ctx context.Context, tx sql.Tx, srcBucket, dstBucket, srcKey, ds
 	// helper to fetch metadata
 	fetchMetadata := func(objID int64) (om api.ObjectMetadata, err error) {
 		err = tx.QueryRow(ctx, "SELECT etag, health, created_at, object_id, size, mime_type FROM objects WHERE id = ?", objID).
-			Scan(&om.ETag, &om.Health, (*time.Time)(&om.ModTime), &om.Name, &om.Size, &om.MimeType)
+			Scan(&om.ETag, &om.Health, (*time.Time)(&om.ModTime), &om.Key, &om.Size, &om.MimeType)
 		if err != nil {
 			return api.ObjectMetadata{}, fmt.Errorf("failed to fetch new object: %w", err)
 		}
@@ -571,7 +573,7 @@ func DeleteMetadata(ctx context.Context, tx sql.Tx, objID int64) error {
 	return err
 }

-func DeleteSettings(ctx context.Context, tx sql.Tx, key string) error {
+func DeleteSetting(ctx context.Context, tx sql.Tx, key string) error {
 	if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", key); err != nil {
 		return fmt.Errorf("failed to delete setting '%s': %w", key, err)
 	}
@@ -685,6 +687,239 @@ func HostBlocklist(ctx context.Context, tx sql.Tx) ([]string, error) {
 	return blocklist, nil
 }

+func Hosts(ctx context.Context, tx sql.Tx, opts api.HostOptions) ([]api.Host, error) {
+	if opts.Offset < 0 {
+		return nil, ErrNegativeOffset
+	} else if opts.AutopilotID == "" && opts.UsabilityMode != "" && opts.UsabilityMode != api.UsabilityFilterModeAll {
+		return nil, fmt.Errorf("%w: have to specify autopilot id when filter mode isn't 'all'", ErrMissingAutopilotID)
+	}
+
+	var hasAllowlist, hasBlocklist bool
+	if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_allowlist_entries)").Scan(&hasAllowlist); err != nil {
+		return nil, fmt.Errorf("failed to check for allowlist: %w", err)
+	} else if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_blocklist_entries)").Scan(&hasBlocklist); err != nil {
+		return nil, fmt.Errorf("failed to check for blocklist: %w", err)
+	}
+
+	// validate filterMode
+	switch opts.FilterMode {
+	case api.HostFilterModeAllowed:
+	case api.HostFilterModeBlocked:
+	case api.HostFilterModeAll:
+	default:
+		return nil, fmt.Errorf("invalid filter mode: %v", opts.FilterMode)
+	}
+
+	var whereExprs []string
+	var args []any
+
+	// fetch autopilot id
+	var autopilotID int64
+	if opts.AutopilotID != "" {
+		if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", opts.AutopilotID).
+			Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) {
+			return nil, api.ErrAutopilotNotFound
+		} else if err != nil {
+			return nil, fmt.Errorf("failed to fetch autopilot id: %w", err)
+		}
+	}
+
+	// filter allowlist/blocklist
+	switch opts.FilterMode {
+	case api.HostFilterModeAllowed:
+		if hasAllowlist {
+			whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+		}
+		if hasBlocklist {
+			whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+		}
+	case api.HostFilterModeBlocked:
+		if hasAllowlist {
+			whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+		}
+		if hasBlocklist {
+			whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+		}
+		if !hasAllowlist && !hasBlocklist {
+			// if neither an allowlist nor a blocklist exist, all hosts are
+			// allowed which means we return none
+			return []api.Host{}, nil
+		}
+	}
+
+	// filter address
+	if opts.AddressContains != "" {
+		whereExprs = append(whereExprs, "h.net_address LIKE ?")
+		args = append(args, "%"+opts.AddressContains+"%")
+	}
+
+	// filter public key
+	if len(opts.KeyIn) > 0 {
+		pubKeys := make([]any, len(opts.KeyIn))
+		for i, pk := range opts.KeyIn {
+			pubKeys[i] = PublicKey(pk)
+		}
+		placeholders := strings.Repeat("?, ", len(opts.KeyIn)-1) + "?"
+		whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders))
+		args = append(args, pubKeys...)
+	}
+
+	// filter usability
+	whereApExpr := ""
+	if opts.AutopilotID != "" {
+		whereApExpr = "AND hc.db_autopilot_id = ?"
+
+		switch opts.UsabilityMode {
+		case api.UsabilityFilterModeUsable:
+			whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr))
+			args = append(args, autopilotID)
+		case api.UsabilityFilterModeUnusable:
+			whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr))
+			args = append(args, autopilotID)
+		}
+	}
+
+	// offset + limit
+	if opts.Limit == -1 {
+		opts.Limit = math.MaxInt64
+	}
+	offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", opts.Limit, opts.Offset)
+
+	// fetch stored data for each host
+	rows, err := tx.Query(ctx, "SELECT host_key, SUM(size) FROM contracts WHERE archival_reason IS NULL GROUP BY host_key")
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch stored data: %w", err)
+	}
+	defer rows.Close()
+
+	storedDataMap := make(map[types.PublicKey]uint64)
+	for rows.Next() {
+		var hostKey PublicKey
+		var storedData uint64
+		if err := rows.Scan(&hostKey, &storedData); err != nil {
+			return nil, fmt.Errorf("failed to scan stored data: %w", err)
+		}
+		storedDataMap[types.PublicKey(hostKey)] = storedData
+	}
+
+	// query hosts
+	var blockedExprs []string
+	if hasAllowlist {
+		blockedExprs = append(blockedExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+	}
+	if hasBlocklist {
+		blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
+	}
+
+	var orderByExpr string
+	var blockedExpr string
+	if len(blockedExprs) > 0 {
+		blockedExpr = strings.Join(blockedExprs, " OR ")
+	} else {
+		blockedExpr = "FALSE"
+	}
+	var whereExpr string
+	if len(whereExprs) > 0 {
+		whereExpr = "WHERE " + strings.Join(whereExprs, " AND ")
+	}
+
+	rows, err = tx.Query(ctx, fmt.Sprintf(`
+		SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry,
+			h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success,
+			h.uptime, h.downtime, h.successful_interactions, h.failed_interactions, COALESCE(h.lost_sectors, 0),
+			h.scanned, h.resolved_addresses, %s
+		FROM hosts h
+		%s
+		%s
+		%s
+	`, blockedExpr, whereExpr, orderByExpr, offsetLimitStr), args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch hosts: %w", err)
+	}
+	defer rows.Close()
+
+	var hosts []api.Host
+	for rows.Next() {
+		var h api.Host
+		var hostID int64
+		var pte dsql.NullTime
+		var resolvedAddresses string
+		err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey),
+			&h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte,
+			(*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeMS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess,
+			&h.Interactions.SecondToLastScanSuccess, (*DurationMS)(&h.Interactions.Uptime), (*DurationMS)(&h.Interactions.Downtime),
+			&h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors,
+			&h.Scanned, &resolvedAddresses, &h.Blocked,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("failed to scan host: %w", err)
+		}
+
+		if resolvedAddresses != "" {
+			h.ResolvedAddresses = strings.Split(resolvedAddresses, ",")
+			h.Subnets, err = utils.AddressesToSubnets(h.ResolvedAddresses)
+			if err != nil {
+				return nil, fmt.Errorf("failed to convert addresses to subnets: %w", err)
+			}
+		}
+		h.PriceTable.Expiry = pte.Time
+		h.StoredData = storedDataMap[h.PublicKey]
+		hosts = append(hosts, h)
+	}
+
+	// query host checks
+	var apExpr string
+	if opts.AutopilotID != "" {
+		apExpr = "WHERE ap.identifier = ?"
+		args = append(args, opts.AutopilotID)
+	}
+	rows, err = tx.Query(ctx, fmt.Sprintf(`
+		SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip,
+			hc.usability_gouging, usability_not_accepting_contracts, hc.usability_not_announced, hc.usability_not_completing_scan,
+			hc.score_age, hc.score_collateral, hc.score_interactions, hc.score_storage_remaining, hc.score_uptime,
+			hc.score_version, hc.score_prices, hc.gouging_contract_err, hc.gouging_download_err, hc.gouging_gouging_err,
+			hc.gouging_prune_err, hc.gouging_upload_err
+		FROM (
+			SELECT h.id, h.public_key
+			FROM hosts h
+			%s
+			%s
+		) AS h
+		INNER JOIN host_checks hc ON hc.db_host_id = h.id
+		INNER JOIN autopilots ap ON hc.db_autopilot_id = ap.id
+		%s
+	`, whereExpr, offsetLimitStr, apExpr), args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch host checks: %w", err)
+	}
+	defer rows.Close()
+
+	hostChecks := make(map[types.PublicKey]map[string]api.HostCheck)
+	for rows.Next() {
+		var ap string
+		var pk PublicKey
+		var hc api.HostCheck
+		err := rows.Scan(&pk, &ap, &hc.UsabilityBreakdown.Blocked, &hc.UsabilityBreakdown.Offline, &hc.UsabilityBreakdown.LowScore, &hc.UsabilityBreakdown.RedundantIP,
+			&hc.UsabilityBreakdown.Gouging, &hc.UsabilityBreakdown.NotAcceptingContracts, &hc.UsabilityBreakdown.NotAnnounced, &hc.UsabilityBreakdown.NotCompletingScan,
+			&hc.ScoreBreakdown.Age, &hc.ScoreBreakdown.Collateral, &hc.ScoreBreakdown.Interactions, &hc.ScoreBreakdown.StorageRemaining, &hc.ScoreBreakdown.Uptime,
+			&hc.ScoreBreakdown.Version, &hc.ScoreBreakdown.Prices, &hc.GougingBreakdown.ContractErr, &hc.GougingBreakdown.DownloadErr, &hc.GougingBreakdown.GougingErr,
+			&hc.GougingBreakdown.PruneErr, &hc.GougingBreakdown.UploadErr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to scan host: %w", err)
+		}
+		if _, ok := hostChecks[types.PublicKey(pk)]; !ok {
+			hostChecks[types.PublicKey(pk)] = make(map[string]api.HostCheck)
+		}
+		hostChecks[types.PublicKey(pk)][ap] = hc
+	}
+
+	// fill in hosts
+	for i := range hosts {
+		hosts[i].Checks = hostChecks[hosts[i].PublicKey]
+	}
+	return hosts, nil
+}
+
 func HostsForScanning(ctx context.Context, tx sql.Tx, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) {
 	if offset < 0 {
 		return nil, ErrNegativeOffset
@@ -732,43 +967,6 @@ func InsertBufferedSlab(ctx context.Context, tx sql.Tx, fileName string, contrac
 	return bufferedSlabID, nil
 }

-func InsertContract(ctx context.Context, tx sql.Tx, c api.ContractMetadata) error {
-	// validate metadata
-	var state ContractState
-	if err := state.LoadString(c.State); err != nil {
-		return err
-	} else if c.ID == (types.FileContractID{}) {
-		return errors.New("contract id is required")
-	} else if c.HostKey == (types.PublicKey{}) {
-		return errors.New("host key is required")
-	}
-
-	var hostID int64
-	err := tx.QueryRow(ctx, `SELECT id FROM hosts WHERE public_key = ?`, PublicKey(c.HostKey)).Scan(&hostID)
-	if errors.Is(err, dsql.ErrNoRows) {
-		return api.ErrHostNotFound
-	} else if err != nil {
-		return err
-	}
-
-	// insert contract
-	_, err = tx.Exec(ctx, `
-INSERT INTO contracts (
-created_at, fcid, host_id, host_key,
-archival_reason, proof_height, renewed_from, renewed_to, revision_height, revision_number, size, start_height, state, window_start, window_end,
-contract_price, initial_renter_funds,
-delete_spending, fund_account_spending, sector_roots_spending, upload_spending
-) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-		time.Now(), FileContractID(c.ID), hostID, PublicKey(c.HostKey),
-		NullableString(c.ArchivalReason), c.ProofHeight, FileContractID(c.RenewedFrom), FileContractID(c.RenewedTo), c.RevisionHeight, c.RevisionNumber, c.Size, c.StartHeight, state, c.WindowStart, c.WindowEnd,
-		Currency(c.ContractPrice), Currency(c.InitialRenterFunds),
-		Currency(c.Spending.Deletions), Currency(c.Spending.FundAccount), Currency(c.Spending.SectorRoots), Currency(c.Spending.Uploads))
-	if err != nil {
-		return fmt.Errorf("failed to insert contract: %w", err)
-	}
-	return nil
-}
-
 func InsertMetadata(ctx context.Context, tx sql.Tx, objID, muID *int64, md api.ObjectUserMetadata) error {
 	if len(md) == 0 {
 		return nil
@@ -970,7 +1168,7 @@ func whereObjectMarker(marker, sortBy, sortDir string, queryMarker func(dst any,
 		return nil, nil, fmt.Errorf("sortBy and sortDir must be set")
 	}

-	desc := strings.ToLower(sortDir) == api.ObjectSortDirDesc
+	desc := strings.ToLower(sortDir) == api.SortDirDesc
 	switch strings.ToLower(sortBy) {
 	case api.ObjectSortByName:
 		if desc {
@@ -1013,8 +1211,8 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) {
 	}

 	dir2SQL := map[string]string{
-		api.ObjectSortDirAsc:  "ASC",
-		api.ObjectSortDirDesc: "DESC",
+		api.SortDirAsc:  "ASC",
+		api.SortDirDesc: "DESC",
 	}
 	if _, ok := dir2SQL[strings.ToLower(sortDir)]; !ok {
 		return nil, fmt.Errorf("invalid sortDir: %v", sortDir)
@@ -1037,117 +1235,31 @@ func orderByObject(sortBy, sortDir string) (orderByExprs []string, _ error) {
 	return orderByExprs, nil
 }

-func ListObjects(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) {
-	// fetch one more to see if there are more entries
-	if limit <= -1 {
-		limit = math.MaxInt
-	} else if limit != math.MaxInt {
-		limit++
+func ListObjects(ctx context.Context, tx Tx, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) {
+	switch delim {
+	case "":
+		resp, err = listObjectsNoDelim(ctx, tx, bucket, prefix, substring, sortBy, sortDir, marker, limit)
+	case "/":
+		resp, err = listObjectsSlashDelim(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit)
+	default:
+		err = fmt.Errorf("unsupported delimiter: '%s'", delim)
 	}
+	return
+}

-	// establish sane defaults for sorting
-	if sortBy == "" {
-		sortBy = api.ObjectSortByName
-	}
-	if sortDir == "" {
-		sortDir = api.ObjectSortDirAsc
+func MultipartUpload(ctx context.Context, tx sql.Tx, uploadID string) (api.MultipartUpload, error) {
+	resp, err := scanMultipartUpload(tx.QueryRow(ctx, "SELECT b.name, mu.key, mu.object_id, mu.upload_id, mu.created_at FROM multipart_uploads mu INNER JOIN buckets b ON b.id = mu.db_bucket_id WHERE mu.upload_id = ?", uploadID))
+	if err != nil {
+		return api.MultipartUpload{}, fmt.Errorf("failed to fetch multipart upload: %w", err)
 	}
+	return resp, nil
+}

-	// filter by bucket
-	whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"}
-	whereArgs := []any{bucket}
-
-	// apply prefix
-	if prefix != "" {
-		whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ?")
-		whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix)
-	}
-
-	// apply sorting
-	orderByExprs, err := orderByObject(sortBy, sortDir)
-	if err != nil {
-		return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err)
-	}
-
-	// apply marker
-	markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error {
-		err := tx.QueryRow(ctx, fmt.Sprintf(`
-			SELECT o.%s
-			FROM objects o
-			INNER JOIN buckets b ON o.db_bucket_id = b.id
-			WHERE b.name = ? AND o.object_id = ?
-		`, col), bucket, marker).Scan(dst)
-		if errors.Is(err, dsql.ErrNoRows) {
-			return api.ErrMarkerNotFound
-		} else {
-			return err
-		}
-	})
-	if err != nil {
-		return api.ObjectsListResponse{}, fmt.Errorf("failed to get marker exprs: %w", err)
-	}
-	whereExprs = append(whereExprs, markerExprs...)
-	whereArgs = append(whereArgs, markerArgs...)
-
-	// apply limit
-	whereArgs = append(whereArgs, limit)
-
-	// run query
-	rows, err := tx.Query(ctx, fmt.Sprintf(`
-		SELECT %s
-		FROM objects o
-		WHERE %s
-		ORDER BY %s
-		LIMIT ?
-	`,
-		tx.SelectObjectMetadataExpr(),
-		strings.Join(whereExprs, " AND "),
-		strings.Join(orderByExprs, ", ")),
-		whereArgs...)
-	if err != nil {
-		return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err)
-	}
-	defer rows.Close()
-
-	var objects []api.ObjectMetadata
-	for rows.Next() {
-		om, err := tx.ScanObjectMetadata(rows)
-		if err != nil {
-			return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err)
-		}
-		objects = append(objects, om)
-	}
-
-	var hasMore bool
-	var nextMarker string
-	if len(objects) == limit {
-		objects = objects[:len(objects)-1]
-		if len(objects) > 0 {
-			hasMore = true
-			nextMarker = objects[len(objects)-1].Name
-		}
-	}
-
-	return api.ObjectsListResponse{
-		HasMore:    hasMore,
-		NextMarker: nextMarker,
-		Objects:    objects,
-	}, nil
-}
-
-func MultipartUpload(ctx context.Context, tx sql.Tx, uploadID string) (api.MultipartUpload, error) {
-	resp, err := scanMultipartUpload(tx.QueryRow(ctx, "SELECT b.name, mu.key, mu.object_id, mu.upload_id, mu.created_at FROM multipart_uploads mu INNER JOIN buckets b ON b.id = mu.db_bucket_id WHERE mu.upload_id = ?", uploadID))
-	if err != nil {
-		return api.MultipartUpload{}, fmt.Errorf("failed to fetch multipart upload: %w", err)
-	}
-	return resp, nil
-}
-
-func MultipartUploadParts(ctx context.Context, tx sql.Tx, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) {
-	limitExpr := ""
-	limitUsed := limit > 0
-	if limitUsed {
-		limitExpr = fmt.Sprintf("LIMIT %d", limit+1)
+func MultipartUploadParts(ctx context.Context, tx sql.Tx, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) {
+	limitExpr := ""
+	limitUsed := limit > 0
+	if limitUsed {
+		limitExpr = fmt.Sprintf("LIMIT %d", limit+1)
 	}

 	rows, err := tx.Query(ctx, fmt.Sprintf(`
@@ -1244,7 +1356,7 @@ func MultipartUploads(ctx context.Context, tx sql.Tx, bucket, prefix, keyMarker,
 	if limitUsed && len(uploads) > int(limit) {
 		hasMore = true
 		uploads = uploads[:len(uploads)-1]
-		nextPathMarker = uploads[len(uploads)-1].Path
+		nextPathMarker = uploads[len(uploads)-1].Key
 		nextUploadIDMarker = uploads[len(uploads)-1].UploadID
 	}

@@ -1377,167 +1489,6 @@ func dirID(ctx context.Context, tx sql.Tx, dirPath string) (int64, error) {
 	return id, nil
 }

-func ObjectEntries(ctx context.Context, tx Tx, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) {
-	// sanity check we are passing a directory
-	if !strings.HasSuffix(path, "/") {
-		panic("path must end in /")
-	}
-
-	// sanity check we are passing sane paging parameters
-	usingMarker := marker != ""
-	usingOffset := offset > 0
-	if usingMarker && usingOffset {
-		return nil, false, errors.New("fetching entries using a marker and an offset is not supported at the same time")
-	}
-
-	// fetch one more to see if there are more entries
-	if limit <= -1 {
-		limit = math.MaxInt
-	} else if limit != math.MaxInt {
-		limit++
-	}
-
-	// establish sane defaults for sorting
-	if sortBy == "" {
-		sortBy = api.ObjectSortByName
-	}
-	if sortDir == "" {
-		sortDir = api.ObjectSortDirAsc
-	}
-
-	// fetch directory id
-	dirID, err := dirID(ctx, tx, path)
-	if errors.Is(err, dsql.ErrNoRows) {
-		return []api.ObjectMetadata{}, false, nil
-	} else if err != nil {
-		return nil, false, fmt.Errorf("failed to fetch directory id: %w", err)
-	}
-
-	args := []any{
-		path,
-		dirID, bucket,
-	}
-
-	// apply prefix
-	var prefixExpr string
-	if prefix != "" {
-		prefixExpr = "AND 
SUBSTR(o.object_id, 1, ?) = ?" - args = append(args, - utf8.RuneCountInString(path+prefix), path+prefix, - utf8.RuneCountInString(path+prefix), path+prefix, - ) - } - - args = append(args, - bucket, - path+"%", - utf8.RuneCountInString(path), path, - dirID, - ) - - // apply marker - var whereExpr string - markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { - var groupFn string - switch col { - case "size": - groupFn = "SUM" - case "health": - groupFn = "MIN" - default: - return fmt.Errorf("unknown column: %v", col) - } - err := tx.QueryRow(ctx, fmt.Sprintf(` - SELECT o.%s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE b.name = ? AND o.object_id = ? - UNION ALL - SELECT %s(o.%s) - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name - WHERE b.name = ? AND d.name = ? - GROUP BY d.id - `, col, groupFn, col, tx.CharLengthExpr()), bucket, marker, bucket, marker).Scan(dst) - if errors.Is(err, dsql.ErrNoRows) { - return api.ErrMarkerNotFound - } else { - return err - } - }) - if err != nil { - return nil, false, fmt.Errorf("failed to query marker: %w", err) - } else if len(markerExprs) > 0 { - whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") - } - args = append(args, markerArgs...) - - // apply sorting - orderByExprs, err := orderByObject(sortBy, sortDir) - if err != nil { - return nil, false, fmt.Errorf("failed to apply sorting: %w", err) - } - - // apply offset and limit - args = append(args, limit, offset) - - // objectsQuery consists of 2 parts - // 1. fetch all objects in requested directory - // 2. fetch all sub-directories - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM ( - SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag - FROM objects o - LEFT JOIN directories d ON d.name = o.object_id - WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) %s - AND d.id IS NULL - UNION ALL - SELECT d.name as object_id, SUM(o.size), MIN(o.health), '' as mime_type, MAX(o.created_at) as created_at, '' as etag - FROM objects o - INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name %s - WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) - AND o.object_id LIKE ? - AND SUBSTR(o.object_id, 1, ?) = ? - AND d.db_parent_id = ? - GROUP BY d.id - ) AS o - %s - ORDER BY %s - LIMIT ? OFFSET ? - `, - tx.SelectObjectMetadataExpr(), - prefixExpr, - tx.CharLengthExpr(), - prefixExpr, - whereExpr, - strings.Join(orderByExprs, ", "), - ), args...) 
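Both the removed listing helpers and their replacements page with the same trick: bump the limit by one and treat a full result set as proof that another page exists. A self-contained restatement of that idiom, with a plain slice standing in for the SQL round trip:

```go
package main

import "fmt"

// fetchPage queries limit+1 rows; if all of them come back, the extra row is
// trimmed and hasMore is set, mirroring the hasMore/nextMarker handling in
// the listing helpers.
func fetchPage(query func(limit int) []string, limit int) (page []string, hasMore bool) {
	rows := query(limit + 1)
	if len(rows) > limit {
		rows, hasMore = rows[:limit], true
	}
	return rows, hasMore
}

func main() {
	data := []string{"/a", "/b", "/c", "/d"}
	query := func(limit int) []string {
		if limit > len(data) {
			limit = len(data)
		}
		return data[:limit]
	}
	page, more := fetchPage(query, 3)
	fmt.Println(page, more) // [/a /b /c] true — "/c" would become the next marker
}
```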
- if err != nil { - return nil, false, fmt.Errorf("failed to fetch objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return nil, false, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - - // trim last element if we have more - var hasMore bool - if len(objects) == limit { - hasMore = true - objects = objects[:len(objects)-1] - } - - return objects, hasMore, nil -} - func ObjectMetadata(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) { // fetch object id var objID int64 @@ -1963,273 +1914,30 @@ func ResetLostSectors(ctx context.Context, tx sql.Tx, hk types.PublicKey) error return nil } -func SearchHosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - if offset < 0 { - return nil, ErrNegativeOffset +func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) { + var value string + err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value)) + if errors.Is(err, dsql.ErrNoRows) { + return "", ErrSettingNotFound + } else if err != nil { + return "", fmt.Errorf("failed to fetch setting '%s': %w", key, err) } + return value, nil +} - var hasAllowlist, hasBlocklist bool - if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_allowlist_entries)").Scan(&hasAllowlist); err != nil { - return nil, fmt.Errorf("failed to check for allowlist: %w", err) - } else if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_blocklist_entries)").Scan(&hasBlocklist); err != nil { - return nil, fmt.Errorf("failed to check for blocklist: %w", err) - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - return nil, fmt.Errorf("invalid filter mode: %v", filterMode) - } - - var whereExprs []string - var args []any - - // fetch autopilot id - var autopilotID int64 - if autopilot != "" { - if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", autopilot). 
- Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) { - return nil, api.ErrAutopilotNotFound - } else if err != nil { - return nil, fmt.Errorf("failed to fetch autopilot id: %w", err) - } - } - - // filter allowlist/blocklist - switch filterMode { - case api.HostFilterModeAllowed: - if hasAllowlist { - whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - case api.HostFilterModeBlocked: - if hasAllowlist { - whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if !hasAllowlist && !hasBlocklist { - // if neither an allowlist nor a blocklist exist, all hosts are - // allowed which means we return none - return []api.Host{}, nil - } - } - - // filter address - if addressContains != "" { - whereExprs = append(whereExprs, "h.net_address LIKE ?") - args = append(args, "%"+addressContains+"%") - } - - // filter public key - if len(keyIn) > 0 { - pubKeys := make([]any, len(keyIn)) - for i, pk := range keyIn { - pubKeys[i] = PublicKey(pk) - } - placeholders := strings.Repeat("?, ", len(keyIn)-1) + "?" - whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders)) - args = append(args, pubKeys...) - } - - // filter usability - whereApExpr := "" - if autopilot != "" { - whereApExpr = "AND hc.db_autopilot_id = ?" - } - switch usabilityMode { - case api.UsabilityFilterModeUsable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr)) - args = append(args, autopilotID) - case api.UsabilityFilterModeUnusable: - whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr)) - args = append(args, autopilotID) - } - - // offset + limit - if limit == -1 { - limit = math.MaxInt64 - } - offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset) - - // fetch stored data for each host - rows, err := tx.Query(ctx, "SELECT host_key, SUM(size) FROM contracts WHERE archival_reason IS NULL GROUP BY host_key") - if err != nil { - return nil, fmt.Errorf("failed to fetch stored data: %w", err) - } - defer rows.Close() - - storedDataMap := make(map[types.PublicKey]uint64) - for rows.Next() { - var hostKey PublicKey - var storedData uint64 - if err := rows.Scan(&hostKey, &storedData); err != nil { - return nil, fmt.Errorf("failed to scan stored data: %w", err) - } - storedDataMap[types.PublicKey(hostKey)] = storedData - } - - // query hosts - var blockedExprs []string - if hasAllowlist { - blockedExprs = 
append(blockedExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - if hasBlocklist { - blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)") - } - var blockedExpr string - if len(blockedExprs) > 0 { - blockedExpr = strings.Join(blockedExprs, " OR ") - } else { - blockedExpr = "FALSE" - } - var whereExpr string - if len(whereExprs) > 0 { - whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") - } - rows, err = tx.Query(ctx, fmt.Sprintf(` - SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry, - h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success, - h.uptime, h.downtime, h.successful_interactions, h.failed_interactions, COALESCE(h.lost_sectors, 0), - h.scanned, h.resolved_addresses, %s - FROM hosts h - %s - %s - `, blockedExpr, whereExpr, offsetLimitStr), args...) - if err != nil { - return nil, fmt.Errorf("failed to fetch hosts: %w", err) - } - defer rows.Close() - - var hosts []api.Host - for rows.Next() { - var h api.Host - var hostID int64 - var pte dsql.NullTime - var resolvedAddresses string - err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey), - &h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte, - (*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeMS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess, - &h.Interactions.SecondToLastScanSuccess, (*DurationMS)(&h.Interactions.Uptime), (*DurationMS)(&h.Interactions.Downtime), - &h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors, - &h.Scanned, &resolvedAddresses, &h.Blocked, - ) - if err != nil { - return nil, fmt.Errorf("failed to scan host: %w", err) - } - - if resolvedAddresses != "" { - h.ResolvedAddresses = strings.Split(resolvedAddresses, ",") - h.Subnets, err = utils.AddressesToSubnets(h.ResolvedAddresses) - if err != nil { - return nil, fmt.Errorf("failed to convert addresses to subnets: %w", err) - } - } - h.PriceTable.Expiry = pte.Time - h.StoredData = storedDataMap[h.PublicKey] - hosts = append(hosts, h) - } - - // query host checks - var apExpr string - if autopilot != "" { - apExpr = "WHERE ap.identifier = ?" - args = append(args, autopilot) - } - rows, err = tx.Query(ctx, fmt.Sprintf(` - SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip, - hc.usability_gouging, usability_not_accepting_contracts, hc.usability_not_announced, hc.usability_not_completing_scan, - hc.score_age, hc.score_collateral, hc.score_interactions, hc.score_storage_remaining, hc.score_uptime, - hc.score_version, hc.score_prices, hc.gouging_contract_err, hc.gouging_download_err, hc.gouging_gouging_err, - hc.gouging_prune_err, hc.gouging_upload_err - FROM ( - SELECT h.id, h.public_key - FROM hosts h - %s - %s - ) AS h - INNER JOIN host_checks hc ON hc.db_host_id = h.id - INNER JOIN autopilots ap ON hc.db_autopilot_id = ap.id - %s - `, whereExpr, offsetLimitStr, apExpr), args...) 
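The `blocked` flag computed here reduces to a simple rule: with an allowlist configured a host must be on it, with a blocklist configured it must not be, and with neither list nothing is blocked. A minimal Boolean restatement of the `blockedExpr` built above:

```go
package main

import "fmt"

// isBlocked restates the SQL blockedExpr in Go: NOT on the allowlist (if one
// exists) OR on the blocklist (if one exists); FALSE when no lists exist.
func isBlocked(hasAllowlist, hasBlocklist, onAllowlist, onBlocklist bool) bool {
	return (hasAllowlist && !onAllowlist) || (hasBlocklist && onBlocklist)
}

func main() {
	fmt.Println(isBlocked(false, false, false, false)) // false: no lists configured
	fmt.Println(isBlocked(true, false, false, false))  // true: not on the allowlist
	fmt.Println(isBlocked(false, true, false, true))   // true: on the blocklist
}
```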
- if err != nil { - return nil, fmt.Errorf("failed to fetch host checks: %w", err) - } - defer rows.Close() - - hostChecks := make(map[types.PublicKey]map[string]api.HostCheck) - for rows.Next() { - var ap string - var pk PublicKey - var hc api.HostCheck - err := rows.Scan(&pk, &ap, &hc.Usability.Blocked, &hc.Usability.Offline, &hc.Usability.LowScore, &hc.Usability.RedundantIP, - &hc.Usability.Gouging, &hc.Usability.NotAcceptingContracts, &hc.Usability.NotAnnounced, &hc.Usability.NotCompletingScan, - &hc.Score.Age, &hc.Score.Collateral, &hc.Score.Interactions, &hc.Score.StorageRemaining, &hc.Score.Uptime, - &hc.Score.Version, &hc.Score.Prices, &hc.Gouging.ContractErr, &hc.Gouging.DownloadErr, &hc.Gouging.GougingErr, - &hc.Gouging.PruneErr, &hc.Gouging.UploadErr) - if err != nil { - return nil, fmt.Errorf("failed to scan host: %w", err) - } - if _, ok := hostChecks[types.PublicKey(pk)]; !ok { - hostChecks[types.PublicKey(pk)] = make(map[string]api.HostCheck) - } - hostChecks[types.PublicKey(pk)][ap] = hc - } - - // fill in hosts - for i := range hosts { - hosts[i].Checks = hostChecks[hosts[i].PublicKey] - } - return hosts, nil -} - -func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) { - var value string - err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value)) - if errors.Is(err, dsql.ErrNoRows) { - return "", api.ErrSettingNotFound - } else if err != nil { - return "", fmt.Errorf("failed to fetch setting '%s': %w", key, err) - } - return value, nil -} - -func Settings(ctx context.Context, tx sql.Tx) ([]string, error) { - rows, err := tx.Query(ctx, "SELECT `key` FROM settings") - if err != nil { - return nil, fmt.Errorf("failed to query settings: %w", err) - } - defer rows.Close() - var settings []string - for rows.Next() { - var setting string - if err := rows.Scan(&setting); err != nil { - return nil, fmt.Errorf("failed to scan setting key") - } - settings = append(settings, setting) - } - return settings, nil -} - -func Slab(ctx context.Context, tx sql.Tx, key object.EncryptionKey) (object.Slab, error) { - // fetch slab - var slabID int64 - slab := object.Slab{Key: key} - err := tx.QueryRow(ctx, ` - SELECT id, health, min_shards - FROM slabs sla - WHERE sla.key = ? - `, EncryptionKey(key)).Scan(&slabID, &slab.Health, &slab.MinShards) - if errors.Is(err, dsql.ErrNoRows) { - return object.Slab{}, api.ErrSlabNotFound - } else if err != nil { - return object.Slab{}, fmt.Errorf("failed to fetch slab: %w", err) +func Slab(ctx context.Context, tx sql.Tx, key object.EncryptionKey) (object.Slab, error) { + // fetch slab + var slabID int64 + slab := object.Slab{EncryptionKey: key} + err := tx.QueryRow(ctx, ` + SELECT id, health, min_shards + FROM slabs sla + WHERE sla.key = ? 
+ `, EncryptionKey(key)).Scan(&slabID, &slab.Health, &slab.MinShards) + if errors.Is(err, dsql.ErrNoRows) { + return object.Slab{}, api.ErrSlabNotFound + } else if err != nil { + return object.Slab{}, fmt.Errorf("failed to fetch slab: %w", err) } // fetch sectors @@ -2352,7 +2060,7 @@ func UnhealthySlabs(ctx context.Context, tx sql.Tx, healthCutoff float64, set st var slabs []api.UnhealthySlab for rows.Next() { var slab api.UnhealthySlab - if err := rows.Scan((*EncryptionKey)(&slab.Key), &slab.Health); err != nil { + if err := rows.Scan((*EncryptionKey)(&slab.EncryptionKey), &slab.Health); err != nil { return nil, fmt.Errorf("failed to scan unhealthy slab: %w", err) } slabs = append(slabs, slab) @@ -2528,7 +2236,7 @@ func scanBucket(s Scanner) (api.Bucket, error) { } func scanMultipartUpload(s Scanner) (resp api.MultipartUpload, _ error) { - err := s.Scan(&resp.Bucket, (*EncryptionKey)(&resp.Key), &resp.Path, &resp.UploadID, &resp.CreatedAt) + err := s.Scan(&resp.Bucket, (*EncryptionKey)(&resp.EncryptionKey), &resp.Key, &resp.UploadID, &resp.CreatedAt) if errors.Is(err, dsql.ErrNoRows) { return api.MultipartUpload{}, api.ErrMultipartUploadNotFound } else if err != nil { @@ -2613,35 +2321,6 @@ func scanStateElement(s Scanner) (types.StateElement, error) { }, nil } -func SearchObjects(ctx context.Context, tx Tx, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - if limit <= -1 { - limit = math.MaxInt - } - - rows, err := tx.Query(ctx, fmt.Sprintf(` - SELECT %s - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - WHERE INSTR(o.object_id, ?) > 0 AND b.name = ? - ORDER BY o.object_id ASC - LIMIT ? OFFSET ? - `, tx.SelectObjectMetadataExpr()), substring, bucket, limit, offset) - if err != nil { - return nil, fmt.Errorf("failed to search objects: %w", err) - } - defer rows.Close() - - var objects []api.ObjectMetadata - for rows.Next() { - om, err := tx.ScanObjectMetadata(rows) - if err != nil { - return nil, fmt.Errorf("failed to scan object metadata: %w", err) - } - objects = append(objects, om) - } - return objects, nil -} - func ObjectsBySlabKey(ctx context.Context, tx Tx, bucket string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) { rows, err := tx.Query(ctx, fmt.Sprintf(` SELECT %s @@ -2846,7 +2525,7 @@ func Object(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) var hk types.PublicKey if err := rows.Scan(&bufferedSlab, // whether the slab is buffered &objectIndex, &ss.Offset, &ss.Length, // slice info - &ss.Health, (*EncryptionKey)(&ss.Key), &ss.MinShards, // slab info + &ss.Health, (*EncryptionKey)(&ss.EncryptionKey), &ss.MinShards, // slab info &slabIndex, (*Hash256)(§or.Root), (*PublicKey)(§or.LatestHost), // sector info (*PublicKey)(&fcid), // contract info (*PublicKey)(&hk), // host info @@ -2905,3 +2584,274 @@ func Object(ctx context.Context, tx Tx, bucket, key string) (api.Object, error) }, }, nil } + +func listObjectsNoDelim(ctx context.Context, tx Tx, bucket, prefix, substring, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // fetch one more to see if there are more entries + if limit <= -1 { + limit = math.MaxInt + } else if limit != math.MaxInt { + limit++ + } + + // establish sane defaults for sorting + if sortBy == "" { + sortBy = api.ObjectSortByName + } + if sortDir == "" { + sortDir = api.SortDirAsc + } + + // filter by bucket + whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"} + whereArgs := []any{bucket} + + // apply prefix + if 
prefix != "" { + whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ?") + whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) + } + + // apply substring + if substring != "" { + whereExprs = append(whereExprs, "INSTR(o.object_id, ?) > 0") + whereArgs = append(whereArgs, substring) + } + + // apply sorting + orderByExprs, err := orderByObject(sortBy, sortDir) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) + } + + // apply marker + markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { + err := tx.QueryRow(ctx, fmt.Sprintf(` + SELECT o.%s + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + WHERE b.name = ? AND o.object_id = ? + `, col), bucket, marker).Scan(dst) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMarkerNotFound + } else { + return err + } + }) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to get marker exprs: %w", err) + } + whereExprs = append(whereExprs, markerExprs...) + whereArgs = append(whereArgs, markerArgs...) + + // apply limit + whereArgs = append(whereArgs, limit) + + // run query + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT %s + FROM objects o + WHERE %s + ORDER BY %s + LIMIT ? + `, + tx.SelectObjectMetadataExpr(), + strings.Join(whereExprs, " AND "), + strings.Join(orderByExprs, ", ")), + whereArgs...) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) + } + defer rows.Close() + + var objects []api.ObjectMetadata + for rows.Next() { + om, err := tx.ScanObjectMetadata(rows) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + objects = append(objects, om) + } + + var hasMore bool + var nextMarker string + if len(objects) == limit { + objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Key + } + } + + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil +} + +func listObjectsSlashDelim(ctx context.Context, tx Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // split prefix into path and object prefix + path := "/" // root of bucket + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + path = prefix[:idx+1] + prefix = prefix[idx+1:] + } + if !strings.HasSuffix(path, "/") { + panic("path must end with /") + } + + // fetch one more to see if there are more entries + if limit <= -1 { + limit = math.MaxInt + } else if limit != math.MaxInt { + limit++ + } + + // establish sane defaults for sorting + if sortBy == "" { + sortBy = api.ObjectSortByName + } + if sortDir == "" { + sortDir = api.SortDirAsc + } + + // fetch directory id + dirID, err := dirID(ctx, tx, path) + if errors.Is(err, dsql.ErrNoRows) { + return api.ObjectsListResponse{}, nil + } else if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch directory id: %w", err) + } + + args := []any{ + path, + dirID, bucket, + } + + // apply prefix + var prefixExpr string + if prefix != "" { + prefixExpr = "AND SUBSTR(o.object_id, 1, ?) = ?" 
+ args = append(args, + utf8.RuneCountInString(path+prefix), path+prefix, + utf8.RuneCountInString(path+prefix), path+prefix, + ) + } + + args = append(args, + bucket, + path+"%", + utf8.RuneCountInString(path), path, + dirID, + ) + + // apply marker + var whereExpr string + markerExprs, markerArgs, err := whereObjectMarker(marker, sortBy, sortDir, func(dst any, marker, col string) error { + var groupFn string + switch col { + case "size": + groupFn = "SUM" + case "health": + groupFn = "MIN" + default: + return fmt.Errorf("unknown column: %v", col) + } + err := tx.QueryRow(ctx, fmt.Sprintf(` + SELECT o.%s + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + WHERE b.name = ? AND o.object_id = ? + UNION ALL + SELECT %s(o.%s) + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name + WHERE b.name = ? AND d.name = ? + GROUP BY d.id + `, col, groupFn, col, tx.CharLengthExpr()), bucket, marker, bucket, marker).Scan(dst) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMarkerNotFound + } else { + return err + } + }) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to query marker: %w", err) + } else if len(markerExprs) > 0 { + whereExpr = "WHERE " + strings.Join(markerExprs, " AND ") + } + args = append(args, markerArgs...) + + // apply sorting + orderByExprs, err := orderByObject(sortBy, sortDir) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to apply sorting: %w", err) + } + + // apply offset and limit + args = append(args, limit) + + // objectsQuery consists of 2 parts + // 1. fetch all objects in requested directory + // 2. fetch all sub-directories + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT %s + FROM ( + SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag + FROM objects o + LEFT JOIN directories d ON d.name = o.object_id + WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) %s + AND d.id IS NULL + UNION ALL + SELECT d.name as object_id, SUM(o.size), MIN(o.health), '' as mime_type, MAX(o.created_at) as created_at, '' as etag + FROM objects o + INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name %s + WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) + AND o.object_id LIKE ? + AND SUBSTR(o.object_id, 1, ?) = ? + AND d.db_parent_id = ? + GROUP BY d.id + ) AS o + %s + ORDER BY %s + LIMIT ? + `, + tx.SelectObjectMetadataExpr(), + prefixExpr, + tx.CharLengthExpr(), + prefixExpr, + whereExpr, + strings.Join(orderByExprs, ", "), + ), args...) 
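The second branch of the UNION above synthesizes one row per sub-directory: its size is the sum and its health the minimum over the objects below it. A toy restatement of that aggregation:

```go
package main

import "fmt"

type entry struct {
	name   string
	size   int64
	health float64
}

// rollupDir computes what the directory branch of the UNION yields for one
// sub-directory: summed size and worst (minimum) health of its objects. The
// SQL only emits a row when at least one object exists under the directory.
func rollupDir(name string, objects []entry) entry {
	dir := entry{name: name, health: 1}
	for _, o := range objects {
		dir.size += o.size
		if o.health < dir.health {
			dir.health = o.health
		}
	}
	return dir
}

func main() {
	fmt.Println(rollupDir("/photos/", []entry{
		{"/photos/a.jpg", 100, 0.9},
		{"/photos/b.jpg", 50, 0.5},
	})) // {/photos/ 150 0.5}
}
```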
+ if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) + } + defer rows.Close() + + var objects []api.ObjectMetadata + for rows.Next() { + om, err := tx.ScanObjectMetadata(rows) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + objects = append(objects, om) + } + + // trim last element if we have more + var hasMore bool + var nextMarker string + if len(objects) == limit { + objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Key + } + } + + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil +} diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 4728774f5..844913225 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -86,6 +86,11 @@ func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.Database }) } +func (b *MainDatabase) UpdateSetting(ctx context.Context, tx sql.Tx, key, value string) error { + mtx := b.wrapTxn(tx) + return mtx.UpdateSetting(ctx, key, value) +} + func (b *MainDatabase) Version(ctx context.Context) (string, string, error) { return version(ctx, b.db) } @@ -94,8 +99,8 @@ func (b *MainDatabase) wrapTxn(tx sql.Tx) *MainDatabaseTx { return &MainDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))} } -func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) +func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error { + return ssql.AbortMultipartUpload(ctx, tx, bucket, key, uploadID) } func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Account, error) { @@ -331,8 +336,8 @@ func (tx *MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicK return ssql.DeleteHostSector(ctx, tx, hk, root) } -func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error { - return ssql.DeleteSettings(ctx, tx, key) +func (tx *MainDatabaseTx) DeleteSetting(ctx context.Context, key string) error { + return ssql.DeleteSetting(ctx, tx, key) } func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { @@ -394,6 +399,10 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { return ssql.HostBlocklist(ctx, tx) } +func (tx *MainDatabaseTx) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, opts) +} + func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) } @@ -402,10 +411,6 @@ func (tx *MainDatabaseTx) InsertBufferedSlab(ctx context.Context, fileName strin return ssql.InsertBufferedSlab(ctx, tx, fileName, contractSetID, ec, minShards, totalShards) } -func (tx *MainDatabaseTx) InsertContract(ctx context.Context, c api.ContractMetadata) error { - return ssql.InsertContract(ctx, tx, c) -} - func (tx *MainDatabaseTx) InsertMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) { return ssql.InsertMultipartUpload(ctx, tx, bucket, key, ec, mimeType, metadata) } @@ -473,8 +478,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) 
([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -533,12 +538,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - return ssql.ObjectEntries(ctx, tx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) -} - -func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { - return ssql.ObjectMetadata(ctx, tx, bucket, path) +func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) { + return ssql.ObjectMetadata(ctx, tx, bucket, key) } func (tx *MainDatabaseTx) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey object.EncryptionKey) (metadata []api.ObjectMetadata, err error) { @@ -854,7 +855,7 @@ func (tx MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accoun } func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md api.ObjectMetadata, err error) { - dst := []any{&md.Name, &md.Size, &md.Health, &md.MimeType, &md.ModTime, &md.ETag} + dst := []any{&md.Key, &md.Size, &md.Health, &md.MimeType, &md.ModTime, &md.ETag} dst = append(dst, others...) 
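The new `ListObjects` signature threads `substring` and `delim` all the way down to the shared SQL helper, which supports exactly two delimiter modes. A hypothetical caller (bucket name and prefix are placeholders):

```go
package main

import (
	"context"

	"go.sia.tech/renterd/api"
)

// objectLister captures the new ListObjects signature from this change.
type objectLister interface {
	ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error)
}

func listBothWays(ctx context.Context, tx objectLister) error {
	// delim "": recursive listing of everything under the prefix
	if _, err := tx.ListObjects(ctx, "default", "/photos/", "", "", "", "", "", -1); err != nil {
		return err
	}
	// delim "/": a single directory level, S3-style; any other delimiter
	// fails with an "unsupported delimiter" error
	_, err := tx.ListObjects(ctx, "default", "/photos/", "", "/", "", "", "", -1)
	return err
}
```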
if err := s.Scan(dst...); err != nil { return api.ObjectMetadata{}, fmt.Errorf("failed to scan object metadata: %w", err) @@ -862,14 +863,6 @@ func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md return md, nil } -func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) -} - -func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) -} - func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag" } @@ -878,10 +871,6 @@ func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, erro return ssql.Setting(ctx, tx, key) } -func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { - return ssql.Settings(ctx, tx) -} - func (tx *MainDatabaseTx) Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) { return ssql.Slab(ctx, tx, key) } @@ -1103,10 +1092,10 @@ func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, score_storage_remaining = VALUES(score_storage_remaining), score_uptime = VALUES(score_uptime), score_version = VALUES(score_version), score_prices = VALUES(score_prices), gouging_contract_err = VALUES(gouging_contract_err), gouging_download_err = VALUES(gouging_download_err), gouging_gouging_err = VALUES(gouging_gouging_err), gouging_prune_err = VALUES(gouging_prune_err), gouging_upload_err = VALUES(gouging_upload_err) - `, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore, - hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan, - hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices, - hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr, + `, time.Now(), autopilot, ssql.PublicKey(hk), hc.UsabilityBreakdown.Blocked, hc.UsabilityBreakdown.Offline, hc.UsabilityBreakdown.LowScore, + hc.UsabilityBreakdown.RedundantIP, hc.UsabilityBreakdown.Gouging, hc.UsabilityBreakdown.NotAcceptingContracts, hc.UsabilityBreakdown.NotAnnounced, hc.UsabilityBreakdown.NotCompletingScan, + hc.ScoreBreakdown.Age, hc.ScoreBreakdown.Collateral, hc.ScoreBreakdown.Interactions, hc.ScoreBreakdown.StorageRemaining, hc.ScoreBreakdown.Uptime, hc.ScoreBreakdown.Version, hc.ScoreBreakdown.Prices, + hc.GougingBreakdown.ContractErr, hc.GougingBreakdown.DownloadErr, hc.GougingBreakdown.GougingErr, hc.GougingBreakdown.PruneErr, hc.GougingBreakdown.UploadErr, ) if err != nil { return fmt.Errorf("failed to insert host check: %w", err) @@ -1141,7 +1130,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac health_valid_until = ?, health = ? WHERE `+"`key`"+` = ? 
- `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.Key)) + `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.EncryptionKey)) if err != nil { return err } else if n, err := res.RowsAffected(); err != nil { @@ -1152,7 +1141,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac // fetch slab id and total shards var slabID, totalShards int64 - err = tx.QueryRow(ctx, "SELECT id, total_shards FROM slabs WHERE `key` = ?", ssql.EncryptionKey(s.Key)). + err = tx.QueryRow(ctx, "SELECT id, total_shards FROM slabs WHERE `key` = ?", ssql.EncryptionKey(s.EncryptionKey)). Scan(&slabID, &totalShards) if err != nil { return err @@ -1321,7 +1310,7 @@ func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, res, err := insertSlabStmt.Exec(ctx, time.Now(), contractSetID, - ssql.EncryptionKey(slices[i].Key), + ssql.EncryptionKey(slices[i].EncryptionKey), slices[i].MinShards, uint8(len(slices[i].Shards)), ) diff --git a/stores/sql/mysql/migrations/main/migration_00018_gouging_units.sql b/stores/sql/mysql/migrations/main/migration_00018_gouging_units.sql new file mode 100644 index 000000000..77982c509 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00018_gouging_units.sql @@ -0,0 +1,18 @@ +UPDATE settings +SET value = ( + -- Update settings to new values + SELECT JSON_REPLACE(value, '$.maxDownloadPrice', newMaxDownloadPrice, '$.maxUploadPrice', newMaxUploadPrice) + FROM ( + -- Convert TB to bytes by trimming the last 12 digits + SELECT + SUBSTR(maxDownloadPrice, 1, LENGTH(maxDownloadPrice)-12) AS newMaxDownloadPrice, + SUBSTR(maxUploadPrice, 1, LENGTH(maxUploadPrice)-12) AS newMaxUploadPrice + FROM ( + -- SELECT previous settings + SELECT + JSON_UNQUOTE(JSON_EXTRACT(value, '$.maxDownloadPrice')) AS maxDownloadPrice, + JSON_UNQUOTE(JSON_EXTRACT(value, '$.maxUploadPrice')) AS maxUploadPrice + ) AS _ + ) AS _ +) +WHERE settings.key = "gouging"; diff --git a/stores/sql/mysql/migrations/main/migration_00019_settings.sql b/stores/sql/mysql/migrations/main/migration_00019_settings.sql new file mode 100644 index 000000000..27f512d7f --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00019_settings.sql @@ -0,0 +1,83 @@ +-- avoid duplicate key errors +DELETE FROM settings WHERE `key` IN ("s3", "upload", "pinned"); + +-- migrate settings +INSERT INTO settings (created_at, `key`, value) +SELECT NOW(), k, v +FROM ( + -- upload is a combination of uploadpacking, redundancy, and contractset + SELECT + "upload" as k, + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ) as v + WHERE json_extract( + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ), "$.packing" + ) IS NOT NULL + AND json_extract( + json_merge_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "uploadpacking")), + json_merge_patch( + json_object("redundancy", (SELECT 
json_extract(value, "$") FROM settings WHERE `key` = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE `key` = "contractset")) + ) + ), "$.redundancy" + ) IS NOT NULL + + UNION ALL + + -- s3 wraps the s3authentication setting + SELECT + "s3" as k, + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "s3authentication")) as v + WHERE json_extract( + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE `key` = "s3authentication")), + "$.authentication" + ) IS NOT NULL + + UNION ALL + + -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' fields + SELECT + "pinned" as k, + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ) as v + WHERE json_extract( + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ), + "$.currency" + ) IS NOT NULL + AND json_extract( + json_remove( + json_remove( + (SELECT json_extract(value, "$") FROM settings WHERE `key` = "pricepinning"), + "$.enabled" + ), + "$.forexEndpointURL" + ), + "$.threshold" + ) IS NOT NULL +) as migration; + +-- delete old settings +DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning"); diff --git a/stores/sql/mysql/migrations/main/migration_00020_idx_db_directory.sql b/stores/sql/mysql/migrations/main/migration_00020_idx_db_directory.sql new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00020_idx_db_directory.sql @@ -0,0 +1 @@ + diff --git a/stores/sql/mysql/migrations/main/migration_00018_archived_contracts.sql b/stores/sql/mysql/migrations/main/migration_00021_archived_contracts.sql similarity index 100% rename from stores/sql/mysql/migrations/main/migration_00018_archived_contracts.sql rename to stores/sql/mysql/migrations/main/migration_00021_archived_contracts.sql diff --git a/stores/sql/sqlite/chain.go b/stores/sql/sqlite/chain.go index d4d04c162..5a2084590 100644 --- a/stores/sql/sqlite/chain.go +++ b/stores/sql/sqlite/chain.go @@ -132,7 +132,7 @@ func (c chainUpdateTx) WalletRevertIndex(index types.ChainIndex, removed, unspen // delete removed outputs for _, e := range removed { c.l.Debugw(fmt.Sprintf("remove output %v", e.ID), "height", index.Height, "block_id", index.ID) - if res, err := deleteRemovedStmt.Exec(c.ctx, e.ID); err != nil { + if res, err := deleteRemovedStmt.Exec(c.ctx, ssql.Hash256(e.ID)); err != nil { return fmt.Errorf("failed to delete removed output: %w", err) } else if n, err := res.RowsAffected(); err != nil { return fmt.Errorf("failed to get rows affected: %w", err) diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 50aca9e13..f9508a6a1 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -85,6 +85,11 @@ func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.Database }) } +func (b *MainDatabase) UpdateSetting(ctx context.Context, tx sql.Tx, key, value string) error { + mtx := b.wrapTxn(tx) + return mtx.UpdateSetting(ctx, key, value) +} + func (b *MainDatabase) Version(ctx context.Context) (string, string, error) { return version(ctx, b.db) } @@ -97,8 +102,8 @@ func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Acc return ssql.Accounts(ctx, tx, 
owner) } -func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) +func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error { + return ssql.AbortMultipartUpload(ctx, tx, bucket, key, uploadID) } func (tx *MainDatabaseTx) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error { @@ -335,8 +340,8 @@ func (tx *MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicK return ssql.DeleteHostSector(ctx, tx, hk, root) } -func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error { - return ssql.DeleteSettings(ctx, tx, key) +func (tx *MainDatabaseTx) DeleteSetting(ctx context.Context, key string) error { + return ssql.DeleteSetting(ctx, tx, key) } func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { @@ -391,12 +396,12 @@ func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { return ssql.HostBlocklist(ctx, tx) } -func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { - return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) +func (tx *MainDatabaseTx) Hosts(ctx context.Context, opts api.HostOptions) ([]api.Host, error) { + return ssql.Hosts(ctx, tx, opts) } -func (tx *MainDatabaseTx) InsertContract(ctx context.Context, c api.ContractMetadata) error { - return ssql.InsertContract(ctx, tx, c) +func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { + return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) } func (tx *MainDatabaseTx) InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error { @@ -459,8 +464,8 @@ func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) return ssql.ListBuckets(ctx, tx) } -func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, substring, delim, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, substring, delim, sortBy, sortDir, marker, limit) } func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { @@ -529,12 +534,8 @@ func (tx *MainDatabaseTx) Object(ctx context.Context, bucket, key string) (api.O return ssql.Object(ctx, tx, bucket, key) } -func (tx *MainDatabaseTx) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) { - return ssql.ObjectEntries(ctx, tx, bucket, path, prefix, sortBy, sortDir, marker, offset, limit) -} - -func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { - return ssql.ObjectMetadata(ctx, tx, bucket, path) +func (tx *MainDatabaseTx) ObjectMetadata(ctx context.Context, bucket, key string) (api.Object, error) { + return ssql.ObjectMetadata(ctx, tx, bucket, key) } func (tx *MainDatabaseTx) 
ObjectsBySlabKey(ctx context.Context, bucket string, slabKey object.EncryptionKey) (metadata []api.ObjectMetadata, err error) { @@ -857,7 +858,7 @@ func (tx *MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accou func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md api.ObjectMetadata, err error) { var createdAt string - dst := []any{&md.Name, &md.Size, &md.Health, &md.MimeType, &createdAt, &md.ETag} + dst := []any{&md.Key, &md.Size, &md.Health, &md.MimeType, &createdAt, &md.ETag} dst = append(dst, others...) if err := s.Scan(dst...); err != nil { return api.ObjectMetadata{}, fmt.Errorf("failed to scan object metadata: %w", err) @@ -867,14 +868,6 @@ func (tx *MainDatabaseTx) ScanObjectMetadata(s ssql.Scanner, others ...any) (md return md, nil } -func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) -} - -func (tx *MainDatabaseTx) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { - return ssql.SearchObjects(ctx, tx, bucket, substring, offset, limit) -} - func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, DATETIME(o.created_at), o.etag" } @@ -937,10 +930,6 @@ func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, erro return ssql.Setting(ctx, tx, key) } -func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { - return ssql.Settings(ctx, tx) -} - func (tx *MainDatabaseTx) Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) { return ssql.Slab(ctx, tx, key) } @@ -1105,10 +1094,10 @@ func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, score_storage_remaining = EXCLUDED.score_storage_remaining, score_uptime = EXCLUDED.score_uptime, score_version = EXCLUDED.score_version, score_prices = EXCLUDED.score_prices, gouging_contract_err = EXCLUDED.gouging_contract_err, gouging_download_err = EXCLUDED.gouging_download_err, gouging_gouging_err = EXCLUDED.gouging_gouging_err, gouging_prune_err = EXCLUDED.gouging_prune_err, gouging_upload_err = EXCLUDED.gouging_upload_err - `, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore, - hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan, - hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices, - hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr, + `, time.Now(), autopilot, ssql.PublicKey(hk), hc.UsabilityBreakdown.Blocked, hc.UsabilityBreakdown.Offline, hc.UsabilityBreakdown.LowScore, + hc.UsabilityBreakdown.RedundantIP, hc.UsabilityBreakdown.Gouging, hc.UsabilityBreakdown.NotAcceptingContracts, hc.UsabilityBreakdown.NotAnnounced, hc.UsabilityBreakdown.NotCompletingScan, + hc.ScoreBreakdown.Age, hc.ScoreBreakdown.Collateral, hc.ScoreBreakdown.Interactions, hc.ScoreBreakdown.StorageRemaining, hc.ScoreBreakdown.Uptime, hc.ScoreBreakdown.Version, hc.ScoreBreakdown.Prices, + hc.GougingBreakdown.ContractErr, hc.GougingBreakdown.DownloadErr, hc.GougingBreakdown.GougingErr, 
hc.GougingBreakdown.PruneErr, hc.GougingBreakdown.UploadErr, ) if err != nil { return fmt.Errorf("failed to insert host check: %w", err) @@ -1145,7 +1134,7 @@ func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contrac health = ? WHERE key = ? RETURNING id, total_shards - `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.Key)). + `, contractSet, time.Now().Unix(), 1, ssql.EncryptionKey(s.EncryptionKey)). Scan(&slabID, &totalShards) if errors.Is(err, dsql.ErrNoRows) { return api.ErrSlabNotFound @@ -1312,12 +1301,12 @@ func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, err = insertSlabStmt.QueryRow(ctx, time.Now(), contractSetID, - ssql.EncryptionKey(slices[i].Key), + ssql.EncryptionKey(slices[i].EncryptionKey), slices[i].MinShards, uint8(len(slices[i].Shards)), ).Scan(&slabIDs[i]) if errors.Is(err, dsql.ErrNoRows) { - if err := querySlabIDStmt.QueryRow(ctx, ssql.EncryptionKey(slices[i].Key)).Scan(&slabIDs[i]); err != nil { + if err := querySlabIDStmt.QueryRow(ctx, ssql.EncryptionKey(slices[i].EncryptionKey)).Scan(&slabIDs[i]); err != nil { return fmt.Errorf("failed to fetch slab id: %w", err) } } else if err != nil { diff --git a/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql b/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql new file mode 100644 index 000000000..c5be99f11 --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00018_gouging_units.sql @@ -0,0 +1,18 @@ +UPDATE settings +SET value = ( + -- Update settings to new values + SELECT JSON_REPLACE(value, '$.maxDownloadPrice', newMaxDownloadPrice, '$.maxUploadPrice', newMaxUploadPrice) + FROM ( + -- Convert TB to bytes by trimming the last 12 digits + SELECT + SUBSTR(maxDownloadPrice, 1, LENGTH(maxDownloadPrice)-12) AS newMaxDownloadPrice, + SUBSTR(maxUploadPrice, 1, LENGTH(maxUploadPrice)-12) AS newMaxUploadPrice + FROM ( + -- SELECT previous settings + SELECT + JSON_EXTRACT(value, '$.maxDownloadPrice') AS maxDownloadPrice, + JSON_EXTRACT(value, '$.maxUploadPrice') AS maxUploadPrice + ) AS _ + ) AS _ +) +WHERE settings.key = "gouging"; diff --git a/stores/sql/sqlite/migrations/main/migration_00019_settings.sql b/stores/sql/sqlite/migrations/main/migration_00019_settings.sql new file mode 100644 index 000000000..98d4aa36b --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00019_settings.sql @@ -0,0 +1,48 @@ +-- avoid duplicate key errors +DELETE FROM settings WHERE `key` IN ("s3", "upload", "pinned"); + +-- migrate settings +INSERT INTO settings (created_at, `key`, value) +SELECT DATETIME('now'), k, v +FROM ( + -- upload is a combination of uploadpacking, redundancy, and contractset + SELECT + "upload" as k, + json_patch( + json_object("packing", (SELECT json_extract(value, "$") FROM settings WHERE key = "uploadpacking")), + json_patch( + json_object("redundancy", (SELECT json_extract(value, "$") FROM settings WHERE key = "redundancy")), + json_object("defaultContractSet", (SELECT json_extract(value, "$.default") FROM settings WHERE key = "contractset")) + ) + ) as v + WHERE + json_extract(v, "$.packing") IS NOT NULL AND + json_extract(v, "$.redundancy") IS NOT NULL + + UNION ALL + + -- s3 wraps the s3authentication setting + SELECT + "s3" as k, + json_object("authentication", (SELECT json_extract(value, "$") FROM settings WHERE key = "s3authentication")) as v + WHERE json_extract(v, "$.authentication") IS NOT NULL + + UNION ALL + + -- pinning renames pricepinning and removes the 'enabled' and 'forexEndpointURL' 
fields
+    SELECT
+        "pinned" as k,
+        json_remove(
+            json_remove(
+                (SELECT json_extract(value, "$") FROM settings WHERE key = "pricepinning"),
+                "$.enabled"
+            ),
+            "$.forexEndpointURL"
+        ) as v
+    WHERE
+        json_extract(v, "$.currency") IS NOT NULL AND
+        json_extract(v, "$.threshold") IS NOT NULL
+);
+
+-- delete old settings
+DELETE FROM settings WHERE `key` IN ("uploadpacking", "redundancy", "contractset", "s3authentication", "pricepinning");
diff --git a/stores/sql/sqlite/migrations/main/migration_00020_idx_db_directory.sql b/stores/sql/sqlite/migrations/main/migration_00020_idx_db_directory.sql
new file mode 100644
index 000000000..5757fd280
--- /dev/null
+++ b/stores/sql/sqlite/migrations/main/migration_00020_idx_db_directory.sql
@@ -0,0 +1 @@
+CREATE INDEX IF NOT EXISTS `idx_objects_db_directory_id` ON `objects`(`db_directory_id`);
diff --git a/stores/sql/sqlite/migrations/main/migration_00018_archived_contracts.sql b/stores/sql/sqlite/migrations/main/migration_00021_archived_contracts.sql
similarity index 100%
rename from stores/sql/sqlite/migrations/main/migration_00018_archived_contracts.sql
rename to stores/sql/sqlite/migrations/main/migration_00021_archived_contracts.sql
diff --git a/stores/sql/sqlite/migrations/main/schema.sql b/stores/sql/sqlite/migrations/main/schema.sql
index b1c64b696..462db3d38 100644
--- a/stores/sql/sqlite/migrations/main/schema.sql
+++ b/stores/sql/sqlite/migrations/main/schema.sql
@@ -48,6 +48,7 @@ CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`);
 CREATE INDEX `idx_objects_size` ON `objects`(`size`);
 CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`);
 CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`);
+CREATE INDEX `idx_objects_db_directory_id` ON `objects`(`db_directory_id`);

 -- dbMultipartUpload
 CREATE TABLE `multipart_uploads` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`key` blob,`upload_id` text NOT NULL,`object_id` text NOT NULL,`db_bucket_id` integer NOT NULL,`mime_type` text,CONSTRAINT `fk_multipart_uploads_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`) ON DELETE CASCADE);
diff --git a/stores/sql_test.go b/stores/sql_test.go
index ae130bedd..24bcfb6e1 100644
--- a/stores/sql_test.go
+++ b/stores/sql_test.go
@@ -282,10 +282,10 @@ func (s *testSQLStore) Retry(tries int, durationBetweenAttempts time.Duration, f
 	}
 }

-func (s *testSQLStore) addTestObject(path string, o object.Object) (api.Object, error) {
-	if err := s.UpdateObjectBlocking(context.Background(), api.DefaultBucketName, path, testContractSet, testETag, testMimeType, testMetadata, o); err != nil {
+func (s *testSQLStore) addTestObject(key string, o object.Object) (api.Object, error) {
+	if err := s.UpdateObjectBlocking(context.Background(), api.DefaultBucketName, key, testContractSet, testETag, testMimeType, testMetadata, o); err != nil {
 		return api.Object{}, err
-	} else if obj, err := s.Object(context.Background(), api.DefaultBucketName, path); err != nil {
+	} else if obj, err := s.Object(context.Background(), api.DefaultBucketName, key); err != nil {
 		return api.Object{}, err
 	} else {
 		return obj, nil
@@ -314,7 +314,7 @@ func (s *testSQLStore) addTestContracts(keys []types.PublicKey) (fcids []types.F
 }

 func (s *SQLStore) addTestContract(fcid types.FileContractID, hk types.PublicKey) (api.ContractMetadata, error) {
-	if err := s.InsertContract(context.Background(), newTestContract(fcid, hk)); err != nil {
+	if err := s.PutContract(context.Background(), newTestContract(fcid, hk)); err != nil {
		return api.ContractMetadata{}, err
	}
	return s.Contract(context.Background(), fcid)
diff --git a/worker/alerts.go b/worker/alerts.go
index 664698fb6..02598c770 100644
--- a/worker/alerts.go
+++ b/worker/alerts.go
@@ -13,14 +13,14 @@ func randomAlertID() types.Hash256 {
 	return frand.Entropy256()
 }

-func newDownloadFailedAlert(bucket, path string, offset, length, contracts int64, err error) alerts.Alert {
+func newDownloadFailedAlert(bucket, key string, offset, length, contracts int64, err error) alerts.Alert {
 	return alerts.Alert{
 		ID:       randomAlertID(),
 		Severity: alerts.SeverityError,
 		Message:  "Download failed",
 		Data: map[string]any{
 			"bucket":    bucket,
-			"path":      path,
+			"key":       key,
 			"offset":    offset,
 			"length":    length,
 			"contracts": contracts,
diff --git a/worker/bench_test.go b/worker/bench_test.go
index cc0034415..3cde1792a 100644
--- a/worker/bench_test.go
+++ b/worker/bench_test.go
@@ -37,15 +37,15 @@ func BenchmarkDownloaderSingleObject(b *testing.B) {
 	if err != nil {
 		b.Fatal(err)
 	}
-	o, err := w.os.Object(context.Background(), testBucket, up.path, api.GetObjectOptions{})
+	o, err := w.os.Object(context.Background(), testBucket, up.key, api.GetObjectOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
-	b.SetBytes(o.Object.Size)
+	b.SetBytes(o.Size)
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts())
+		err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object, 0, uint64(o.Size), w.Contracts())
 		if err != nil {
 			b.Fatal(err)
 		}
diff --git a/worker/client/client.go b/worker/client/client.go
index 2bac4f99f..ca5aee3c8 100644
--- a/worker/client/client.go
+++ b/worker/client/client.go
@@ -2,7 +2,6 @@ package client

 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -59,24 +58,24 @@ func (c *Client) Contracts(ctx context.Context, hostTimeout time.Duration) (resp
 }

 // DeleteObject deletes the object at the given path.
-func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) {
+func (c *Client) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) {
 	values := url.Values{}
 	values.Set("bucket", bucket)
 	opts.Apply(values)
-	path = api.ObjectPathEscape(path)
-	err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), path))
+	key = api.ObjectKeyEscape(key)
+	err = c.c.WithContext(ctx).DELETE(fmt.Sprintf("/objects/%s?"+values.Encode(), key))
 	return
 }

-// DownloadObject downloads the object at the given path.
-func (c *Client) DownloadObject(ctx context.Context, w io.Writer, bucket, path string, opts api.DownloadObjectOptions) (err error) {
-	if strings.HasSuffix(path, "/") {
-		return errors.New("the given path is a directory, use ObjectEntries instead")
+// DownloadObject downloads the object at the given key.
+func (c *Client) DownloadObject(ctx context.Context, w io.Writer, bucket, key string, opts api.DownloadObjectOptions) (err error) {
+	if strings.HasSuffix(key, "/") {
+		return errors.New("the given key is a directory")
 	}
-	path = api.ObjectPathEscape(path)
-	body, _, err := c.object(ctx, bucket, path, opts)
+	key = api.ObjectKeyEscape(key)
+	body, _, err := c.object(ctx, bucket, key, opts)
 	if err != nil {
 		return err
 	}
@@ -91,18 +90,18 @@ func (c *Client) DownloadStats() (resp api.DownloadStatsResponse, err error) {
 	return
 }

-// HeadObject returns the metadata of the object at the given path.
-func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) {
-	c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", path), nil, nil)
+// HeadObject returns the metadata of the object at the given key.
+func (c *Client) HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) {
+	c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", key), nil, nil)

 	values := url.Values{}
 	values.Set("bucket", url.QueryEscape(bucket))
 	opts.Apply(values)

-	path = api.ObjectPathEscape(path)
-	path += "?" + values.Encode()
+	key = api.ObjectKeyEscape(key)
+	key += "?" + values.Encode()

 	// TODO: support HEAD in jape client
-	req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, key), http.NoBody)
 	if err != nil {
 		panic(err)
 	}
@@ -123,14 +122,14 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H
 	return &head, nil
 }

-// GetObject returns the object at given path alongside its metadata.
-func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) {
-	if strings.HasSuffix(path, "/") {
+// GetObject returns the object at the given key alongside its metadata.
+func (c *Client) GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) {
+	if strings.HasSuffix(key, "/") {
 		return nil, errors.New("the given path is a directory, use ObjectEntries instead")
 	}

-	path = api.ObjectPathEscape(path)
-	body, header, err := c.object(ctx, bucket, path, opts)
+	key = api.ObjectKeyEscape(key)
+	body, header, err := c.object(ctx, bucket, key, opts)
 	if err != nil {
 		return nil, err
 	}
@@ -172,21 +171,6 @@ func (c *Client) MigrateSlab(ctx context.Context, slab object.Slab, set string)
 	return
 }

-// ObjectEntries returns the entries at the given path, which must end in /.
-func (c *Client) ObjectEntries(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (entries []api.ObjectMetadata, err error) {
-	path = api.ObjectPathEscape(path)
-	body, _, err := c.object(ctx, bucket, path, api.DownloadObjectOptions{
-		GetObjectOptions: opts,
-	})
-	if err != nil {
-		return nil, err
-	}
-	defer io.Copy(io.Discard, body)
-	defer body.Close()
-	err = json.NewDecoder(body).Decode(&entries)
-	return
-}
-
 // State returns the current state of the worker.
 func (c *Client) State() (state api.WorkerStateResponse, err error) {
 	err = c.c.GET("/state", &state)
@@ -195,7 +179,7 @@ func (c *Client) State() (state api.WorkerStateResponse, err error) {

 // UploadMultipartUploadPart uploads part of the data for a multipart upload.
 func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) {
-	path = api.ObjectPathEscape(path)
+	path = api.ObjectKeyEscape(path)
 	c.c.Custom("PUT", fmt.Sprintf("/multipart/%s", path), []byte{}, nil)

 	values := make(url.Values)
@@ -227,14 +211,14 @@ func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc
 }

 // UploadObject uploads the data in r, creating an object at the given path.
-func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { - path = api.ObjectPathEscape(path) - c.c.Custom("PUT", fmt.Sprintf("/objects/%s", path), []byte{}, nil) +func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { + key = api.ObjectKeyEscape(key) + c.c.Custom("PUT", fmt.Sprintf("/objects/%s", key), []byte{}, nil) values := make(url.Values) values.Set("bucket", bucket) opts.ApplyValues(values) - u, err := url.Parse(fmt.Sprintf("%v/objects/%v", c.c.BaseURL, path)) + u, err := url.Parse(fmt.Sprintf("%v/objects/%v", c.c.BaseURL, key)) if err != nil { panic(err) } @@ -269,14 +253,13 @@ func (c *Client) NotifyEvent(ctx context.Context, e webhooks.Event) (err error) return } -func (c *Client) object(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { +func (c *Client) object(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) - opts.ApplyValues(values) - path += "?" + values.Encode() + key += "?" + values.Encode() - c.c.Custom("GET", fmt.Sprintf("/objects/%s", path), nil, (*[]api.ObjectMetadata)(nil)) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) + c.c.Custom("GET", fmt.Sprintf("/objects/%s", key), nil, (*[]api.ObjectMetadata)(nil)) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, key), http.NoBody) if err != nil { panic(err) } diff --git a/worker/download.go b/worker/download.go index e1c54771d..cedcd7a82 100644 --- a/worker/download.go +++ b/worker/download.go @@ -173,7 +173,7 @@ func (mgr *downloadManager) DownloadObject(ctx context.Context, w io.Writer, o o if !slabs[i].PartialSlab { continue } - data, slab, err := mgr.fetchPartialSlab(ctx, slabs[i].SlabSlice.Key, slabs[i].SlabSlice.Offset, slabs[i].SlabSlice.Length) + data, slab, err := mgr.fetchPartialSlab(ctx, slabs[i].SlabSlice.EncryptionKey, slabs[i].SlabSlice.Offset, slabs[i].SlabSlice.Length) if err != nil { return fmt.Errorf("failed to fetch partial slab data: %w", err) } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index feacaba6d..24a70adcb 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -60,7 +60,6 @@ type busMock struct { *settingStoreMock *syncerMock *s3Mock - *walletMock *webhookBroadcasterMock *webhookStoreMock } @@ -76,7 +75,6 @@ func newBusMock(cs *contractStoreMock, hs *hostStoreMock, os *objectStoreMock) * objectStoreMock: os, settingStoreMock: &settingStoreMock{}, syncerMock: &syncerMock{}, - walletMock: &walletMock{}, webhookBroadcasterMock: &webhookBroadcasterMock{}, } } @@ -386,7 +384,7 @@ func (os *objectStoreMock) DeleteHostSector(ctx context.Context, hk types.Public return nil } -func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error { +func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) error { return nil } @@ -433,32 +431,32 @@ func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minS return []object.SlabSlice{ss}, os.totalSlabBufferSize() > os.slabBufferMaxSizeSoft, nil } -func (os *objectStoreMock) Object(ctx context.Context, bucket, path 
string, opts api.GetObjectOptions) (api.ObjectsResponse, error) { +func (os *objectStoreMock) Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (api.Object, error) { os.mu.Lock() defer os.mu.Unlock() // check if the bucket exists if _, exists := os.objects[bucket]; !exists { - return api.ObjectsResponse{}, api.ErrBucketNotFound + return api.Object{}, api.ErrBucketNotFound } // check if the object exists - if _, exists := os.objects[bucket][path]; !exists { - return api.ObjectsResponse{}, api.ErrObjectNotFound + if _, exists := os.objects[bucket][key]; !exists { + return api.Object{}, api.ErrObjectNotFound } // clone to ensure the store isn't unwillingly modified var o object.Object - if b, err := json.Marshal(os.objects[bucket][path]); err != nil { + if b, err := json.Marshal(os.objects[bucket][key]); err != nil { panic(err) } else if err := json.Unmarshal(b, &o); err != nil { panic(err) } - return api.ObjectsResponse{Object: &api.Object{ - ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, + return api.Object{ + ObjectMetadata: api.ObjectMetadata{Key: key, Size: o.TotalSize()}, Object: &o, - }}, nil + }, nil } func (os *objectStoreMock) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) { @@ -480,9 +478,9 @@ func (os *objectStoreMock) Slab(ctx context.Context, key object.EncryptionKey) ( os.mu.Lock() defer os.mu.Unlock() - os.forEachObject(func(bucket, path string, o object.Object) { + os.forEachObject(func(bucket, objKey string, o object.Object) { for _, s := range o.Slabs { - if s.Slab.Key.String() == key.String() { + if s.Slab.EncryptionKey.String() == key.String() { slab = s.Slab return } @@ -496,13 +494,13 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra os.mu.Lock() defer os.mu.Unlock() - os.forEachObject(func(bucket, path string, o object.Object) { + os.forEachObject(func(bucket, objKey string, o object.Object) { for i, slab := range o.Slabs { - if slab.Key.String() != s.Key.String() { + if slab.EncryptionKey.String() != s.EncryptionKey.String() { continue } // update slab - shards := os.objects[bucket][path].Slabs[i].Slab.Shards + shards := os.objects[bucket][objKey].Slabs[i].Slab.Shards for sI := range shards { // overwrite latest host shards[sI].LatestHost = s.Shards[sI].LatestHost @@ -523,7 +521,7 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra } } } - os.objects[bucket][path].Slabs[i].Slab.Shards = shards + os.objects[bucket][objKey].Slabs[i].Slab.Shards = shards return } }) @@ -544,9 +542,9 @@ func (os *objectStoreMock) PackedSlabsForUpload(ctx context.Context, lockingDura if ps.parameterKey == parameterKey && time.Now().After(ps.lockedUntil) { ps.lockedUntil = time.Now().Add(lockingDuration) pss = append(pss, api.PackedSlab{ - BufferID: ps.bufferID, - Data: ps.data, - Key: ps.slabKey, + BufferID: ps.bufferID, + Data: ps.data, + EncryptionKey: ps.slabKey, }) if len(pss) == limit { break @@ -566,9 +564,9 @@ func (os *objectStoreMock) MarkPackedSlabsUploaded(ctx context.Context, slabs [] } slabKeyToSlab := make(map[string]*object.Slab) - os.forEachObject(func(bucket, path string, o object.Object) { + os.forEachObject(func(bucket, objKey string, o object.Object) { for i, slab := range o.Slabs { - slabKeyToSlab[slab.Slab.Key.String()] = &os.objects[bucket][path].Slabs[i].Slab + slabKeyToSlab[slab.Slab.EncryptionKey.String()] = &os.objects[bucket][objKey].Slabs[i].Slab } }) @@ -604,7 +602,7 @@ func (os 
*objectStoreMock) setSlabBufferMaxSizeSoft(n int) { os.slabBufferMaxSizeSoft = n } -func (os *objectStoreMock) forEachObject(fn func(bucket, path string, o object.Object)) { +func (os *objectStoreMock) forEachObject(fn func(bucket, key string, o object.Object)) { for bucket, objects := range os.objects { for path, object := range objects { fn(bucket, path, object) @@ -654,8 +652,8 @@ func (*s3Mock) MultipartUploadParts(ctx context.Context, bucket, object string, return api.MultipartListPartsResponse{}, nil } -func (*s3Mock) S3AuthenticationSettings(context.Context) (as api.S3AuthenticationSettings, err error) { - return api.S3AuthenticationSettings{}, nil +func (*s3Mock) S3Settings(context.Context) (as api.S3Settings, err error) { + return api.S3Settings{}, nil } func (*s3Mock) UpdateSetting(context.Context, string, interface{}) error { @@ -686,22 +684,6 @@ func (*syncerMock) SyncerPeers(context.Context) ([]string, error) { return nil, nil } -var _ Wallet = (*walletMock)(nil) - -type walletMock struct{} - -func (*walletMock) WalletDiscard(context.Context, types.Transaction) error { - return nil -} - -func (*walletMock) WalletFund(context.Context, *types.Transaction, types.Currency, bool) ([]types.Hash256, []types.Transaction, error) { - return nil, nil, nil -} - -func (*walletMock) WalletSign(context.Context, *types.Transaction, []types.Hash256, types.CoveredFields) error { - return nil -} - var _ webhooks.Broadcaster = (*webhookBroadcasterMock)(nil) type webhookBroadcasterMock struct{} diff --git a/worker/s3/authentication.go b/worker/s3/authentication.go index 58ebad677..066e27e53 100644 --- a/worker/s3/authentication.go +++ b/worker/s3/authentication.go @@ -117,11 +117,11 @@ func (b *authenticatedBackend) permsFromCtx(ctx context.Context, bucket string) } func (b *authenticatedBackend) reloadV4Keys(ctx context.Context) error { - as, err := b.backend.b.S3AuthenticationSettings(ctx) + s3, err := b.backend.b.S3Settings(ctx) if err != nil { return err } - signature.ReloadKeys(as.V4Keypairs) + signature.ReloadKeys(s3.Authentication.V4Keypairs) return nil } diff --git a/worker/s3/backend.go b/worker/s3/backend.go index ae0719662..28e1f7299 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -12,7 +12,6 @@ import ( "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" - "go.sia.tech/renterd/object" "go.uber.org/zap" ) @@ -82,9 +81,6 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 return nil, gofakes3.ErrorMessage(gofakes3.ErrNotImplemented, "delimiter must be '/' but was "+prefix.Delimiter) } - // Workaround for empty prefix - prefix.HasPrefix = prefix.Prefix != "" - // Adjust MaxKeys if page.MaxKeys == 0 { page.MaxKeys = maxKeysDefault @@ -95,59 +91,22 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 page.Marker = "/" + page.Marker } - var objects []api.ObjectMetadata - var err error - response := gofakes3.NewObjectList() - if prefix.HasDelimiter { - // Handle request with delimiter. 
- opts := api.GetObjectOptions{} - if page.HasMarker { - opts.Marker = page.Marker - opts.Limit = int(page.MaxKeys) - } - var path string // root of bucket - adjustedPrefix := prefix.Prefix - if idx := strings.LastIndex(adjustedPrefix, prefix.Delimiter); idx != -1 { - path = adjustedPrefix[:idx+1] - adjustedPrefix = adjustedPrefix[idx+1:] - } - if adjustedPrefix != "" { - opts.Prefix = adjustedPrefix - } - var res api.ObjectsResponse - res, err = s.b.Object(ctx, bucketName, path, opts) - if utils.IsErr(err, api.ErrBucketNotFound) { - return nil, gofakes3.BucketNotFound(bucketName) - } else if err != nil { - return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) - } - objects = res.Entries - response.IsTruncated = res.HasMore - if response.IsTruncated { - response.NextMarker = objects[len(objects)-1].Name - } - } else { - // Handle request without delimiter. - opts := api.ListObjectOptions{ - Limit: int(page.MaxKeys), - Marker: page.Marker, - Prefix: "/" + prefix.Prefix, - } - - var res api.ObjectsListResponse - res, err = s.b.ListObjects(ctx, bucketName, opts) - if utils.IsErr(err, api.ErrBucketNotFound) { - return nil, gofakes3.BucketNotFound(bucketName) - } else if err != nil { - return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) - } - objects = res.Objects - response.IsTruncated = res.HasMore - response.NextMarker = res.NextMarker - } - if err != nil { + resp, err := s.b.Objects(ctx, bucketName, prefix.Prefix, api.ListObjectOptions{ + Delimiter: prefix.Delimiter, + Limit: int(page.MaxKeys), + Marker: page.Marker, + }) + if utils.IsErr(err, api.ErrBucketNotFound) { + return nil, gofakes3.BucketNotFound(bucketName) + } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } + objects := resp.Objects + + // prepare response + response := gofakes3.NewObjectList() + response.IsTruncated = resp.HasMore + response.NextMarker = resp.NextMarker // Remove the leading slash from the marker since we also do that for the // name of each object @@ -155,7 +114,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 // Loop over the entries and add them to the response. for _, object := range objects { - key := strings.TrimPrefix(object.Name, "/") + key := strings.TrimPrefix(object.Key, "/") if prefix.HasDelimiter && strings.HasSuffix(key, prefix.Delimiter) { response.AddPrefix(key) continue @@ -239,7 +198,7 @@ func (s *s3) DeleteBucket(ctx context.Context, name string) error { // If the backend is a VersionedBackend, GetObject retrieves the latest version. // TODO: Range requests starting from the end are not supported yet. Backend // needs to be updated for that. 
-func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { +func (s *s3) GetObject(ctx context.Context, bucketName, key string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { if rangeRequest != nil && rangeRequest.FromEnd { return nil, gofakes3.ErrorMessage(gofakes3.ErrNotImplemented, "range request from end not supported") } @@ -253,11 +212,11 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range opts.Range = &api.DownloadRange{Offset: rangeRequest.Start, Length: length} } - res, err := s.w.GetObject(ctx, bucketName, objectName, opts) + res, err := s.w.GetObject(ctx, bucketName, key, opts) if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) } else if utils.IsErr(err, api.ErrObjectNotFound) { - return nil, gofakes3.KeyNotFound(objectName) + return nil, gofakes3.KeyNotFound(key) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } @@ -288,7 +247,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range return &gofakes3.Object{ Hash: etag, - Name: gofakes3.URLEncode(objectName), + Name: gofakes3.URLEncode(key), Metadata: res.Metadata, Size: res.Size, Contents: res.Content, @@ -305,12 +264,10 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // // HeadObject should return a NotFound() error if the object does not // exist. -func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{ - IgnoreDelim: true, - }) +func (s *s3) HeadObject(ctx context.Context, bucketName, key string) (*gofakes3.Object, error) { + res, err := s.w.HeadObject(ctx, bucketName, key, api.HeadObjectOptions{}) if utils.IsErr(err, api.ErrObjectNotFound) { - return nil, gofakes3.KeyNotFound(objectName) + return nil, gofakes3.KeyNotFound(key) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } @@ -333,7 +290,7 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go return &gofakes3.Object{ Hash: hash, - Name: gofakes3.URLEncode(objectName), + Name: gofakes3.URLEncode(key), Metadata: metadata, Size: res.Size, Contents: io.NopCloser(bytes.NewReader(nil)), @@ -355,8 +312,8 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // Removes the null version (if there is one) of an object and inserts a // delete marker, which becomes the latest version of the object. If there // isn't a null version, Amazon S3 does not remove any objects. 
-func (s *s3) DeleteObject(ctx context.Context, bucketName, objectName string) (gofakes3.ObjectDeleteResult, error) { - err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) +func (s *s3) DeleteObject(ctx context.Context, bucketName, key string) (gofakes3.ObjectDeleteResult, error) { + err := s.b.DeleteObject(ctx, bucketName, key, api.DeleteObjectOptions{}) if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.ObjectDeleteResult{}, gofakes3.BucketNotFound(bucketName) } else if utils.IsErr(err, api.ErrObjectNotFound) { @@ -396,17 +353,17 @@ func (s *s3) PutObject(ctx context.Context, bucketName, key string, meta map[str func (s *s3) DeleteMulti(ctx context.Context, bucketName string, objects ...string) (gofakes3.MultiDeleteResult, error) { var res gofakes3.MultiDeleteResult - for _, objectName := range objects { - err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) + for _, key := range objects { + err := s.b.DeleteObject(ctx, bucketName, key, api.DeleteObjectOptions{}) if err != nil && !utils.IsErr(err, api.ErrObjectNotFound) { res.Error = append(res.Error, gofakes3.ErrorResult{ - Key: objectName, + Key: key, Code: gofakes3.ErrInternal, Message: err.Error(), }) } else { res.Deleted = append(res.Deleted, gofakes3.ObjectID{ - Key: objectName, + Key: key, VersionID: "", // not supported }) } @@ -433,9 +390,9 @@ func (s *s3) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKe func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta map[string]string) (gofakes3.UploadID, error) { convertToSiaMetadataHeaders(meta) resp, err := s.b.CreateMultipartUpload(ctx, bucket, "/"+key, api.CreateMultipartOptions{ - Key: &object.NoOpKey, - MimeType: meta["Content-Type"], - Metadata: api.ExtractObjectUserMetadataFrom(meta), + DisableClientSideEncryption: true, + MimeType: meta["Content-Type"], + Metadata: api.ExtractObjectUserMetadataFrom(meta), }) if err != nil { return "", gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -474,7 +431,7 @@ func (s *s3) ListMultipartUploads(ctx context.Context, bucket string, marker *go var uploads []gofakes3.ListMultipartUploadItem for _, upload := range resp.Uploads { uploads = append(uploads, gofakes3.ListMultipartUploadItem{ - Key: upload.Path[1:], + Key: upload.Key[1:], UploadID: gofakes3.UploadID(upload.UploadID), Initiated: gofakes3.NewContentTime(upload.CreatedAt.Std()), }) diff --git a/worker/s3/s3.go b/worker/s3/s3.go index d5cbb71a3..10b074306 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -30,28 +30,26 @@ type Bus interface { DeleteBucket(ctx context.Context, bucketName string) error ListBuckets(ctx context.Context) (buckets []api.Bucket, err error) - AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) - CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath string, opts api.CopyObjectOptions) (om api.ObjectMetadata, err error) - DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) (err error) - ListObjects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) - Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.ObjectsResponse, err error) + AddObject(ctx context.Context, bucket, key, contractSet string, o object.Object, opts api.AddObjectOptions) (err error) + CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey string, opts 
api.CopyObjectOptions) (om api.ObjectMetadata, err error) + DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) (err error) + Objects(ctx context.Context, bucket, prefix string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) - AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) - CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) - CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error) + AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) (err error) + CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) + CreateMultipartUpload(ctx context.Context, bucket, key string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucket, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) - S3AuthenticationSettings(ctx context.Context) (as api.S3AuthenticationSettings, err error) - UpdateSetting(ctx context.Context, key string, value interface{}) error + S3Settings(ctx context.Context) (as api.S3Settings, err error) UploadParams(ctx context.Context) (api.UploadParams, error) } type Worker interface { - GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) - HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) - UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) - UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) + GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) + HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) + UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) + UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, key, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) } func (l *gofakes3Logger) Print(level gofakes3.LogLevel, v ...interface{}) { diff --git a/worker/upload.go b/worker/upload.go index 6ced77f9a..1e1dca9a1 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -154,16 +154,16 @@ func (w *Worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime w.uploadManager = newUploadManager(w.shutdownCtx, w, w.bus, w.bus, w.bus, maxMemory, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } -func (w *Worker) upload(ctx context.Context, bucket, path string, rs api.RedundancySettings, r io.Reader, contracts []api.ContractMetadata, opts ...UploadOption) (_ string, err error) { +func (w *Worker) 
upload(ctx context.Context, bucket, key string, rs api.RedundancySettings, r io.Reader, contracts []api.ContractMetadata, opts ...UploadOption) (_ string, err error) { // apply the options - up := defaultParameters(bucket, path, rs) + up := defaultParameters(bucket, key, rs) for _, opt := range opts { opt(&up) } // if not given, try decide on a mime type using the file extension if !up.multipart && up.mimeType == "" { - up.mimeType = mime.TypeByExtension(filepath.Ext(up.path)) + up.mimeType = mime.TypeByExtension(filepath.Ext(up.key)) // if mime type is still not known, wrap the reader with a mime reader if up.mimeType == "" { @@ -540,13 +540,13 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a if up.multipart { // persist the part - err = mgr.os.AddMultipartPart(ctx, up.bucket, up.path, up.contractSet, eTag, up.uploadID, up.partNumber, o.Slabs) + err = mgr.os.AddMultipartPart(ctx, up.bucket, up.key, up.contractSet, eTag, up.uploadID, up.partNumber, o.Slabs) if err != nil { return bufferSizeLimitReached, "", fmt.Errorf("couldn't add multi part: %w", err) } } else { // persist the object - err = mgr.os.AddObject(ctx, up.bucket, up.path, up.contractSet, o, api.AddObjectOptions{MimeType: up.mimeType, ETag: eTag, Metadata: up.metadata}) + err = mgr.os.AddObject(ctx, up.bucket, up.key, up.contractSet, o, api.AddObjectOptions{MimeType: up.mimeType, ETag: eTag, Metadata: up.metadata}) if err != nil { return bufferSizeLimitReached, "", fmt.Errorf("couldn't add object: %w", err) } @@ -561,7 +561,7 @@ func (mgr *uploadManager) UploadPackedSlab(ctx context.Context, rs api.Redundanc defer cancel() // build the shards - shards := encryptPartialSlab(ps.Data, ps.Key, uint8(rs.MinShards), uint8(rs.TotalShards)) + shards := encryptPartialSlab(ps.Data, ps.EncryptionKey, uint8(rs.MinShards), uint8(rs.TotalShards)) // create the upload upload, err := mgr.newUpload(len(shards), contracts, bh, lockPriority) diff --git a/worker/upload_params.go b/worker/upload_params.go index 109488bb9..b20ec6485 100644 --- a/worker/upload_params.go +++ b/worker/upload_params.go @@ -7,7 +7,7 @@ import ( type uploadParameters struct { bucket string - path string + key string multipart bool uploadID string @@ -25,10 +25,10 @@ type uploadParameters struct { metadata api.ObjectUserMetadata } -func defaultParameters(bucket, path string, rs api.RedundancySettings) uploadParameters { +func defaultParameters(bucket, key string, rs api.RedundancySettings) uploadParameters { return uploadParameters{ bucket: bucket, - path: path, + key: key, ec: object.GenerateEncryptionKey(), // random key encryptionOffset: 0, // from the beginning diff --git a/worker/upload_test.go b/worker/upload_test.go index 5bad0941a..d36c67a6e 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -54,13 +54,13 @@ func TestUpload(t *testing.T) { // build used hosts used := make(map[types.PublicKey]struct{}) - for _, shard := range o.Object.Object.Slabs[0].Shards { + for _, shard := range o.Object.Slabs[0].Shards { used[shard.LatestHost] = struct{}{} } // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -86,7 +86,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = 
dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), filtered) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -103,7 +103,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it fails buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), filtered) if !errors.Is(err, errDownloadNotEnoughHosts) { t.Fatal("expected not enough hosts error", err) } @@ -165,7 +165,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -201,7 +201,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -221,8 +221,8 @@ func TestUploadPackedSlab(t *testing.T) { var c int uploadBytes := func(n int) { t.Helper() - params.path = fmt.Sprintf("%s_%d", t.Name(), c) - _, err := w.upload(context.Background(), params.bucket, params.path, testRedundancySettings, bytes.NewReader(frand.Bytes(n)), w.Contracts(), opts...) + params.key = fmt.Sprintf("%s_%d", t.Name(), c) + _, err := w.upload(context.Background(), params.bucket, params.key, testRedundancySettings, bytes.NewReader(frand.Bytes(n)), w.Contracts(), opts...) 
if err != nil { t.Fatal(err) } @@ -294,10 +294,10 @@ func TestMigrateLostSector(t *testing.T) { o, err := os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab := o.Object.Object.Slabs[0] + slab := o.Object.Slabs[0] // build usedHosts hosts usedHosts := make(map[types.PublicKey]struct{}) @@ -320,7 +320,7 @@ func TestMigrateLostSector(t *testing.T) { } // encrypt the shards - o.Object.Object.Slabs[0].Slab.Encrypt(shards) + o.Object.Slabs[0].Slab.Encrypt(shards) // filter it down to the shards we need to migrate shards = shards[:1] @@ -336,7 +336,7 @@ func TestMigrateLostSector(t *testing.T) { // migrate the shard away from the bad host mem := mm.AcquireMemory(context.Background(), rhpv2.SectorSize) - err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, []int{0}, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + err = ul.UploadShards(context.Background(), o.Object.Slabs[0].Slab, []int{0}, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) if err != nil { t.Fatal(err) } @@ -345,10 +345,10 @@ func TestMigrateLostSector(t *testing.T) { o, err = os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab = o.Object.Object.Slabs[0] + slab = o.Object.Slabs[0] // assert the bad shard is on a good host now shard := slab.Shards[0] @@ -395,10 +395,10 @@ func TestUploadShards(t *testing.T) { o, err := os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab := o.Object.Object.Slabs[0] + slab := o.Object.Slabs[0] // build usedHosts hosts usedHosts := make(map[types.PublicKey]struct{}) @@ -423,7 +423,7 @@ func TestUploadShards(t *testing.T) { } // encrypt the shards - o.Object.Object.Slabs[0].Slab.Encrypt(shards) + o.Object.Slabs[0].Slab.Encrypt(shards) // filter it down to the shards we need to migrate for i, si := range badIndices { @@ -443,7 +443,7 @@ func TestUploadShards(t *testing.T) { // migrate those shards away from bad hosts mem := mm.AcquireMemory(context.Background(), uint64(len(badIndices))*rhpv2.SectorSize) - err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + err = ul.UploadShards(context.Background(), o.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) if err != nil { t.Fatal(err) } @@ -452,10 +452,10 @@ func TestUploadShards(t *testing.T) { o, err = os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) if err != nil { t.Fatal(err) - } else if len(o.Object.Object.Slabs) != 1 { + } else if len(o.Object.Slabs) != 1 { t.Fatal("expected 1 slab") } - slab = o.Object.Object.Slabs[0] + slab = o.Object.Slabs[0] // assert none of the shards are on bad hosts for i, shard := range slab.Shards { @@ -479,7 +479,7 @@ func TestUploadShards(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), contracts) + err = 
dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), contracts) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -599,7 +599,7 @@ func TestUploadRegression(t *testing.T) { // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err := w.upload(ctx, params.bucket, params.path, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) + _, err := w.upload(ctx, params.bucket, params.key, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } @@ -608,7 +608,7 @@ func TestUploadRegression(t *testing.T) { unblock() // upload data - _, err = w.upload(context.Background(), params.bucket, params.path, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) + _, err = w.upload(context.Background(), params.bucket, params.key, testRedundancySettings, bytes.NewReader(data), w.Contracts(), testOpts()...) if err != nil { t.Fatal(err) } @@ -621,7 +621,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object, 0, uint64(o.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -660,10 +660,10 @@ func TestUploadSingleSectorSlowHosts(t *testing.T) { } } -func testParameters(path string) uploadParameters { +func testParameters(key string) uploadParameters { return uploadParameters{ bucket: testBucket, - path: path, + key: key, ec: object.GenerateEncryptionKey(), // random key encryptionOffset: 0, // from the beginning diff --git a/worker/upload_utils.go b/worker/upload_utils.go index 306e1774f..6dfc9b729 100644 --- a/worker/upload_utils.go +++ b/worker/upload_utils.go @@ -10,9 +10,9 @@ import ( func encryptPartialSlab(data []byte, key object.EncryptionKey, minShards, totalShards uint8) [][]byte { slab := object.Slab{ - Key: key, - MinShards: minShards, - Shards: make([]object.Sector, totalShards), + EncryptionKey: key, + MinShards: minShards, + Shards: make([]object.Sector, totalShards), } encodedShards := make([][]byte, totalShards) slab.Encode(data, encodedShards) diff --git a/worker/worker.go b/worker/worker.go index 70b5fccf3..3fffec52d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -34,7 +34,6 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker/client" - "go.sia.tech/renterd/worker/s3" "go.uber.org/zap" ) @@ -66,8 +65,6 @@ func NewClient(address, password string) *Client { type ( Bus interface { - s3.Bus - alerts.Alerter gouging.ConsensusState webhooks.Broadcaster @@ -83,7 +80,6 @@ type ( WebhookStore Syncer - Wallet } AccountFunder interface { @@ -113,8 +109,8 @@ type ( Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) // NOTE: used for upload - AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error - AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) + AddObject(ctx context.Context, bucket, key, contractSet string, o object.Object, opts api.AddObjectOptions) error + AddMultipartPart(ctx context.Context, bucket, key, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err 
error)
 	AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error)
 	AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error
 	FinishUpload(ctx context.Context, uID api.UploadID) error
@@ -124,8 +120,8 @@ type (

 		// NOTE: used by worker
 		Bucket(_ context.Context, bucket string) (api.Bucket, error)
-		Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error)
-		DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error
+		Object(ctx context.Context, bucket, key string, opts api.GetObjectOptions) (api.Object, error)
+		DeleteObject(ctx context.Context, bucket, key string, opts api.DeleteObjectOptions) error
 		MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error)
 		PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error)
 	}
@@ -140,12 +136,6 @@ type (
 		SyncerPeers(ctx context.Context) (resp []string, err error)
 	}

-	Wallet interface {
-		WalletDiscard(ctx context.Context, txn types.Transaction) error
-		WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error)
-		WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error
-	}
-
 	WebhookStore interface {
 		RegisterWebhook(ctx context.Context, webhook webhooks.Webhook) error
 		UnregisterWebhook(ctx context.Context, webhook webhooks.Webhook) error
@@ -493,17 +483,9 @@ func (w *Worker) objectsHandlerHEAD(jc jape.Context) {
 	if jc.DecodeForm("bucket", &bucket) != nil {
 		return
 	}
-	var ignoreDelim bool
-	if jc.DecodeForm("ignoreDelim", &ignoreDelim) != nil {
-		return
-	}

-	// parse path
-	path := jc.PathParam("path")
-	if !ignoreDelim && (path == "" || strings.HasSuffix(path, "/")) {
-		jc.Error(errors.New("HEAD requests can only be performed on objects, not directories"), http.StatusBadRequest)
-		return
-	}
+	// parse key
+	path := jc.PathParam("key")

 	var off int
 	if jc.DecodeForm("offset", &off) != nil {
 		return
@@ -528,8 +510,7 @@
 	// fetch object metadata
 	hor, err := w.HeadObject(jc.Request.Context(), bucket, path, api.HeadObjectOptions{
-		IgnoreDelim: ignoreDelim,
-		Range:       &dr,
+		Range: &dr,
 	})
 	if utils.IsErr(err, api.ErrObjectNotFound) {
 		jc.Error(err, http.StatusNotFound)
 		return
@@ -584,27 +565,9 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) {
 		return
 	}

-	opts := api.GetObjectOptions{
-		Prefix:      prefix,
-		Marker:      marker,
-		Offset:      off,
-		Limit:       limit,
-		IgnoreDelim: ignoreDelim,
-		SortBy:      sortBy,
-		SortDir:     sortDir,
-	}
-
-	path := jc.PathParam("path")
-	if path == "" || strings.HasSuffix(path, "/") {
-		// list directory
-		res, err := w.bus.Object(ctx, bucket, path, opts)
-		if utils.IsErr(err, api.ErrObjectNotFound) {
-			jc.Error(err, http.StatusNotFound)
-			return
-		} else if jc.Check("couldn't get object or entries", err) != nil {
-			return
-		}
-		jc.Encode(res.Entries)
+	key := jc.PathParam("key")
+	if key == "" {
+		jc.Error(errors.New("no key provided"), http.StatusBadRequest)
 		return
 	}
@@ -620,9 +583,8 @@
 		return
 	}

-	gor, err := w.GetObject(ctx, bucket, path, api.DownloadObjectOptions{
-		GetObjectOptions: opts,
-		Range:            &dr,
+	gor, err := w.GetObject(ctx, bucket,
key, api.DownloadObjectOptions{ + Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -636,7 +598,7 @@ func (w *Worker) objectsHandlerGET(jc jape.Context) { defer gor.Content.Close() // serve the content - serveContent(jc.ResponseWriter, jc.Request, path, gor.Content, gor.HeadObjectResponse) + serveContent(jc.ResponseWriter, jc.Request, key, gor.Content, gor.HeadObjectResponse) } func (w *Worker) objectsHandlerPUT(jc jape.Context) { @@ -644,7 +606,7 @@ func (w *Worker) objectsHandlerPUT(jc jape.Context) { ctx := jc.Request.Context() // grab the path - path := jc.PathParam("path") + path := jc.PathParam("key") // decode the contract set from the query string var contractset string @@ -715,7 +677,7 @@ func (w *Worker) multipartUploadHandlerPUT(jc jape.Context) { ctx := jc.Request.Context() // grab the path - path := jc.PathParam("path") + path := jc.PathParam("key") // decode the contract set from the query string var contractset string @@ -807,7 +769,7 @@ func (w *Worker) objectsHandlerDELETE(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } - err := w.bus.DeleteObject(jc.Request.Context(), bucket, jc.PathParam("path"), api.DeleteObjectOptions{Batch: batch}) + err := w.bus.DeleteObject(jc.Request.Context(), bucket, jc.PathParam("key"), api.DeleteObjectOptions{Batch: batch}) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return @@ -842,7 +804,6 @@ func (w *Worker) rhpContractsHandlerGET(jc jape.Context) { contracts, errs := w.fetchContracts(ctx, busContracts, hosttimeout) resp := api.ContractsResponse{Contracts: contracts} if errs != nil { - resp.Error = errs.Error() resp.Errors = make(map[types.PublicKey]string) for pk, err := range errs { resp.Errors[pk] = err.Error() @@ -996,12 +957,12 @@ func (w *Worker) Handler() http.Handler { "GET /stats/uploads": w.uploadsStatsHandlerGET, "POST /slab/migrate": w.slabMigrateHandler, - "HEAD /objects/*path": w.objectsHandlerHEAD, - "GET /objects/*path": w.objectsHandlerGET, - "PUT /objects/*path": w.objectsHandlerPUT, - "DELETE /objects/*path": w.objectsHandlerDELETE, + "HEAD /objects/*key": w.objectsHandlerHEAD, + "GET /objects/*key": w.objectsHandlerGET, + "PUT /objects/*key": w.objectsHandlerPUT, + "DELETE /objects/*key": w.objectsHandlerDELETE, - "PUT /multipart/*path": w.multipartUploadHandlerPUT, + "PUT /multipart/*key": w.multipartUploadHandlerPUT, "GET /state": w.stateHandlerGET, }) @@ -1147,16 +1108,15 @@ func isErrHostUnreachable(err error) bool { utils.IsErr(err, errors.New("cannot assign requested address")) } -func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.ObjectsResponse, error) { +func (w *Worker) headObject(ctx context.Context, bucket, key string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.Object, error) { // fetch object - res, err := w.bus.Object(ctx, bucket, path, api.GetObjectOptions{ - IgnoreDelim: opts.IgnoreDelim, + res, err := w.bus.Object(ctx, bucket, key, api.GetObjectOptions{ OnlyMetadata: onlyMetadata, }) if err != nil { - return nil, api.ObjectsResponse{}, fmt.Errorf("couldn't fetch object: %w", err) + return nil, api.Object{}, fmt.Errorf("couldn't fetch object: %w", err) } else if res.Object == nil { - return nil, api.ObjectsResponse{}, errors.New("object is a directory") + return nil, api.Object{}, errors.New("object is a directory") } // adjust length @@ -1164,21 +1124,21 @@ func (w 
*Worker) headObject(ctx context.Context, bucket, path string, onlyMetada opts.Range = &api.DownloadRange{Offset: 0, Length: -1} } if opts.Range.Length == -1 { - opts.Range.Length = res.Object.Size - opts.Range.Offset + opts.Range.Length = res.Size - opts.Range.Offset } // check size of object against range - if opts.Range.Offset+opts.Range.Length > res.Object.Size { - return nil, api.ObjectsResponse{}, http_range.ErrInvalid + if opts.Range.Offset+opts.Range.Length > res.Size { + return nil, api.Object{}, http_range.ErrInvalid } return &api.HeadObjectResponse{ - ContentType: res.Object.MimeType, - Etag: res.Object.ETag, - LastModified: res.Object.ModTime, - Range: opts.Range.ContentRange(res.Object.Size), - Size: res.Object.Size, - Metadata: res.Object.Metadata, + ContentType: res.MimeType, + Etag: res.ETag, + LastModified: res.ModTime, + Range: opts.Range.ContentRange(res.Size), + Size: res.Size, + Metadata: res.Metadata, }, res, nil } @@ -1211,16 +1171,15 @@ func (w *Worker) FundAccount(ctx context.Context, fcid types.FileContractID, hk }) } -func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { +func (w *Worker) GetObject(ctx context.Context, bucket, key string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { // head object - hor, res, err := w.headObject(ctx, bucket, path, false, api.HeadObjectOptions{ - IgnoreDelim: opts.IgnoreDelim, - Range: opts.Range, + hor, res, err := w.headObject(ctx, bucket, key, false, api.HeadObjectOptions{ + Range: opts.Range, }) if err != nil { return nil, fmt.Errorf("couldn't fetch object: %w", err) } - obj := *res.Object.Object + obj := *res.Object // adjust range if opts.Range == nil { @@ -1257,7 +1216,7 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) && !errors.Is(err, io.ErrClosedPipe) { - w.registerAlert(newDownloadFailedAlert(bucket, path, offset, length, int64(len(contracts)), err)) + w.registerAlert(newDownloadFailedAlert(bucket, key, offset, length, int64(len(contracts)), err)) } return fmt.Errorf("failed to download object: %w", err) } @@ -1277,8 +1236,8 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do }, nil } -func (w *Worker) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { - res, _, err := w.headObject(ctx, bucket, path, true, opts) +func (w *Worker) HeadObject(ctx context.Context, bucket, key string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { + res, _, err := w.headObject(ctx, bucket, key, true, opts) return res, err } @@ -1301,7 +1260,7 @@ func (w *Worker) SyncAccount(ctx context.Context, fcid types.FileContractID, hk return nil } -func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { +func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, key string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { // prepare upload params up, err := w.prepareUploadParams(ctx, bucket, opts.ContractSet, opts.MinShards, opts.TotalShards) if err != nil { @@ -1318,7 +1277,7 @@ func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } // upload - eTag, err := w.upload(ctx, bucket, path, up.RedundancySettings, r, contracts, + eTag, err := w.upload(ctx, bucket, key, 
up.RedundancySettings, r, contracts, WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithMimeType(opts.MimeType), @@ -1326,9 +1285,9 @@ func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str WithObjectUserMetadata(opts.Metadata), ) if err != nil { - w.logger.With(zap.Error(err)).With("path", path).With("bucket", bucket).Error("failed to upload object") + w.logger.With(zap.Error(err)).With("key", key).With("bucket", bucket).Error("failed to upload object") if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { - w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, opts.MimeType, up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) + w.registerAlert(newUploadFailedAlert(bucket, key, up.ContractSet, opts.MimeType, up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) } return nil, fmt.Errorf("couldn't upload object: %w", err) } @@ -1358,13 +1317,13 @@ func (w *Worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithPacking(up.UploadPacking), - WithCustomKey(upload.Key), + WithCustomKey(upload.EncryptionKey), WithPartNumber(partNumber), WithUploadID(uploadID), } // make sure only one of the following is set - if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && opts.EncryptionOffset == nil { + if encryptionEnabled := !upload.EncryptionKey.IsNoopKey(); encryptionEnabled && opts.EncryptionOffset == nil { return nil, fmt.Errorf("%w: if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set", api.ErrInvalidMultipartEncryptionSettings) } else if opts.EncryptionOffset != nil && *opts.EncryptionOffset < 0 { return nil, fmt.Errorf("%w: encryption offset must be positive", api.ErrInvalidMultipartEncryptionSettings)
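
The hunks above rename the worker's object routes from `*path` to `*key` and update the client signatures to match. For reviewers who want to exercise the renamed API, here is a minimal sketch of the client-side flow. The address, password, and bucket name are placeholders, and it assumes `worker.NewClient` (shown in this diff's context lines) returns the client whose `UploadObject` and `DownloadObject` methods are edited above:

```go
package main

import (
	"bytes"
	"context"
	"log"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/worker"
)

func main() {
	// Placeholder address and password; adjust to your renterd deployment.
	wc := worker.NewClient("http://localhost:9980/api/worker", "test")

	// Upload: the object is now addressed by key rather than path.
	_, err := wc.UploadObject(context.Background(), bytes.NewReader([]byte("hello")), "default", "docs/hello.txt", api.UploadObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Download the same object by its key.
	var buf bytes.Buffer
	if err := wc.DownloadObject(context.Background(), &buf, "default", "docs/hello.txt", api.DownloadObjectOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Println(buf.String())
}
```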