@@ -2,7 +2,6 @@ package util
 
 import (
 	"context"
-	"crypto/sha256"
 	"fmt"
 	"slices"
 	"strconv"
@@ -14,7 +13,7 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
-	"github.com/nspcc-dev/neo-go/pkg/services/oracle/neofs"
+	"github.com/nspcc-dev/neo-go/pkg/services/neofs"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/nspcc-dev/neofs-sdk-go/checksum"
@@ -30,35 +29,6 @@ import (
 	"github.com/urfave/cli/v2"
 )
 
-const (
-	// Number of objects to search in a batch. We need to search with EQ filter to
-	// avoid partially-completed SEARCH responses. If EQ search haven't found object
-	// the object will be uploaded one more time which may lead to duplicating objects.
-	// We will have a risk of duplicates until #3645 is resolved (NeoFS guarantees
-	// search results).
-	searchBatchSize = 1
-	// Size of object ID.
-	oidSize = sha256.Size
-)
-
-// Constants related to retry mechanism.
-const (
-	// Initial backoff duration.
-	initialBackoff = 500 * time.Millisecond
-	// Backoff multiplier.
-	backoffFactor = 2
-	// Maximum backoff duration.
-	maxBackoff = 20 * time.Second
-)
-
-// Constants related to NeoFS pool request timeouts.
-// Such big values are used to avoid NeoFS pool timeouts during block search and upload.
-const (
-	defaultDialTimeout        = 10 * time.Minute
-	defaultStreamTimeout      = 10 * time.Minute
-	defaultHealthcheckTimeout = 10 * time.Second
-)
-
 // poolWrapper wraps a NeoFS pool to adapt its Close method to return an error.
 type poolWrapper struct {
 	*pool.Pool
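
For readers tracking where the deleted constants went: the new import and the `neofs.`-qualified names used below imply that `pkg/services/neofs` now exports them. A minimal sketch of that package's constant block, assuming the exported names simply carry over the values removed here (the actual declarations may differ):

```go
// Sketch of pkg/services/neofs exports, inferred from the names on the
// + lines of this diff and the values deleted above; not the real source.
package neofs

import "time"

const (
	// DefaultSearchBatchSize presumably replaces the local searchBatchSize.
	DefaultSearchBatchSize = 1
	// OidSize presumably replaces the local oidSize (sha256.Size, i.e. 32 bytes).
	OidSize = 32

	// Retry parameters, formerly initialBackoff, backoffFactor, maxBackoff.
	InitialBackoff = 500 * time.Millisecond
	BackoffFactor  = 2
	MaxBackoff     = 20 * time.Second

	// Pool timeouts, formerly defaultDialTimeout, defaultStreamTimeout
	// and defaultHealthcheckTimeout.
	DefaultDialTimeout        = 10 * time.Minute
	DefaultStreamTimeout      = 10 * time.Minute
	DefaultHealthcheckTimeout = 10 * time.Second
)
```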
@@ -103,9 +73,9 @@ func uploadBin(ctx *cli.Context) error {
 	signer := user.NewAutoIDSignerRFC6979(acc.PrivateKey().PrivateKey)
 
 	params := pool.DefaultOptions()
-	params.SetHealthcheckTimeout(defaultHealthcheckTimeout)
-	params.SetNodeDialTimeout(defaultDialTimeout)
-	params.SetNodeStreamTimeout(defaultStreamTimeout)
+	params.SetHealthcheckTimeout(neofs.DefaultHealthcheckTimeout)
+	params.SetNodeDialTimeout(neofs.DefaultDialTimeout)
+	params.SetNodeStreamTimeout(neofs.DefaultStreamTimeout)
 	p, err := pool.New(pool.NewFlatNodeParams(rpcNeoFS), signer, params)
 	if err != nil {
 		return cli.Exit(fmt.Sprintf("failed to create NeoFS pool: %v", err), 1)
@@ -166,15 +136,15 @@ func uploadBin(ctx *cli.Context) error {
 // retry function with exponential backoff.
 func retry(action func() error, maxRetries uint) error {
 	var err error
-	backoff := initialBackoff
+	backoff := neofs.InitialBackoff
 	for range maxRetries {
 		if err = action(); err == nil {
 			return nil // Success, no retry needed.
 		}
 		time.Sleep(backoff) // Backoff before retrying.
-		backoff *= time.Duration(backoffFactor)
-		if backoff > maxBackoff {
-			backoff = maxBackoff
+		backoff *= time.Duration(neofs.BackoffFactor)
+		if backoff > neofs.MaxBackoff {
+			backoff = neofs.MaxBackoff
 		}
 	}
 	return err // Return the last error after exhausting retries.
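
Given the values moved into `pkg/services/neofs` (500ms initial backoff, factor 2, 20s cap), the sleep schedule is 500ms, 1s, 2s, 4s, 8s, 16s, then pinned at 20s. A self-contained sketch reproducing that schedule, with the constants inlined as assumptions:

```go
package main

import (
	"fmt"
	"time"
)

// Sketch: replay the backoff schedule of retry() above. The three constants
// are assumed to match neofs.InitialBackoff/BackoffFactor/MaxBackoff.
func main() {
	const (
		initialBackoff = 500 * time.Millisecond
		backoffFactor  = 2
		maxBackoff     = 20 * time.Second
	)
	backoff := initialBackoff
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt, backoff)
		backoff *= time.Duration(backoffFactor) // double, as in retry()
		if backoff > maxBackoff {
			backoff = maxBackoff // cap at 20s
		}
	}
}
```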
@@ -193,15 +163,15 @@ func uploadBlocksAndIndexFiles(ctx *cli.Context, p poolWrapper, rpc *rpcclient.C
 		errCh    = make(chan error)
 		doneCh   = make(chan struct{})
 		wg       sync.WaitGroup
-		emptyOID = make([]byte, oidSize)
+		emptyOID = make([]byte, neofs.OidSize)
 	)
 	fmt.Fprintf(ctx.App.Writer, "Processing batch from %d to %d\n", indexFileStart, indexFileEnd-1)
 	wg.Add(int(numWorkers))
 	for i := range numWorkers {
 		go func(i uint) {
 			defer wg.Done()
 			for blockIndex := indexFileStart + i; blockIndex < indexFileEnd; blockIndex += numWorkers {
-				if slices.Compare(buf[blockIndex%indexFileSize*oidSize:blockIndex%indexFileSize*oidSize+oidSize], emptyOID) != 0 {
+				if slices.Compare(buf[blockIndex%indexFileSize*neofs.OidSize:blockIndex%indexFileSize*neofs.OidSize+neofs.OidSize], emptyOID) != 0 {
 					if debug {
 						fmt.Fprintf(ctx.App.Writer, "Block %d is already uploaded\n", blockIndex)
 					}
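
The buffer indexing above treats `buf` as a flat array of 32-byte OID slots, one per block, keyed by `blockIndex % indexFileSize`. A small sketch of that offset arithmetic; the `indexFileSize` value here is hypothetical, not the CLI default:

```go
package main

import "fmt"

// Sketch of the index-file buffer layout: one oidSize-byte slot per block,
// positioned by blockIndex modulo the index file size. Values illustrative.
func main() {
	const (
		oidSize       = uint(32)     // matches neofs.OidSize in the diff
		indexFileSize = uint(128000) // hypothetical blocks per index file
	)
	blockIndex := uint(300500)
	pos := blockIndex % indexFileSize // slot inside the current index file
	off := pos * oidSize              // byte offset of that slot's OID
	fmt.Printf("block %d -> slot %d, bytes [%d:%d)\n",
		blockIndex, pos, off, off+oidSize)
}
```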
@@ -263,7 +233,7 @@ func uploadBlocksAndIndexFiles(ctx *cli.Context, p poolWrapper, rpc *rpcclient.C
 					}
 					return
 				}
-				resOid.Encode(buf[blockIndex%indexFileSize*oidSize:])
+				resOid.Encode(buf[blockIndex%indexFileSize*neofs.OidSize:])
 			}
 		}(i)
 	}
@@ -281,9 +251,9 @@ func uploadBlocksAndIndexFiles(ctx *cli.Context, p poolWrapper, rpc *rpcclient.C
 	fmt.Fprintf(ctx.App.Writer, "Successfully processed batch of blocks: from %d to %d\n", indexFileStart, indexFileEnd-1)
 
 	// Additional check for empty OIDs in the buffer.
-	for k := uint(0); k < (indexFileEnd-indexFileStart)*oidSize; k += oidSize {
-		if slices.Compare(buf[k:k+oidSize], emptyOID) == 0 {
-			return fmt.Errorf("empty OID found in index file %d at position %d (block index %d)", indexFileStart/indexFileSize, k/oidSize, indexFileStart/indexFileSize*indexFileSize+k/oidSize)
+	for k := uint(0); k < (indexFileEnd-indexFileStart)*neofs.OidSize; k += neofs.OidSize {
+		if slices.Compare(buf[k:k+neofs.OidSize], emptyOID) == 0 {
+			return fmt.Errorf("empty OID found in index file %d at position %d (block index %d)", indexFileStart/indexFileSize, k/neofs.OidSize, indexFileStart/indexFileSize*indexFileSize+k/neofs.OidSize)
 		}
 	}
 	if indexFileEnd-indexFileStart == indexFileSize {
@@ -310,7 +280,7 @@ func uploadBlocksAndIndexFiles(ctx *cli.Context, p poolWrapper, rpc *rpcclient.C
 func searchIndexFile(ctx *cli.Context, p poolWrapper, containerID cid.ID, account *wallet.Account, signer user.Signer, indexFileSize uint, blockAttributeKey, attributeKey string, maxParallelSearches, maxRetries uint) (uint, []byte, error) {
 	var (
 		// buf is used to store OIDs of the uploaded blocks.
-		buf    = make([]byte, indexFileSize*oidSize)
+		buf    = make([]byte, indexFileSize*neofs.OidSize)
 		doneCh = make(chan struct{})
 		errCh  = make(chan error)
 
@@ -377,7 +347,7 @@ func searchIndexFile(ctx *cli.Context, p poolWrapper, containerID cid.ID, accoun
 			}
 			pos := uint(blockIndex) % indexFileSize
 			if _, ok := processedIndices.LoadOrStore(pos, blockIndex); !ok {
-				id.Encode(buf[pos*oidSize:])
+				id.Encode(buf[pos*neofs.OidSize:])
 			}
 		}
 	}()
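
The `LoadOrStore` call deduplicates search results: only the first OID seen for a slot is encoded into `buf`, which matters while duplicate uploads remain possible (see the #3645 note in the removed comment). A minimal, self-contained sketch of that pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// Sketch of the sync.Map dedup used in searchIndexFile: the first value
// stored for a key wins, later duplicates are detected and skipped.
func main() {
	var processed sync.Map
	for _, slot := range []uint{7, 7, 9} {
		if _, loaded := processed.LoadOrStore(slot, struct{}{}); !loaded {
			fmt.Printf("slot %d: first hit, encode OID\n", slot)
		} else {
			fmt.Printf("slot %d: duplicate result, skip\n", slot)
		}
	}
}
```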
@@ -404,15 +374,15 @@ func searchIndexFile(ctx *cli.Context, p poolWrapper, containerID cid.ID, accoun
 // endIndex. It returns a buffered channel of resulting object IDs and closes it once
 // OID search is finished. Errors are sent to errCh in a non-blocking way.
 func searchObjects(ctx context.Context, p poolWrapper, containerID cid.ID, account *wallet.Account, blockAttributeKey string, startIndex, endIndex, maxParallelSearches, maxRetries uint, errCh chan error, additionalFilters ...object.SearchFilters) chan oid.ID {
-	var res = make(chan oid.ID, 2*searchBatchSize)
+	var res = make(chan oid.ID, 2*neofs.DefaultSearchBatchSize)
 	go func() {
 		var wg sync.WaitGroup
 		defer close(res)
 
-		for i := startIndex; i < endIndex; i += searchBatchSize * maxParallelSearches {
+		for i := startIndex; i < endIndex; i += neofs.DefaultSearchBatchSize * maxParallelSearches {
 			for j := range maxParallelSearches {
-				start := i + j*searchBatchSize
-				end := start + searchBatchSize
+				start := i + j*neofs.DefaultSearchBatchSize
+				end := start + neofs.DefaultSearchBatchSize
 
 				if start >= endIndex {
 					break
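
The loop structure above fans the `[startIndex, endIndex)` range out in `DefaultSearchBatchSize`-sized chunks across `maxParallelSearches` goroutines. A sketch of just the partitioning arithmetic, with an illustrative range and worker count (the clamping of `end` mirrors what the surrounding code presumably does past this hunk):

```go
package main

import "fmt"

// Sketch of the batch partitioning in searchObjects: the outer loop advances
// by batchSize*parallel and each worker j takes one batchSize-wide window.
// batchSize mirrors neofs.DefaultSearchBatchSize (1); the range is made up.
func main() {
	const (
		batchSize = uint(1)
		parallel  = uint(4)
	)
	start, end := uint(10), uint(16)
	for i := start; i < end; i += batchSize * parallel {
		for j := uint(0); j < parallel; j++ {
			s := i + j*batchSize
			if s >= end {
				break // no work left for the remaining workers
			}
			e := s + batchSize
			if e > end {
				e = end // clamp the final window
			}
			fmt.Printf("worker %d searches [%d, %d)\n", j, s, e)
		}
	}
}
```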