feat: add multi-rollup sequencer for testing #18

Open · wants to merge 9 commits into base: main
12 changes: 5 additions & 7 deletions test/dummy.go
@@ -5,7 +5,9 @@ import (
"context"
"encoding/hex"
"errors"
"fmt"
"math"
"reflect"
"sync"
"time"

@@ -105,18 +107,14 @@ func (d *DummySequencer) GetNextBatch(ctx context.Context, req sequencing.GetNex
lastBatchHash := d.lastBatchHash
d.lastBatchHashMutex.RUnlock()

- if lastBatchHash == nil && req.LastBatchHash != nil {
- return nil, errors.New("lastBatch is supposed to be nil")
- } else if lastBatchHash != nil && req.LastBatchHash == nil {
- return nil, errors.New("lastBatch is not supposed to be nil")
- } else if !bytes.Equal(lastBatchHash, req.LastBatchHash) {
- return nil, errors.New("supplied lastBatch does not match with sequencer last batch")
+ if !reflect.DeepEqual(lastBatchHash, req.LastBatchHash) {
+ return nil, fmt.Errorf("batch hash mismatch: lastBatchHash = %x, req.LastBatchHash = %x", lastBatchHash, req.LastBatchHash)
}

batch := d.tq.GetNextBatch(req.MaxBytes)
batchRes := &sequencing.GetNextBatchResponse{Batch: batch, Timestamp: now}
// If there are no transactions, return empty batch without updating the last batch hash
- if batch.Transactions == nil {
+ if len(batch.Transactions) == 0 {
return batchRes, nil
}

75 changes: 53 additions & 22 deletions test/dummy_test.go
@@ -2,6 +2,9 @@ package test

import (
"context"
"crypto/rand"
"fmt"
"io"
"math"
"testing"
"time"
@@ -14,7 +17,8 @@ import (
func TestTransactionQueue_AddTransaction(t *testing.T) {
queue := NewTransactionQueue()

- tx1 := []byte("transaction_1")
+ tx1, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
queue.AddTransaction(tx1)

// Check that the transaction was added
@@ -27,8 +31,10 @@ func TestTransactionQueue_GetNextBatch(t *testing.T) {
queue := NewTransactionQueue()

// Add multiple transactions
- tx1 := []byte("transaction_1")
- tx2 := []byte("transaction_2")
+ tx1, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx2, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
queue.AddTransaction(tx1)
queue.AddTransaction(tx2)

@@ -46,7 +52,8 @@ func TestTransactionQueue_GetNextBatch(t *testing.T) {
func TestDummySequencer_SubmitRollupTransaction(t *testing.T) {
// Define a test rollup ID and transaction
rollupId := []byte("test_rollup_id")
- tx := []byte("test_transaction")
+ tx, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
sequencer := NewDummySequencer(rollupId)

// Submit a transaction
@@ -92,9 +99,12 @@ func TestDummySequencer_SubmitEmptyTransaction(t *testing.T) {
func TestDummySequencer_SubmitMultipleTransactions(t *testing.T) {
// Define a test rollup ID and multiple transactions
rollupId := []byte("test_rollup_id")
- tx1 := []byte("transaction_1")
- tx2 := []byte("transaction_2")
- tx3 := []byte("transaction_3")
+ tx1, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx2, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx3, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
sequencer := NewDummySequencer(rollupId)

// Submit multiple transactions
@@ -111,7 +121,7 @@ func TestDummySequencer_SubmitMultipleTransactions(t *testing.T) {
Tx: tx3,
}

- _, err := sequencer.SubmitRollupTransaction(context.Background(), req1)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req1)
assert.NoError(t, err)
_, err = sequencer.SubmitRollupTransaction(context.Background(), req2)
assert.NoError(t, err)
@@ -129,13 +139,14 @@ func TestDummySequencer_SubmitMultipleTransactions(t *testing.T) {
func TestDummySequencer_GetNextBatch(t *testing.T) {
// Add a transaction to the queue
rollupId := []byte("test_rollup_id")
- tx := []byte("test_transaction")
+ tx, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
sequencer := NewDummySequencer(rollupId)
req := sequencing.SubmitRollupTransactionRequest{
RollupId: rollupId,
Tx: tx,
}
- _, err := sequencer.SubmitRollupTransaction(context.Background(), req)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req)
assert.NoError(t, err)

// Retrieve the next batch
@@ -178,12 +189,13 @@ func TestDummySequencer_GetNextBatch_LastBatchHashMismatch(t *testing.T) {
// Submit a transaction
rollupId := []byte("test_rollup_id")
sequencer := NewDummySequencer(rollupId)
- tx := []byte("test_transaction")
+ tx, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
req := sequencing.SubmitRollupTransactionRequest{
RollupId: rollupId,
Tx: tx,
}
- _, err := sequencer.SubmitRollupTransaction(context.Background(), req)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req)
assert.NoError(t, err)

// Retrieve the next batch
@@ -195,17 +207,20 @@

// Assert that the batch hash mismatch error is returned
assert.Error(t, err)
- assert.Equal(t, "lastBatch is supposed to be nil", err.Error())
+ assert.ErrorContains(t, err, "batch hash mismatch", "unexpected error message")
}

// Test retrieving a batch with maxBytes limit
func TestDummySequencer_GetNextBatch_MaxBytesLimit(t *testing.T) {
// Define a test rollup ID and multiple transactions
rollupId := []byte("test_rollup_id")
sequencer := NewDummySequencer(rollupId)
- tx1 := []byte("transaction_1")
- tx2 := []byte("transaction_2")
- tx3 := []byte("transaction_3")
+ tx1, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx2, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx3, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)

// Submit multiple transactions
req1 := sequencing.SubmitRollupTransactionRequest{
Expand All @@ -221,7 +236,7 @@ func TestDummySequencer_GetNextBatch_MaxBytesLimit(t *testing.T) {
Tx: tx3,
}

- _, err := sequencer.SubmitRollupTransaction(context.Background(), req1)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req1)
assert.NoError(t, err)
_, err = sequencer.SubmitRollupTransaction(context.Background(), req2)
assert.NoError(t, err)
@@ -267,12 +282,13 @@ func TestDummySequencer_VerifyBatch(t *testing.T) {
// Add and retrieve a batch
rollupId := []byte("test_rollup_id")
sequencer := NewDummySequencer(rollupId)
- tx := []byte("test_transaction")
+ tx, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
req := sequencing.SubmitRollupTransactionRequest{
RollupId: rollupId,
Tx: tx,
}
- _, err := sequencer.SubmitRollupTransaction(context.Background(), req)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req)
assert.NoError(t, err)

// Get the next batch to generate batch hash
@@ -320,8 +336,10 @@ func TestDummySequencer_VerifyBatchWithMultipleTransactions(t *testing.T) {
// Define a test rollup ID and multiple transactions
rollupId := []byte("test_rollup_id")
sequencer := NewDummySequencer(rollupId)
- tx1 := []byte("transaction_1")
- tx2 := []byte("transaction_2")
+ tx1, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)
+ tx2, err := GenerateSecureRandomBytes(32)
+ assert.NoError(t, err)

// Submit multiple transactions
req1 := sequencing.SubmitRollupTransactionRequest{
Expand All @@ -333,7 +351,7 @@ func TestDummySequencer_VerifyBatchWithMultipleTransactions(t *testing.T) {
Tx: tx2,
}

- _, err := sequencer.SubmitRollupTransaction(context.Background(), req1)
+ _, err = sequencer.SubmitRollupTransaction(context.Background(), req1)
assert.NoError(t, err)
_, err = sequencer.SubmitRollupTransaction(context.Background(), req2)
assert.NoError(t, err)
@@ -375,3 +393,16 @@ func TestDummySequencer_VerifyBatch_NotFound(t *testing.T) {
assert.NoError(t, err)
assert.False(t, verifyResp.Status)
}

// GenerateSecureRandomBytes generates cryptographically secure random bytes of the given length.
func GenerateSecureRandomBytes(length int) ([]byte, error) {
Collaborator: nit: IMO for test helpers like this, you should just panic on error to save all the error checking in the tests, since errors here are developer errors and the test would be expected to fail until fixed. (A sketch of such a variant follows the helper below.)

if length <= 0 {
return nil, fmt.Errorf("invalid length: %d, must be greater than 0", length)
}

buf := make([]byte, length)
if _, err := io.ReadFull(rand.Reader, buf); err != nil {
return nil, fmt.Errorf("failed to generate random bytes: %w", err)
}
return buf, nil
}
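
Editor's note: a rough illustration of the panic-on-error variant suggested in the review comment above. It is not part of this PR; the name MustGenerateSecureRandomBytes is hypothetical and only wraps the helper already shown in this diff.

// MustGenerateSecureRandomBytes is a hypothetical panic-on-error wrapper around
// GenerateSecureRandomBytes: tests call it directly and skip the assert.NoError
// checks, since a failure here is a developer error and the test should fail loudly.
func MustGenerateSecureRandomBytes(length int) []byte {
	buf, err := GenerateSecureRandomBytes(length)
	if err != nil {
		panic(err)
	}
	return buf
}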
126 changes: 126 additions & 0 deletions test/multi_rollup_sequencer.go
@@ -0,0 +1,126 @@
package test

import (
"context"
"encoding/hex"
"fmt"
"reflect"
"sync"
"time"

"github.com/rollkit/go-sequencing"
)

// MultiRollupSequencer is a sequencer for testing that serves multiple rollups
type MultiRollupSequencer struct {
rollups map[string]*RollupData
rollupsMutex sync.RWMutex
}

// RollupData holds the data for a specific rollup, including its transaction queue, last batch hash, and seen batches.
type RollupData struct {
tq *TransactionQueue
lastBatchHash []byte
lastBatchHashMutex sync.RWMutex

seenBatches map[string]struct{}
seenBatchesMutex sync.Mutex
Comment on lines +26 to +27

🛠️ Refactor suggestion

Consider using sync.RWMutex for seenBatchesMutex to improve read performance

Currently, seenBatchesMutex is a sync.Mutex, which can become a bottleneck under high read concurrency. Since the VerifyBatch method frequently reads from seenBatches, switching to a sync.RWMutex allows multiple goroutines to read concurrently, enhancing performance. (An illustrative sketch of this change follows the struct below.)

}
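
Editor's note: a minimal sketch of the RWMutex suggestion above. It is not part of this PR; it assumes the file's existing sync and encoding/hex imports, and the type and method names (rollupDataWithRWMutex, hasSeenBatch) are hypothetical.

// Sketch only: seenBatchesMutex declared as sync.RWMutex so that lookups such as
// the one in VerifyBatch can take a shared read lock, while writers still take Lock().
type rollupDataWithRWMutex struct {
	seenBatches      map[string]struct{}
	seenBatchesMutex sync.RWMutex
}

func (r *rollupDataWithRWMutex) hasSeenBatch(batchHash []byte) bool {
	r.seenBatchesMutex.RLock()
	defer r.seenBatchesMutex.RUnlock()
	_, exists := r.seenBatches[hex.EncodeToString(batchHash)]
	return exists
}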

// SubmitRollupTransaction implements sequencing.Sequencer.
func (d *MultiRollupSequencer) SubmitRollupTransaction(ctx context.Context, req sequencing.SubmitRollupTransactionRequest) (*sequencing.SubmitRollupTransactionResponse, error) {
rollup, err := d.getOrCreateRollup(req.RollupId)
if err != nil {
return nil, err
}
rollup.tq.AddTransaction(req.Tx)
return &sequencing.SubmitRollupTransactionResponse{}, nil
}
Comment on lines +30 to +38
@coderabbitai coderabbitai bot (Oct 17, 2024)

⚠️ Potential issue

Consider adding mutex protection for AddTransaction

The SubmitRollupTransaction method correctly retrieves or creates a rollup, but the AddTransaction call on the transaction queue is not protected by a mutex. This could lead to race conditions if multiple goroutines are submitting transactions concurrently.

Consider adding mutex protection around the AddTransaction call:

 func (d *MultiRollupSequencer) SubmitRollupTransaction(ctx context.Context, req sequencing.SubmitRollupTransactionRequest) (*sequencing.SubmitRollupTransactionResponse, error) {
 	rollup, err := d.getOrCreateRollup(req.RollupId)
 	if err != nil {
 		return nil, err
 	}
+	rollup.tq.mu.Lock()
+	defer rollup.tq.mu.Unlock()
 	rollup.tq.AddTransaction(req.Tx)
 	return &sequencing.SubmitRollupTransactionResponse{}, nil
 }

Committable suggestion was skipped due to low confidence.

gupadhyaya (Member, Author): lock is done inside AddTransaction

coderabbitai: @gupadhyaya, thank you for the clarification. I see that the locking is already handled inside AddTransaction. I'll note this for future reviews.

(・ω・)

✏️ Learnings added
Learnt from: gupadhyaya
PR: rollkit/go-sequencing#18
File: test/multi_rollup_sequencer.go:30-38
Timestamp: 2024-10-17T06:12:48.426Z
Learning: In `MultiRollupSequencer` within `test/multi_rollup_sequencer.go`, the `AddTransaction` method of `TransactionQueue` handles mutex locking internally, so additional mutex protection in `SubmitRollupTransaction` is unnecessary.

// GetNextBatch implements sequencing.Sequencer.
func (d *MultiRollupSequencer) GetNextBatch(ctx context.Context, req sequencing.GetNextBatchRequest) (*sequencing.GetNextBatchResponse, error) {
rollup, err := d.getOrCreateRollup(req.RollupId)
if err != nil {
return nil, err
}

now := time.Now()
rollup.lastBatchHashMutex.RLock()
lastBatchHash := rollup.lastBatchHash
rollup.lastBatchHashMutex.RUnlock()

if !reflect.DeepEqual(lastBatchHash, req.LastBatchHash) {
return nil, fmt.Errorf("batch hash mismatch: lastBatchHash = %x, req.LastBatchHash = %x", lastBatchHash, req.LastBatchHash)
}

batch := rollup.tq.GetNextBatch(req.MaxBytes)
batchRes := &sequencing.GetNextBatchResponse{Batch: batch, Timestamp: now}
// If there are no transactions, return empty batch without updating the last batch hash
if len(batch.Transactions) == 0 {
return batchRes, nil
}

h, err := batch.Hash()
if err != nil {
return nil, err
}

rollup.lastBatchHashMutex.Lock()
rollup.lastBatchHash = h
rollup.lastBatchHashMutex.Unlock()

rollup.seenBatchesMutex.Lock()
rollup.seenBatches[hex.EncodeToString(h)] = struct{}{}
rollup.seenBatchesMutex.Unlock()
return batchRes, nil
}
Comment on lines +40 to +76

🛠️ Refactor suggestion

Consider optimizing the order of operations in GetNextBatch

The GetNextBatch method is well-implemented with proper synchronization. However, it can be optimized by checking for empty batches before calculating the hash.

Consider reordering the operations as follows:

 batch := rollup.tq.GetNextBatch(req.MaxBytes)
 batchRes := &sequencing.GetNextBatchResponse{Batch: batch, Timestamp: now}
+// If there are no transactions, return empty batch without updating the last batch hash
+if len(batch.Transactions) == 0 {
+    return batchRes, nil
+}

 h, err := batch.Hash()
 if err != nil {
     return nil, err
 }

-// If there are no transactions, return empty batch without updating the last batch hash
-if len(batch.Transactions) == 0 {
-    return batchRes, nil
-}

This change avoids unnecessary hash calculation for empty batches, potentially improving performance.


// VerifyBatch implements sequencing.Sequencer.
func (d *MultiRollupSequencer) VerifyBatch(ctx context.Context, req sequencing.VerifyBatchRequest) (*sequencing.VerifyBatchResponse, error) {
rollup, err := d.getOrCreateRollup(req.RollupId)
if err != nil {
return nil, err
}

rollup.seenBatchesMutex.Lock()
defer rollup.seenBatchesMutex.Unlock()
key := hex.EncodeToString(req.BatchHash)
if _, exists := rollup.seenBatches[key]; exists {
return &sequencing.VerifyBatchResponse{Status: true}, nil
}
return &sequencing.VerifyBatchResponse{Status: false}, nil
}

// getOrCreateRollup returns the RollupData for a given rollupId, creating it if necessary.
func (d *MultiRollupSequencer) getOrCreateRollup(rollupId []byte) (*RollupData, error) {
rollupKey := hex.EncodeToString(rollupId)

d.rollupsMutex.Lock()
defer d.rollupsMutex.Unlock()
rollup, exists := d.rollups[rollupKey]
if exists {
return rollup, nil
}

// Double-check existence after acquiring write lock
if rollup, exists := d.rollups[rollupKey]; exists {
return rollup, nil
}

// Create a new RollupData if it doesn't exist
rollup = &RollupData{
tq: NewTransactionQueue(),
seenBatches: make(map[string]struct{}, 0),
}
d.rollups[rollupKey] = rollup
return rollup, nil
}
Comment on lines +94 to +117

🛠️ Refactor suggestion

Optimize locking strategy in getOrCreateRollup

The getOrCreateRollup method correctly implements the double-checked locking pattern for thread-safety. However, it can be optimized by reducing the time spent holding the write lock.

Consider the following optimization:

 func (d *MultiRollupSequencer) getOrCreateRollup(rollupId []byte) (*RollupData, error) {
     rollupKey := hex.EncodeToString(rollupId)

-    d.rollupsMutex.Lock()
-    defer d.rollupsMutex.Unlock()
+    d.rollupsMutex.RLock()
     rollup, exists := d.rollups[rollupKey]
     if exists {
+        d.rollupsMutex.RUnlock()
         return rollup, nil
     }
+    d.rollupsMutex.RUnlock()

+    // Create a new RollupData if it doesn't exist
+    newRollup := &RollupData{
+        tq:          NewTransactionQueue(),
+        seenBatches: make(map[string]struct{}, 0),
+    }

+    d.rollupsMutex.Lock()
+    defer d.rollupsMutex.Unlock()

     // Double-check existence after acquiring write lock
     if rollup, exists := d.rollups[rollupKey]; exists {
         return rollup, nil
     }

-    // Create a new RollupData if it doesn't exist
-    rollup = &RollupData{
-        tq:          NewTransactionQueue(),
-        seenBatches: make(map[string]struct{}, 0),
-    }
-    d.rollups[rollupKey] = rollup
-    return rollup, nil
+    d.rollups[rollupKey] = newRollup
+    return newRollup, nil
 }

This change reduces the time spent holding the write lock, potentially improving performance in high-concurrency scenarios.


// NewMultiRollupSequencer creates a new MultiRollupSequencer
func NewMultiRollupSequencer() *MultiRollupSequencer {
return &MultiRollupSequencer{
rollups: make(map[string]*RollupData),
}
}

var _ sequencing.Sequencer = &MultiRollupSequencer{}
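
Editor's note: a rough usage sketch of the new sequencer, not part of this PR. It uses only the request/response types and fields already exercised in this diff and its tests; the function name is illustrative, error handling is elided, and MaxBytes is assumed to accept math.MaxUint64 as in the existing tests.

// Sketch only: two rollups are served independently by one sequencer instance;
// each gets its own transaction queue, last-batch hash, and set of seen batches.
func exampleMultiRollupUsage() {
	ctx := context.Background()
	seq := NewMultiRollupSequencer()

	rollupA, rollupB := []byte("rollup_a"), []byte("rollup_b")
	_, _ = seq.SubmitRollupTransaction(ctx, sequencing.SubmitRollupTransactionRequest{RollupId: rollupA, Tx: []byte("tx_a")})
	_, _ = seq.SubmitRollupTransaction(ctx, sequencing.SubmitRollupTransactionRequest{RollupId: rollupB, Tx: []byte("tx_b")})

	// A nil LastBatchHash is valid for the first batch of each rollup.
	resA, _ := seq.GetNextBatch(ctx, sequencing.GetNextBatchRequest{RollupId: rollupA, MaxBytes: math.MaxUint64})
	resB, _ := seq.GetNextBatch(ctx, sequencing.GetNextBatchRequest{RollupId: rollupB, MaxBytes: math.MaxUint64})

	hashA, _ := resA.Batch.Hash()
	hashB, _ := resB.Batch.Hash()

	// Each rollup only recognizes batches it produced itself.
	okA, _ := seq.VerifyBatch(ctx, sequencing.VerifyBatchRequest{RollupId: rollupA, BatchHash: hashA})
	okB, _ := seq.VerifyBatch(ctx, sequencing.VerifyBatchRequest{RollupId: rollupB, BatchHash: hashB})
	fmt.Println(okA.Status, okB.Status) // true true
}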