-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathgpq_e2e_test.go
143 lines (120 loc) · 2.96 KB
/
gpq_e2e_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
package gpq_test
import (
"fmt"
"log"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/JustinTimperio/gpq"
"github.com/JustinTimperio/gpq/schema"
)
// TestE2E exercises the full enqueue/prioritize/dequeue lifecycle of a
// disk-backed GPQ: several sender goroutines enqueue batches of items while
// several receiver goroutines drain them, and a background goroutine
// periodically calls Prioritize, until `total` items have been processed.
// Afterwards it verifies both the in-memory queue and the on-disk cache are
// empty.
func TestE2E(t *testing.T) {
	var (
		total      uint64 = 10_000_000
		syncToDisk bool   = true
		lazySync   bool   = true
		maxBuckets uint   = 10
		batchSize  uint   = 10_000
		senders    uint   = 4
		receivers  uint   = 4
	)

	// Every item escalates once per second and times out after five seconds.
	defaultMessageOptions := schema.EnqueueOptions{
		ShouldEscalate: true,
		EscalationRate: time.Duration(time.Second),
		CanTimeout:     true,
		Timeout:        time.Duration(time.Second * 5),
	}
	opts := schema.GPQOptions{
		MaxPriority:              maxBuckets,
		DiskCacheEnabled:         syncToDisk,
		DiskCachePath:            "/tmp/gpq/batch-e2e-parallel",
		DiskCacheCompression:     true,
		DiskEncryptionEnabled:    true,
		DiskEncryptionKey:        []byte("12345678901234567890123456789012"),
		LazyDiskCacheChannelSize: 1_000_000,
		DiskWriteDelay:           time.Duration(time.Second * 5),
		LazyDiskCacheEnabled:     lazySync,
		LazyDiskBatchSize:        10_000,
	}

	_, queue, err := gpq.NewGPQ[uint](opts)
	if err != nil {
		log.Fatalln(err)
	}

	var (
		received  uint64
		removed   uint64
		escalated uint64
	)

	var wg sync.WaitGroup
	// Buffered so every receiver can signal shutdown without blocking, even
	// though the prioritizer goroutine only ever consumes a single signal.
	var shutdown = make(chan struct{}, receivers)

	// Background prioritizer: re-prioritize the queue once per second until
	// a receiver signals shutdown.
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.NewTicker(time.Second * 1)
		// FIX: the ticker was never stopped, leaking its underlying timer.
		defer ticker.Stop()
	breaker:
		for {
			select {
			case <-ticker.C:
				r, e, err := queue.Prioritize()
				if err != nil {
					log.Fatalln(err)
				}
				atomic.AddUint64(&received, uint64(r))
				atomic.AddUint64(&escalated, uint64(e))
				t.Log("Received:", atomic.LoadUint64(&received), "Removed:", atomic.LoadUint64(&removed), "Escalated:", atomic.LoadUint64(&escalated))
			case <-shutdown:
				break breaker
			}
		}
		t.Log("Exited Prioritize")
	}()

	// Senders: each enqueues its share of `total` in miniBatch-sized chunks.
	for i := uint(0); i < senders; i++ {
		i := i // FIX: capture per-iteration value; pre-Go 1.22 all goroutines shared one `i`
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := uint(0); j < (uint(total)/batchSize)/senders; j++ {
				var miniBatch []schema.Item[uint]
				// FIX: inner index renamed from `i` to `k` — it shadowed the
				// worker index used in the log line below.
				for k := uint(0); k < batchSize; k++ {
					p := j % maxBuckets
					item := schema.NewItem(p, j, defaultMessageOptions)
					miniBatch = append(miniBatch, item)
				}
				if err := queue.EnqueueBatch(miniBatch); err != nil {
					log.Fatalln(err)
				}
			}
			t.Log("Worker:" + fmt.Sprint(i) + " Enqueued all items")
		}()
	}

	// Receivers: drain batches until the combined counters reach `total`.
	for i := uint(0); i < receivers; i++ {
		i := i // FIX: capture per-iteration value for the log line below
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				if atomic.LoadUint64(&received)+atomic.LoadUint64(&removed) >= total {
					break
				}
				items, err := queue.DequeueBatch(batchSize)
				if err != nil {
					// Dequeue errors (presumably "queue empty" — racing the
					// senders) are retried until all items are accounted for.
					continue
				}
				atomic.AddUint64(&received, uint64(len(items)))
			}
			t.Log("Worker:" + fmt.Sprint(i) + " Dequeued all items")
			shutdown <- struct{}{}
		}()
	}

	wg.Wait()

	if queue.ItemsInQueue() != 0 {
		t.Fatal("Items in queue:", queue.ItemsInQueue())
	}

	t.Log("Waiting for queue to close")
	queue.Close()

	num := numberOfItemsInDB(opts.DiskCachePath)
	if num > 0 {
		// FIX: was log.Fatalln, which calls os.Exit and skips test cleanup;
		// use t.Fatal for consistency with the in-queue check above.
		t.Fatal("Items in DB:", num)
	}
	t.Log("Batch Test Passed")
}