
Commit 5b1fff8

Merge pull request #19251 from fuweid/34-fix-19179
[3.4] mvcc: restore tombstone index if it's first revision
2 parents: fe42f91 + f3cae13

File tree: 5 files changed, +154 -4 lines


mvcc/key_index.go

Lines changed: 9 additions & 0 deletions
@@ -127,6 +127,15 @@ func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) {
 	keysGauge.Inc()
 }

+// restoreTombstone is used to restore a tombstone revision, which is the only
+// revision so far for a key. We don't know the creating revision (i.e. already
+// compacted) of the key, so set it empty.
+func (ki *keyIndex) restoreTombstone(lg *zap.Logger, main, sub int64) {
+	ki.restore(lg, revision{}, revision{main, sub}, 1)
+	ki.generations = append(ki.generations, generation{})
+	keysGauge.Dec()
+}
+
 // tombstone puts a revision, pointing to a tombstone, to the keyIndex.
 // It also creates a new empty generation in the keyIndex.
 // It returns ErrRevisionNotFound when tombstone on an empty generation.
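For orientation, the snippet below is a minimal, runnable sketch (not the etcd source; revision, generation, and keyIndex here are simplified stand-ins for the internal mvcc types) of the index shape restoreTombstone leaves behind for a key whose only surviving revision is a tombstone at revision 16: one closed generation holding just the tombstone, with a zero created revision because the real create revision was already compacted away, followed by an empty generation so later puts open a fresh generation, exactly as the regular tombstone() path would.

```go
package main

import "fmt"

// Simplified stand-ins for mvcc's internal types; illustrative only.
type revision struct{ main, sub int64 }

type generation struct {
	ver     int64
	created revision // zero value: the original create revision was already compacted away
	revs    []revision
}

type keyIndex struct {
	key         []byte
	modified    revision
	generations []generation
}

func main() {
	// Roughly what restoreTombstone(lg, 16, 0) produces for key "foo".
	ki := keyIndex{
		key:      []byte("foo"),
		modified: revision{main: 16},
		generations: []generation{
			// closed generation containing only the tombstone revision {16, 0}
			{ver: 1, created: revision{}, revs: []revision{{main: 16}}},
			// trailing empty generation, same as after a normal tombstone()
			{},
		},
	}
	fmt.Printf("%+v\n", ki)
}
```

The keysGauge.Dec() call appears to offset the Inc() performed inside restore, so a key that exists only as a tombstone is not counted as a live key.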

mvcc/key_index_test.go

Lines changed: 42 additions & 0 deletions
@@ -19,9 +19,51 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
 )

+func TestRestoreTombstone(t *testing.T) {
+	lg := zaptest.NewLogger(t)
+
+	// restore from tombstone
+	//
+	// key: "foo"
+	// modified: 16
+	// "created": 16
+	// generations:
+	//    {empty}
+	//    {{16, 0}(t)[0]}
+	//
+	ki := &keyIndex{key: []byte("foo")}
+	ki.restoreTombstone(lg, 16, 0)
+
+	// get should return not found
+	for retAt := 16; retAt <= 20; retAt++ {
+		_, _, _, err := ki.get(lg, int64(retAt))
+		require.ErrorIs(t, err, ErrRevisionNotFound)
+	}
+
+	// doCompact should keep that tombstone
+	availables := map[revision]struct{}{}
+	ki.doCompact(16, availables)
+	require.Len(t, availables, 1)
+	_, ok := availables[revision{main: 16}]
+	require.True(t, ok)
+
+	// should be able to put new revisions
+	ki.put(lg, 17, 0)
+	ki.put(lg, 18, 0)
+	revs := ki.since(lg, 16)
+	require.Equal(t, []revision{{16, 0}, {17, 0}, {18, 0}}, revs)
+
+	// compaction should remove restored tombstone
+	ki.compact(lg, 17, map[revision]struct{}{})
+	require.Len(t, ki.generations, 1)
+	require.Equal(t, []revision{{17, 0}, {18, 0}}, ki.generations[0].revs)
+}
+
 func TestKeyIndexGet(t *testing.T) {
 	// key: "foo"
 	// rev: 16

mvcc/kv_test.go

Lines changed: 19 additions & 2 deletions
@@ -29,7 +29,9 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
+	"github.com/stretchr/testify/assert"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
 )

 // Functional tests for features implemented in v3 store. It treats v3 store
@@ -617,6 +619,8 @@ func TestKVHash(t *testing.T) {
 }

 func TestKVRestore(t *testing.T) {
+	compactBatchLimit := 5
+
 	tests := []func(kv KV){
 		func(kv KV) {
 			kv.Put([]byte("foo"), []byte("bar0"), 1)
@@ -634,10 +638,23 @@
 			kv.Put([]byte("foo"), []byte("bar1"), 2)
 			kv.Compact(traceutil.TODO(), 1)
 		},
+		func(kv KV) { // after restore, foo1 key only has tombstone revision
+			kv.Put([]byte("foo1"), []byte("bar1"), 0)
+			kv.Put([]byte("foo2"), []byte("bar2"), 0)
+			kv.Put([]byte("foo3"), []byte("bar3"), 0)
+			kv.Put([]byte("foo4"), []byte("bar4"), 0)
+			kv.Put([]byte("foo5"), []byte("bar5"), 0)
+			_, delAtRev := kv.DeleteRange([]byte("foo1"), nil)
+			assert.Equal(t, int64(7), delAtRev)
+
+			// after compaction and restore, foo1 key only has tombstone revision
+			ch, _ := kv.Compact(traceutil.TODO(), delAtRev)
+			<-ch
+		},
 	}
 	for i, tt := range tests {
 		b, tmpPath := backend.NewDefaultTmpBackend()
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, nil, StoreConfig{CompactionBatchLimit: compactBatchLimit})
 		tt(s)
 		var kvss [][]mvccpb.KeyValue
 		for k := int64(0); k < 10; k++ {
@@ -649,7 +666,7 @@ func TestKVRestore(t *testing.T) {
 		s.Close()

 		// ns should recover the the previous state from backend.
-		ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, nil, StoreConfig{CompactionBatchLimit: compactBatchLimit})

 		if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
 			t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
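The assert.Equal(t, int64(7), delAtRev) check above rests on straightforward revision arithmetic: a fresh mvcc store starts at revision 1, each of the five Puts bumps the revision by one (revisions 2 through 6), and the DeleteRange writes the tombstone at revision 7. A throwaway sketch of that count, purely for illustration:

```go
package main

import "fmt"

func main() {
	rev := int64(1) // a new mvcc store starts at revision 1
	for i := 0; i < 5; i++ {
		rev++ // one revision per Put of foo1..foo5
	}
	rev++ // DeleteRange("foo1") writes the tombstone
	fmt.Println(rev) // 7, matching the asserted delAtRev
}
```

Compacting at exactly that revision leaves foo1 with nothing but its tombstone in the backend, which is the shape the restore path previously mishandled.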

mvcc/kvstore.go

Lines changed: 6 additions & 2 deletions
@@ -571,8 +571,12 @@ func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
 					continue
 				}
 				ki.put(lg, rev.main, rev.sub)
-			} else if !isTombstone(rkv.key) {
-				ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+			} else {
+				if isTombstone(rkv.key) {
+					ki.restoreTombstone(lg, rev.main, rev.sub)
+				} else {
+					ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+				}
 				idx.Insert(ki)
 				kiCache[rkv.kstr] = ki
 			}
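To make the behavioural change concrete, here is a runnable, deliberately simplified model of the per-record decision made while rebuilding the index on restore. Everything below (the record type, the "t" suffix convention, restoreIndex) is an illustrative stand-in rather than the mvcc API; the point is only that a key whose sole surviving revision is a tombstone used to be skipped by the old `else if !isTombstone(...)` branch and is now indexed via restoreTombstone.

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for the real mvcc types.
type revision struct{ main, sub int64 }

// record models one key/value entry read back from the backend during restore.
// A trailing "t" marks a tombstone, loosely mirroring mvcc's isTombstone check.
type record struct {
	key       string
	rev       revision
	createRev int64
}

func isTombstone(key string) bool { return strings.HasSuffix(key, "t") }

type indexEntry struct {
	created   revision
	modified  revision
	tombstone bool
}

// restoreIndex mimics the fixed branch: tombstone-only keys stay in the index.
func restoreIndex(records []record) map[string]indexEntry {
	idx := make(map[string]indexEntry)
	for _, r := range records {
		name := strings.TrimSuffix(r.key, "t")
		if _, ok := idx[name]; ok {
			continue // simplified: follow-up revisions are ignored in this sketch
		}
		if isTombstone(r.key) {
			// new behaviour: keep the tombstone; the create revision is unknown
			// (already compacted), so it stays at the zero value
			idx[name] = indexEntry{modified: r.rev, tombstone: true}
			continue
		}
		idx[name] = indexEntry{created: revision{main: r.createRev}, modified: r.rev}
	}
	return idx
}

func main() {
	records := []record{
		{key: "foo1t", rev: revision{main: 7}},             // only a tombstone survived compaction
		{key: "foo2", rev: revision{main: 3}, createRev: 3}, // ordinary live key
	}
	idx := restoreIndex(records)
	fmt.Printf("foo1: %+v\nfoo2: %+v\n", idx["foo1"], idx["foo2"])
}
```

With the old branch, such a key would simply be absent from the rebuilt index, which is how a deletion event could get lost after a restart.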

tests/e2e/watch_test.go

Lines changed: 78 additions & 0 deletions
@@ -476,3 +476,81 @@ func testStartWatcherFromCompactedRevision(t *testing.T, performCompactOnTombstone bool) {
 		}
 	}
 }
+
+// TestResumeCompactionOnTombstone verifies whether a deletion event is preserved
+// when etcd restarts and resumes compaction on a key that only has a tombstone revision.
+func TestResumeCompactionOnTombstone(t *testing.T) {
+	defer testutil.AfterTest(t)
+
+	ctx := context.Background()
+	compactBatchLimit := 5
+
+	clus, cerr := newEtcdProcessCluster(t, &etcdProcessClusterConfig{
+		clusterSize:                1,
+		goFailEnabled:              true,
+		clientTLS:                  clientTLS,
+		isClientAutoTLS:            true,
+		CompactionBatchLimit:       compactBatchLimit,
+		WatchProcessNotifyInterval: 100 * time.Millisecond,
+	})
+	require.NoError(t, cerr)
+	defer clus.Close()
+
+	c1 := newClient(t, clus.EndpointsGRPC(), clientTLS, true)
+	defer c1.Close()
+
+	keyPrefix := "/key-"
+	for i := 0; i < compactBatchLimit; i++ {
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
+		value := fmt.Sprintf("%d", i)
+
+		t.Logf("PUT key=%s, val=%s", key, value)
+		_, err := c1.KV.Put(ctx, key, value)
+		require.NoError(t, err)
+	}
+
+	firstKey := keyPrefix + "0"
+	t.Logf("DELETE key=%s", firstKey)
+	deleteResp, err := c1.KV.Delete(ctx, firstKey)
+	require.NoError(t, err)
+
+	var deleteEvent *clientv3.Event
+	select {
+	case watchResp := <-c1.Watch(ctx, firstKey, clientv3.WithRev(deleteResp.Header.Revision)):
+		require.Len(t, watchResp.Events, 1)
+
+		require.Equal(t, mvccpb.DELETE, watchResp.Events[0].Type)
+		deletedKey := string(watchResp.Events[0].Kv.Key)
+		require.Equal(t, firstKey, deletedKey)
+
+		deleteEvent = watchResp.Events[0]
+	case <-time.After(100 * time.Millisecond):
+		t.Fatal("timed out getting watch response")
+	}
+
+	require.NoError(t, clus.procs[0].Failpoints().SetupHTTP(ctx, "compactBeforeSetFinishedCompact", `panic`))
+
+	t.Logf("COMPACT rev=%d", deleteResp.Header.Revision)
+	_, err = c1.KV.Compact(ctx, deleteResp.Header.Revision, clientv3.WithCompactPhysical())
+	require.Error(t, err)
+
+	require.Error(t, clus.procs[0].Stop())
+	// NOTE: The proc panics and exit code is 2. It's impossible to restart
+	// that etcd proc because last exit code is 2 and Restart() refuses to
+	// start new one. Using IsRunning() function is to cleanup status.
+	require.False(t, clus.procs[0].IsRunning())
+	require.NoError(t, clus.Restart())
+
+	c2 := newClient(t, clus.EndpointsGRPC(), clientTLS, true)
+	defer c2.Close()
+
+	watchChan := c2.Watch(ctx, firstKey, clientv3.WithRev(deleteResp.Header.Revision))
+	select {
+	case watchResp := <-watchChan:
+		require.Equal(t, []*clientv3.Event{deleteEvent}, watchResp.Events)
+	case <-time.After(100 * time.Millisecond):
+		// we care only about the first response, but have an
+		// escape hatch in case the watch response is delayed.
+		t.Fatal("timed out getting watch response")
+	}
+}
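For context, the client-side view of the scenario this test guards against can be sketched with the public clientv3 API roughly as follows. This is a hedged sketch, not part of the change: it assumes a single-member cluster reachable at localhost:2379 and the go.etcd.io/etcd/clientv3 import path used on the 3.4 branch, and it omits the failpoint-induced crash that the e2e harness injects between the Compact call and the restart.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	ctx := context.Background()
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumption: local single-member cluster
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Put a handful of keys, then delete the first one.
	for i := 0; i < 5; i++ {
		if _, err := cli.Put(ctx, fmt.Sprintf("/key-%d", i), fmt.Sprintf("%d", i)); err != nil {
			panic(err)
		}
	}
	delResp, err := cli.Delete(ctx, "/key-0")
	if err != nil {
		panic(err)
	}

	// Compact at the delete revision. In the e2e test a failpoint makes the
	// server panic mid-compaction, so after a restart it resumes compaction on
	// a key whose only remaining revision is the tombstone.
	_, _ = cli.Compact(ctx, delResp.Header.Revision, clientv3.WithCompactPhysical())

	// After the restart, a watch starting at the delete revision should still
	// deliver the DELETE event for /key-0.
	wch := cli.Watch(ctx, "/key-0", clientv3.WithRev(delResp.Header.Revision))
	for resp := range wch {
		for _, ev := range resp.Events {
			fmt.Printf("event: %s %s at rev %d\n", ev.Type, ev.Kv.Key, ev.Kv.ModRevision)
		}
		break
	}
}
```

Before the fix, that DELETE event could be lost after the restart, because the restored index no longer knew about the tombstone-only key.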
