support iterator

coocood committed May 27, 2017
1 parent c7b4841 commit a47e26e

Showing 3 changed files with 112 additions and 2 deletions.
6 changes: 4 additions & 2 deletions README.md
@@ -17,6 +17,7 @@ without increased latency and degraded throughput.
* Nearly LRU algorithm
* Strictly limited memory usage
* Comes with a toy server that supports a few basic Redis commands with pipelining
* Iterator support

## Performance

@@ -51,8 +52,8 @@ fmt.Println("entry count ", cache.EntryCount())

## Notice

* Memory is preallocated.
* If you allocate a large amount of memory, you may need to set `debug.SetGCPercent()`
to a much lower percentage to get a normal GC frequency.
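
For illustration, a minimal sketch of this setup (the 100 MB cache size and the GC target of 20 are assumed example values, not recommendations):

```go
package main

import (
	"fmt"
	"runtime/debug"

	"github.com/coocood/freecache"
)

func main() {
	cacheSize := 100 * 1024 * 1024 // 100 MB, preallocated up front
	cache := freecache.NewCache(cacheSize)
	// Lower the GC target so the large preallocated buffer does not
	// stretch the interval between GC cycles.
	debug.SetGCPercent(20)

	cache.Set([]byte("key"), []byte("value"), 0)
	v, _ := cache.Get([]byte("key"))
	fmt.Printf("%s\n", v)
}
```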

## How it is done
@@ -70,4 +71,5 @@ Each segment has its own lock, so it supports high concurrent access.
* Support resizing the cache size at runtime.

## License

The MIT License
28 changes: 28 additions & 0 deletions cache_test.go
@@ -304,6 +304,34 @@ func TestInt64Key(t *testing.T) {
	}
}

func TestIterator(t *testing.T) {
	cache := NewCache(1024)
	count := 10000
	for i := 0; i < count; i++ {
		err := cache.Set([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("val%d", i)), 0)
		if err != nil {
			t.Error(err)
		}
	}
	// Set a value that expires to make sure expired entries are not returned.
	cache.Set([]byte("abc"), []byte("def"), 1)
	time.Sleep(2 * time.Second)
	it := cache.NewIterator()
	for i := 0; i < count; i++ {
		entry := it.Next()
		if entry == nil {
			t.Fatalf("entry is nil for %d", i)
		}
		if string(entry.Value) != "val"+string(entry.Key) {
			t.Fatalf("entry key value not match %s %s", entry.Key, entry.Value)
		}
	}
	e := it.Next()
	if e != nil {
		t.Fail()
	}
}

func BenchmarkCacheSet(b *testing.B) {
	cache := NewCache(256 * 1024 * 1024)
	var key [8]byte
80 changes: 80 additions & 0 deletions iterator.go
@@ -0,0 +1,80 @@
package freecache

import (
	"time"
	"unsafe"
)

// Iterator iterates the entries for the cache.
type Iterator struct {
	cache      *Cache
	segmentIdx int
	slotIdx    int
	entryIdx   int
}

// Entry represents a key/value pair.
type Entry struct {
	Key   []byte
	Value []byte
}

// Next returns the next entry for the iterator.
// The order of the entries is not guaranteed.
// If there are no more entries to return, nil will be returned.
func (it *Iterator) Next() *Entry {
	for it.segmentIdx < 256 {
		entry := it.nextForSegment(it.segmentIdx)
		if entry != nil {
			return entry
		}
		// Segment exhausted; move on and reset the slot/entry positions.
		it.segmentIdx++
		it.slotIdx = 0
		it.entryIdx = 0
	}
	return nil
}

func (it *Iterator) nextForSegment(segIdx int) *Entry {
	it.cache.locks[segIdx].Lock()
	defer it.cache.locks[segIdx].Unlock()
	seg := &it.cache.segments[segIdx]
	for it.slotIdx < 256 {
		entry := it.nextForSlot(seg, it.slotIdx)
		if entry != nil {
			return entry
		}
		it.slotIdx++
		it.entryIdx = 0
	}
	return nil
}

func (it *Iterator) nextForSlot(seg *segment, slotId int) *Entry {
	slotOff := int32(it.slotIdx) * seg.slotCap
	slot := seg.slotsData[slotOff : slotOff+seg.slotLens[it.slotIdx] : slotOff+seg.slotCap]
	for it.entryIdx < len(slot) {
		ptr := slot[it.entryIdx]
		it.entryIdx++
		now := uint32(time.Now().Unix())
		var hdrBuf [ENTRY_HDR_SIZE]byte
		seg.rb.ReadAt(hdrBuf[:], ptr.offset)
		hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))
		// Skip entries that have already expired; only live entries are returned.
		if hdr.expireAt == 0 || hdr.expireAt > now {
			entry := new(Entry)
			entry.Key = make([]byte, hdr.keyLen)
			entry.Value = make([]byte, hdr.valLen)
			seg.rb.ReadAt(entry.Key, ptr.offset+ENTRY_HDR_SIZE)
			seg.rb.ReadAt(entry.Value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
			return entry
		}
	}
	return nil
}

// NewIterator creates a new iterator for the cache.
func (cache *Cache) NewIterator() *Iterator {
	return &Iterator{
		cache: cache,
	}
}
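
As a usage note, here is a minimal sketch of driving the new iterator API from application code (the import path follows the repository name; the cache size and keys are arbitrary example values):

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(1024 * 1024)
	cache.Set([]byte("a"), []byte("1"), 0)
	cache.Set([]byte("b"), []byte("2"), 0)

	// Walk every live entry; order is not guaranteed, and Next
	// returns nil once the cache has been fully traversed.
	it := cache.NewIterator()
	for entry := it.Next(); entry != nil; entry = it.Next() {
		fmt.Printf("%s => %s\n", entry.Key, entry.Value)
	}
}
```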
