Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

gc: improve the performance of the JuiceFS gc command #5683

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions cmd/gc.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,11 @@ $ juicefs gc redis://localhost --delete`,
Value: 10,
Usage: "number threads to delete leaked objects",
},
&cli.IntFlag{
Name: "cleanup-threads",
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we can reuse the threads for cleanup

Value: 10,
Usage: "number threads for cleaning up pending deleted files",
},
},
}
}
Expand All @@ -79,6 +84,10 @@ func gc(ctx *cli.Context) error {
removePassword(ctx.Args().Get(0))
metaConf := meta.DefaultConf()
metaConf.MaxDeletes = ctx.Int("threads")
metaConf.MaxCleanups = ctx.Int("cleanup-threads")
if metaConf.MaxCleanups <= 0 {
logger.Fatalf("cleanup threads should be greater than 0")
}
metaConf.NoBGJob = true
m := meta.NewClient(ctx.Args().Get(0), metaConf)
format, err := m.Load(true)
Expand Down
1 change: 1 addition & 0 deletions pkg/meta/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ type Config struct {
Strict bool // update ctime
Retries int
MaxDeletes int
MaxCleanups int // number of threads for cleaning up pending deleted files
SkipDirNlink int
CaseInsensi bool
ReadOnly bool
Expand Down
61 changes: 42 additions & 19 deletions pkg/meta/redis.go
Original file line number Diff line number Diff line change
Expand Up @@ -3341,36 +3341,59 @@ func (m *redisMeta) scanPendingFiles(ctx Context, scan pendingFileScan) error {
visited := make(map[Ino]bool)
start := int64(0)
const batchSize = 1000

threads := m.conf.MaxCleanups
deleteFileChan := make(chan redis.Z, threads)
var wg sync.WaitGroup

for i := 0; i < threads; i++ {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we move this part into base.go to reduce the duplicated code?

wg.Add(1)
go func() {
defer wg.Done()
for p := range deleteFileChan {
v := p.Member.(string)
ps := strings.Split(v, ":")
if len(ps) != 2 { // will be cleaned up as legacy
continue
}
inode, _ := strconv.ParseUint(ps[0], 10, 64)
if visited[Ino(inode)] {
continue
}
visited[Ino(inode)] = true
size, _ := strconv.ParseUint(ps[1], 10, 64)
clean, err := scan(Ino(inode), size, int64(p.Score))
if err != nil {
logger.Errorf("scan pending deleted files: %s", err)
continue
}
if clean {
m.doDeleteFileData_(Ino(inode), size, v)
}
}
}()
}

for {
pairs, err := m.rdb.ZRangeWithScores(Background(), m.delfiles(), start, start+batchSize).Result()
if err != nil {
close(deleteFileChan)
wg.Wait()
return err
}
start += batchSize
for _, p := range pairs {
v := p.Member.(string)
ps := strings.Split(v, ":")
if len(ps) != 2 { // will be cleaned up as legacy
continue
}
inode, _ := strconv.ParseUint(ps[0], 10, 64)
if visited[Ino(inode)] {
continue
}
visited[Ino(inode)] = true
size, _ := strconv.ParseUint(ps[1], 10, 64)
clean, err := scan(Ino(inode), size, int64(p.Score))
if err != nil {
return err
}
if clean {
m.doDeleteFileData_(Ino(inode), size, v)
}
deleteFileChan <- p
}

start += batchSize

if len(pairs) < batchSize {
break
}
}

close(deleteFileChan)
wg.Wait()
return nil
}

Expand Down
33 changes: 26 additions & 7 deletions pkg/meta/sql.go
Original file line number Diff line number Diff line change
Expand Up @@ -3295,15 +3295,34 @@ func (m *dbMeta) scanPendingFiles(ctx Context, scan pendingFileScan) error {
if err != nil {
return err
}

threads := m.conf.MaxCleanups
deleteFileChan := make(chan delfile, threads)
var wg sync.WaitGroup

for i := 0; i < threads; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for ds := range deleteFileChan {
clean, err := scan(ds.Inode, ds.Length, ds.Expire)
if err != nil {
logger.Errorf("scan pending deleted files: %s", err)
continue
}
if clean {
m.doDeleteFileData(ds.Inode, ds.Length)
}
}
}()
}

for _, ds := range dfs {
clean, err := scan(ds.Inode, ds.Length, ds.Expire)
if err != nil {
return err
}
if clean {
m.doDeleteFileData(ds.Inode, ds.Length)
}
deleteFileChan <- ds
}

close(deleteFileChan)
wg.Wait()
return nil
}

Expand Down
65 changes: 51 additions & 14 deletions pkg/meta/tkv.go
Original file line number Diff line number Diff line change
Expand Up @@ -2574,28 +2574,65 @@ func (m *kvMeta) scanPendingFiles(ctx Context, scan pendingFileScan) error {
}
// deleted files: Diiiiiiiissssssss
klen := 1 + 8 + 8
pairs, err := m.scanValues(m.fmtKey("D"), -1, func(k, v []byte) bool {
return len(k) == klen
})
if err != nil {
return err
batchSize := 100000

threads := m.conf.MaxCleanups
deleteFileChan := make(chan pair, threads)
var wg sync.WaitGroup

for i := 0; i < threads; i++ {
wg.Add(1)
go func() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If an exception occurs in the middle, the GC command should print it; the same applies to the Redis and SQL implementations.

defer wg.Done()
for pair := range deleteFileChan {
key, value := pair.key, pair.value
if len(key) != klen {
logger.Errorf("invalid key %x", key)
continue
}
ino := m.decodeInode([]byte(key)[1:9])
size := binary.BigEndian.Uint64([]byte(key)[9:])
ts := m.parseInt64(value)
clean, err := scan(ino, size, ts)
if err != nil {
logger.Errorf("scan pending deleted files: %s", err)
continue
}
if clean {
m.doDeleteFileData(ino, size)
}
}
}()
}

for key, value := range pairs {
if len(key) != klen {
return fmt.Errorf("invalid key %x", key)
startKey := m.fmtKey("D")
endKey := nextKey(startKey)
for {
keys, values, err := m.scan(startKey, endKey, batchSize, func(k, v []byte) bool {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

use client.scan directly

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

client.scan is a full-scan method that retrieves all pending deleted files, so it may not be suitable for handling a large number of small files.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The scan implementation in TiKV and FDB already handles this situation by fetching in batches, but etcd doesn’t. Let’s keep it like this then.

return len(k) == klen
})
if len(keys) == 0 {
break
}
ino := m.decodeInode([]byte(key)[1:9])
size := binary.BigEndian.Uint64([]byte(key)[9:])
ts := m.parseInt64(value)
clean, err := scan(ino, size, ts)
if err != nil {
logger.Errorf("scan pending deleted files: %s", err)
close(deleteFileChan)
wg.Wait()
return err
}
if clean {
m.doDeleteFileData(ino, size)
startKey = nextKey(keys[len(keys)-1])

for index, key := range keys {
deleteFileChan <- pair{key, values[index]}
}

if len(keys) < batchSize {
break
}
}

close(deleteFileChan)
wg.Wait()
return nil
}

Expand Down