-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathzdfs.go
340 lines (296 loc) · 9.8 KB
/
zdfs.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
package zdfs
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/containerd/accelerated-container-image/pkg/types"
"github.com/containerd/containerd/v2/core/snapshots"
"github.com/containerd/containerd/v2/core/snapshots/storage"
"github.com/containerd/continuity"
"github.com/distribution/reference"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
	zdfsIsReady  bool       // indicate if zdfs' binaries or rpms are ready
	pouchDirLock sync.Mutex // used by PrepareMetaForPouch(..) to guarantee thread safety while operating on dirs or files
	blockEngine  string     // NOTE(review): not read in this file; presumably selects the block-device backend — confirm against callers
)
const (
	zdfsMetaDir         = "zdfsmeta"              // meta dir that contains the dadi image meta files
	iNewFormat          = ".aaaaaaaaaaaaaaaa.lsmt" // characteristic file of a dadi image
	zdfsChecksumFile    = ".checksum_file"         // file containing the checksum data of each dadi layer file, to guarantee data consistency
	zdfsOssurlFile      = ".oss_url"               // file containing the address of the layer file
	zdfsOssDataSizeFile = ".data_size"             // file containing the size of the layer file
	zdfsOssTypeFile     = ".type"                  // file containing the type, such as layern, commit (layer file on local dir), oss (layer file is in oss)
	zdfsTrace           = ".trace"

	overlaybdBaseLayer = "/opt/overlaybd/baselayers/.commit"
)
// If error is nil, the existence is valid.
// If error is not nil, the existence is invalid. Can't make sure if path exists.
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil //path exists.
}
if os.IsNotExist(err) {
return false, nil //pash doen't exist.
}
return false, err //can't make sure if path exists.
}
// hasOverlaybdBlobRef reports whether dir contains the complete set of
// dadi/zdfs meta files that identify an overlaybd blob layer. All files
// must be present; a single missing file yields (false, nil).
func hasOverlaybdBlobRef(dir string) (bool, error) {
	fileNames := []string{iNewFormat, zdfsChecksumFile, zdfsOssurlFile, zdfsOssDataSizeFile, zdfsOssTypeFile}
	for _, name := range fileNames {
		fullPath := path.Join(dir, name)
		b, err := pathExists(fullPath)
		if err != nil {
			// %w keeps the underlying error inspectable via errors.Is/As;
			// the previous %s flattened it to text (display is identical).
			return false, fmt.Errorf("LSMD ERROR failed to check if %s exists. err:%w", fullPath, err)
		}
		if !b {
			return false, nil
		}
	}
	return true, nil
}
func overlaybdConfPath(dir string) string {
return filepath.Join(dir, "block", "config.v1.json")
}
func overlaybdInitDebuglogPath(dir string) string {
return filepath.Join(dir, "block", "init-debug.log")
}
// isOverlaybdLayer reports whether the snapshot dir holds an overlaybd
// layer, either via an already-generated block/config.v1.json or via the
// raw dadi meta files under dir/fs.
func isOverlaybdLayer(dir string) (bool, error) {
	// Best effort: a stat failure here simply falls through to the blob-ref probe.
	if exists, _ := pathExists(overlaybdConfPath(dir)); exists {
		return true, nil
	}
	b, err := hasOverlaybdBlobRef(path.Join(dir, "fs"))
	if err != nil {
		// NOTE(review): log-and-return keeps the original diagnostics; %w now
		// preserves the cause for errors.Is/As (message text is unchanged).
		logrus.Errorf("LSMD ERROR failed to IsZdfsLayerInApplyDiff(dir%s), err:%s", dir, err)
		return false, fmt.Errorf("LSMD ERROR failed to IsZdfsLayerInApplyDiff(dir%s), err:%w", dir, err)
	}
	return b, nil
}
func getTrimStringFromFile(filePath string) (string, error) {
data, err := ioutil.ReadFile(filePath)
if err != nil {
return "", err
}
return strings.Trim(string(data), " \n"), nil
}
// updateSpec rewrites the overlaybd config under dir so that its
// RecordTracePath equals recordTracePath, skipping the write entirely when
// the on-disk value already matches.
func updateSpec(dir, recordTracePath string) error {
	cfg, err := loadBackingStoreConfig(dir)
	if err != nil {
		return err
	}
	if cfg.RecordTracePath != recordTracePath {
		cfg.RecordTracePath = recordTracePath
		return atomicWriteOverlaybdTargetConfig(dir, cfg)
	}
	// Already up to date; avoid a needless atomic rewrite.
	return nil
}
// GetBlobRepoDigest parses the .oss_url file under dir and splits the URL
// into the repo blob URL and the trailing sha256 digest. It errors when the
// last path segment does not start with "sha256".
func GetBlobRepoDigest(dir string) (string, string, error) {
	url, err := getTrimStringFromFile(path.Join(dir, zdfsOssurlFile))
	if err != nil {
		return "", "", err
	}
	idx := strings.LastIndex(url, "/")
	digest := url[idx+1:]
	if !strings.HasPrefix(digest, "sha256") {
		return "", "", fmt.Errorf("can't parse sha256 from url %s", url)
	}
	return url[:idx], digest, nil
}
// GetBlobSize reads the layer blob size recorded in the .data_size file
// under dir and parses it as an unsigned decimal integer.
func GetBlobSize(dir string) (uint64, error) {
	sizeStr, err := getTrimStringFromFile(path.Join(dir, zdfsOssDataSizeFile))
	if err != nil {
		return 0, err
	}
	size, err := strconv.ParseUint(sizeStr, 10, 64)
	if err != nil {
		return 0, err
	}
	return size, nil
}
// constructImageBlobURL turns an image reference (e.g. host/ns/repo:tag)
// into its registry v2 blobs endpoint: https://<host>/v2/<repo>/blobs.
func constructImageBlobURL(ref string) (string, error) {
	named, err := reference.ParseNamed(ref)
	if err != nil {
		return "", errors.Wrapf(err, "invalid repo url %s", ref)
	}
	host := reference.Domain(named)
	// Path strips the domain, leaving only the repository component.
	repo := reference.Path(reference.TrimNamed(named))
	blobURL := "https://" + path.Join(host, "v2", repo) + "/blobs"
	return blobURL, nil
}
// loadBackingStoreConfig reads and unmarshals the overlaybd target config
// (block/config.v1.json) for the snapshot rooted at dir.
func loadBackingStoreConfig(dir string) (*types.OverlayBDBSConfig, error) {
	confPath := overlaybdConfPath(dir)
	raw, err := ioutil.ReadFile(confPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to read config(path=%s) of snapshot %s", confPath, dir)
	}
	config := &types.OverlayBDBSConfig{}
	if err := json.Unmarshal(raw, config); err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal data(%s)", string(raw))
	}
	return config, nil
}
// atomicWriteOverlaybdTargetConfig serializes configJSON and atomically
// replaces block/config.v1.json under dir (mode 0600).
func atomicWriteOverlaybdTargetConfig(dir string, configJSON *types.OverlayBDBSConfig) error {
	encoded, err := json.Marshal(configJSON)
	if err != nil {
		return errors.Wrapf(err, "failed to marshal %+v configJSON into JSON", configJSON)
	}
	target := overlaybdConfPath(dir)
	// AtomicWriteFile writes to a temp file and renames, so readers never
	// observe a partially written config.
	if writeErr := continuity.AtomicWriteFile(target, encoded, 0600); writeErr != nil {
		return errors.Wrapf(writeErr, "failed to commit the overlaybd config on %s", target)
	}
	return nil
}
// constructSpec writes a fresh overlaybd target config for the layer at dir.
// With no parent, the lower chain is rooted on the shared overlaybd base
// layer; otherwise the parent's lowers (and, when repo is empty, its
// RepoBlobURL) are inherited. The current layer is appended as the topmost
// lower entry.
func constructSpec(dir, parent, repo, digest string, size uint64, recordTracePath string) error {
	config := types.OverlayBDBSConfig{
		Lowers:      []types.OverlayBDBSConfigLower{},
		ResultFile:  overlaybdInitDebuglogPath(dir),
		RepoBlobURL: repo,
	}
	if parent != "" {
		parentConfig, err := loadBackingStoreConfig(parent)
		if err != nil {
			return err
		}
		if repo == "" {
			// Inherit the parent's blob URL when none was supplied.
			config.RepoBlobURL = parentConfig.RepoBlobURL
		}
		config.Lowers = parentConfig.Lowers
	} else {
		// Root of the chain: start from the shared overlaybd base layer.
		config.Lowers = append(config.Lowers, types.OverlayBDBSConfigLower{
			File: overlaybdBaseLayer,
		})
	}
	config.RecordTracePath = recordTracePath
	config.Lowers = append(config.Lowers, types.OverlayBDBSConfigLower{
		Digest: digest,
		Size:   int64(size),
		Dir:    path.Join(dir, "block"),
	})
	return atomicWriteOverlaybdTargetConfig(dir, &config)
}
// PrepareOverlayBDSpec ensures every layer of the snapshot chain (lowest
// parent first, finishing with the snapshot identified by id) has its dadi
// meta files copied into place and a block/config.v1.json generated.
// It returns (false, nil) when dir is not an overlaybd layer at all, and
// (true, err) once overlaybd processing has started.
func PrepareOverlayBDSpec(ctx context.Context, key, id, dir string, info snapshots.Info, snPath func(string) string) (bool, error) {
	// Check the probe error before the bool: the previous ordering
	// (`if b, err := ...; !b` first) returned (false, nil) whenever the probe
	// errored, because the zero-value bool short-circuited and silently
	// masked the failure.
	isObd, err := isOverlaybdLayer(dir)
	if err != nil {
		return false, err
	}
	if !isObd {
		return false, nil
	}
	// NOTE(review): GetSnapshot's error is deliberately ignored; on failure
	// s.ParentIDs is empty and only the snapshot itself gets processed.
	s, _ := storage.GetSnapshot(ctx, key)
	lowers := func() []string {
		ret := []string{}
		for _, id := range s.ParentIDs {
			ret = append(ret, snPath(id))
		}
		return ret
	}()
	// makeConfig generates block/config.v1.json for one layer dir, resolving
	// the repo blob URL from the recorded CRI image ref when present.
	makeConfig := func(dir string, parent string) error {
		logrus.Infof("ENTER makeConfig(dir: %s, parent: %s)", dir, parent)
		dstDir := path.Join(dir, "block")
		repo, digest, err := GetBlobRepoDigest(dstDir)
		if err != nil {
			return err
		}
		refPath := path.Join(dir, "image_ref")
		if b, _ := pathExists(refPath); b {
			// Best effort: on any failure we fall back to the repo derived
			// from .oss_url above.
			img, _ := os.ReadFile(refPath)
			imageRef := string(img)
			logrus.Infof("read imageRef from label.CRIImageRef: %s", imageRef)
			repo, _ = constructImageBlobURL(imageRef)
		}
		logrus.Infof("construct repoBlobUrl: %s", repo)
		// Size read is best effort too; a zero size is written on failure.
		size, _ := GetBlobSize(dstDir)
		return constructSpec(dir, parent, repo, digest, size, "")
	}
	// doDir prepares a single layer dir: it copies the pulled meta files into
	// zdfsmeta/ and block/, then generates (or refreshes) the config.
	doDir := func(dir string, parent string) error {
		dstDir := path.Join(dir, zdfsMetaDir)
		//1.check if the dir exists. Create the dir only when dir doesn't exist.
		b, err := pathExists(dstDir)
		if err != nil {
			logrus.Errorf("LSMD ERROR PathExists(%s) err:%s", dstDir, err)
			return err
		}
		if b {
			configPath := overlaybdConfPath(dir)
			configExists, err := pathExists(configPath)
			if err != nil {
				logrus.Errorf("LSMD ERROR PathExists(%s) err:%s", configPath, err)
				return err
			}
			if configExists {
				logrus.Infof("%s has been created yet.", configPath)
				return updateSpec(dir, "")
			}
			// config.v1.json does not exist, for early pulled layers
			return makeConfig(dir, parent)
		}
		b, _ = pathExists(path.Join(dir, "block", "config.v1.json"))
		if b {
			// is new dadi format
			return nil
		}
		//2.create tmpDir in dir
		tmpDir, err := os.MkdirTemp(dir, "temp_for_prepare_dadimeta")
		if err != nil {
			logrus.Errorf("LSMD ERROR os.MkdirTemp(%s.) err:%s", tmpDir, err)
			return err
		}
		//3.copy meta files to tmpDir)
		srcDir := path.Join(dir, "fs")
		if err := copyPulledZdfsMetaFiles(srcDir, tmpDir); err != nil {
			logrus.Errorf("failed to copyPulledZdfsMetaFiles(%s, %s), err:%s", srcDir, tmpDir, err)
			return err
		}
		blockDir := path.Join(dir, "block")
		if err := copyPulledZdfsMetaFiles(srcDir, blockDir); err != nil {
			logrus.Errorf("failed to copyPulledZdfsMetaFiles(%s, %s), err:%s", srcDir, blockDir, err)
			return err
		}
		//4.rename tmpDir to zdfsmeta
		if err = os.Rename(tmpDir, dstDir); err != nil {
			return err
		}
		//5.generate config.v1.json
		return makeConfig(dir, parent)
	}
	// Walk from the lowest parent upward so each layer's parent config
	// already exists before its child's config is generated.
	parent := ""
	for m := len(lowers) - 1; m >= 0; m-- {
		dir := lowers[m]
		if err := doDir(dir, parent); err != nil {
			logrus.Errorf("LSMD ERROR doDir(%s) err:%s", dir, err)
			return true, err
		}
		parent = dir
	}
	return true, doDir(snPath(id), parent)
}
// copyPulledZdfsMetaFiles copies the known dadi meta files from srcDir to
// dstDir, silently skipping files that were not pulled (e.g. .trace). Any
// other read or write error aborts the copy and is returned.
func copyPulledZdfsMetaFiles(srcDir, dstDir string) error {
	fileNames := []string{iNewFormat, zdfsChecksumFile, zdfsOssurlFile, zdfsOssDataSizeFile, zdfsOssTypeFile, zdfsTrace}
	for _, name := range fileNames {
		srcPath := path.Join(srcDir, name)
		// os.IsNotExist(nil) is false, so the former `err != nil &&` guard
		// was redundant; other stat errors surface via ReadFile below.
		if _, err := os.Stat(srcPath); os.IsNotExist(err) {
			continue
		}
		data, err := os.ReadFile(srcPath)
		if err != nil {
			// Log messages previously named ioutil.* although os.* is called.
			logrus.Errorf("LSMD ERROR os.ReadFile(srcDir:%s, name:%s) dstDir:%s, err:%s", srcDir, name, dstDir, err)
			return err
		}
		if err := os.WriteFile(path.Join(dstDir, name), data, 0666); err != nil {
			logrus.Errorf("LSMD ERROR os.WriteFile(path.Join(dstDir:%s, name:%s) srcDir:%s err:%s", dstDir, name, srcDir, err)
			return err
		}
	}
	return nil
}