From d556202a6e2ea8e8b376b683a3c54ede7b357dd9 Mon Sep 17 00:00:00 2001
From: xiekeyang
Date: Fri, 7 Oct 2016 10:48:46 +0800
Subject: [PATCH 1/2] independently unpacking layer functions

Signed-off-by: xiekeyang
---
 image/image.go    |   4 +-
 image/layer.go    | 200 ++++++++++++++++++++++++++++++++++++++++++++++
 image/manifest.go | 174 ----------------------------------------------
 3 files changed, 202 insertions(+), 176 deletions(-)
 create mode 100644 image/layer.go

diff --git a/image/image.go b/image/image.go
index 1551c1c..e462093 100644
--- a/image/image.go
+++ b/image/image.go
@@ -133,7 +133,7 @@ func unpack(w walker, dest, refName string) error {
 		return err
 	}
 
-	return m.unpack(w, dest)
+	return unpackLayerList(w, m.Layers, dest)
 }
 
 // CreateRuntimeBundleLayout walks through the file tree given by src and
@@ -180,7 +180,7 @@ func createRuntimeBundle(w walker, dest, refName, rootfs string) error {
 		return err
 	}
 
-	err = m.unpack(w, filepath.Join(dest, rootfs))
+	err = unpackLayerList(w, m.Layers, filepath.Join(dest, rootfs))
 	if err != nil {
 		return err
 	}
diff --git a/image/layer.go b/image/layer.go
new file mode 100644
index 0000000..cbfa333
--- /dev/null
+++ b/image/layer.go
@@ -0,0 +1,200 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package image
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/opencontainers/image-spec/schema"
+	"github.com/pkg/errors"
+)
+
+func unpackLayerList(w walker, descList []descriptor, dest string) (retErr error) {
+	// error out if the dest directory is not empty
+	s, err := ioutil.ReadDir(dest)
+	if err != nil && !os.IsNotExist(err) {
+		return errors.Wrap(err, "unable to open file") // err contains dest
+	}
+	if len(s) > 0 {
+		return fmt.Errorf("%s is not empty", dest)
+	}
+	defer func() {
+		// if we encounter error during unpacking
+		// clean up the partially-unpacked destination
+		if retErr != nil {
+			if err := os.RemoveAll(dest); err != nil {
+				fmt.Printf("Error: failed to remove partially-unpacked destination %v", err)
+			}
+		}
+	}()
+	for _, d := range descList {
+		if d.MediaType != string(schema.MediaTypeImageLayer) {
+			continue
+		}
+
+		switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error {
+			if info.IsDir() {
+				return nil
+			}
+
+			dd, err := filepath.Rel(filepath.Join("blobs", d.algo()), filepath.Clean(path))
+			if err != nil || d.hash() != dd {
+				return nil
+			}
+
+			if err := unpackLayer(dest, r); err != nil {
+				return errors.Wrap(err, "error extracting layer")
+			}
+
+			return errEOW
+		}); err {
+		case nil:
+			return fmt.Errorf("%s: layer not found", dest)
+		case errEOW:
+		default:
+			return err
+		}
+	}
+	return nil
+}
+
+func unpackLayer(dest string, r io.Reader) error {
+	entries := make(map[string]bool)
+	gz, err := gzip.NewReader(r)
+	if err != nil {
+		return errors.Wrap(err, "error creating gzip reader")
+	}
+	defer gz.Close()
+
+	var dirs []*tar.Header
+	tr := tar.NewReader(gz)
+
+loop:
+	for {
+		hdr, err := tr.Next()
+		switch err {
+		case io.EOF:
+			break loop
+		case nil:
+			// success, continue below
+		default:
+			return errors.Wrapf(err, "error advancing tar stream")
+		}
+
+		hdr.Name = filepath.Clean(hdr.Name)
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
+				if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
+					return err3
+				}
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		if entries[path] {
+			return fmt.Errorf("duplicate entry for %s", path)
+		}
+		entries[path] = true
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		info := hdr.FileInfo()
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return fmt.Errorf("%q is outside of %q", hdr.Name, dest)
+		}
+
+		if strings.HasPrefix(info.Name(), ".wh.") {
+			path = strings.Replace(path, ".wh.", "", 1)
+
+			if err := os.RemoveAll(path); err != nil {
+				return errors.Wrap(err, "unable to delete whiteout path")
+			}
+
+			continue loop
+		}
+
+		switch hdr.Typeflag {
+		case tar.TypeDir:
+			if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+				if err2 := os.MkdirAll(path, info.Mode()); err2 != nil {
+					return errors.Wrap(err2, "error creating directory")
+				}
+			}
+
+		case tar.TypeReg, tar.TypeRegA:
+			f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
+			if err != nil {
+				return errors.Wrap(err, "unable to open file")
+			}
+
+			if _, err := io.Copy(f, tr); err != nil {
+				f.Close()
+				return errors.Wrap(err, "unable to copy")
+			}
+			f.Close()
+
+		case tar.TypeLink:
+			target := filepath.Join(dest, hdr.Linkname)
+
+			if !strings.HasPrefix(target, dest) {
+				return fmt.Errorf("invalid hardlink %q -> %q", target, hdr.Linkname)
+			}
+
+			if err := os.Link(target, path); err != nil {
+				return err
+			}
+
+		case tar.TypeSymlink:
+			target := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+			if !strings.HasPrefix(target, dest) {
+				return fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)
+			}
+
+			if err := os.Symlink(hdr.Linkname, path); err != nil {
+				return err
+			}
+		case tar.TypeXGlobalHeader:
+			return nil
+		}
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them to modify the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+
+		finfo := hdr.FileInfo()
+		// I believe the old version was using time.Now().UTC() to overcome an
+		// invalid error from chtimes.....but here we lose hdr.AccessTime like this...
+		if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
+			return errors.Wrap(err, "error changing time")
+		}
+	}
+	return nil
+}
diff --git a/image/manifest.go b/image/manifest.go
index c438ede..e03c010 100644
--- a/image/manifest.go
+++ b/image/manifest.go
@@ -15,17 +15,13 @@
 package image
 
 import (
-	"archive/tar"
 	"bytes"
-	"compress/gzip"
 	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"strings"
-	"time"
 
 	"github.com/opencontainers/image-spec/schema"
 	"github.com/opencontainers/image-spec/specs-go/v1"
@@ -87,173 +83,3 @@ func (m *manifest) validate(w walker) error {
 
 	return nil
 }
-
-func (m *manifest) unpack(w walker, dest string) (retErr error) {
-	// error out if the dest directory is not empty
-	s, err := ioutil.ReadDir(dest)
-	if err != nil && !os.IsNotExist(err) {
-		return errors.Wrap(err, "unable to open file") // err contains dest
-	}
-	if len(s) > 0 {
-		return fmt.Errorf("%s is not empty", dest)
-	}
-	defer func() {
-		// if we encounter error during unpacking
-		// clean up the partially-unpacked destination
-		if retErr != nil {
-			if err := os.RemoveAll(dest); err != nil {
-				fmt.Printf("Error: failed to remove partially-unpacked destination %v", err)
-			}
-		}
-	}()
-	for _, d := range m.Layers {
-		if d.MediaType != string(schema.MediaTypeImageLayer) {
-			continue
-		}
-
-		switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error {
-			if info.IsDir() {
-				return nil
-			}
-
-			dd, err := filepath.Rel(filepath.Join("blobs", d.algo()), filepath.Clean(path))
-			if err != nil || d.hash() != dd {
-				return nil
-			}
-
-			if err := unpackLayer(dest, r); err != nil {
-				return errors.Wrap(err, "error extracting layer")
-			}
-
-			return errEOW
-		}); err {
-		case nil:
-			return fmt.Errorf("%s: layer not found", dest)
-		case errEOW:
-		default:
-			return err
-		}
-	}
-	return nil
-}
-
-func unpackLayer(dest string, r io.Reader) error {
-	entries := make(map[string]bool)
-	gz, err := gzip.NewReader(r)
-	if err != nil {
-		return errors.Wrap(err, "error creating gzip reader")
-	}
-	defer gz.Close()
-
-	var dirs []*tar.Header
-	tr := tar.NewReader(gz)
-
-loop:
-	for {
-		hdr, err := tr.Next()
-		switch err {
-		case io.EOF:
-			break loop
-		case nil:
-			// success, continue below
-		default:
-			return errors.Wrapf(err, "error advancing tar stream")
-		}
-
-		hdr.Name = filepath.Clean(hdr.Name)
-		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
-			// Not the root directory, ensure that the parent directory exists
-			parent := filepath.Dir(hdr.Name)
-			parentPath := filepath.Join(dest, parent)
-			if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
-				if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
-					return err3
-				}
-			}
-		}
-		path := filepath.Join(dest, hdr.Name)
-		if entries[path] {
-			return fmt.Errorf("duplicate entry for %s", path)
-		}
-		entries[path] = true
-		rel, err := filepath.Rel(dest, path)
-		if err != nil {
-			return err
-		}
-		info := hdr.FileInfo()
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return fmt.Errorf("%q is outside of %q", hdr.Name, dest)
-		}
-
-		if strings.HasPrefix(info.Name(), ".wh.") {
-			path = strings.Replace(path, ".wh.", "", 1)
-
-			if err := os.RemoveAll(path); err != nil {
-				return errors.Wrap(err, "unable to delete whiteout path")
-			}
-
-			continue loop
-		}
-
-		switch hdr.Typeflag {
-		case tar.TypeDir:
-			if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
-				if err2 := os.MkdirAll(path, info.Mode()); err2 != nil {
-					return errors.Wrap(err2, "error creating directory")
-				}
-			}
-
-		case tar.TypeReg, tar.TypeRegA:
-			f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
-			if err != nil {
-				return errors.Wrap(err, "unable to open file")
-			}
-
-			if _, err := io.Copy(f, tr); err != nil {
-				f.Close()
-				return errors.Wrap(err, "unable to copy")
-			}
-			f.Close()
-
-		case tar.TypeLink:
-			target := filepath.Join(dest, hdr.Linkname)
-
-			if !strings.HasPrefix(target, dest) {
-				return fmt.Errorf("invalid hardlink %q -> %q", target, hdr.Linkname)
-			}
-
-			if err := os.Link(target, path); err != nil {
-				return err
-			}
-
-		case tar.TypeSymlink:
-			target := filepath.Join(filepath.Dir(path), hdr.Linkname)
-
-			if !strings.HasPrefix(target, dest) {
-				return fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)
-			}
-
-			if err := os.Symlink(hdr.Linkname, path); err != nil {
-				return err
-			}
-		case tar.TypeXGlobalHeader:
-			return nil
-		}
-		// Directory mtimes must be handled at the end to avoid further
-		// file creation in them to modify the directory mtime
-		if hdr.Typeflag == tar.TypeDir {
-			dirs = append(dirs, hdr)
-		}
-	}
-	for _, hdr := range dirs {
-		path := filepath.Join(dest, hdr.Name)
-
-		finfo := hdr.FileInfo()
-		// I believe the old version was using time.Now().UTC() to overcome an
-		// invalid error from chtimes.....but here we lose hdr.AccessTime like this...
-		if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
-			return errors.Wrap(err, "error changing time")
-		}
-	}
-	return nil
-}

From 9658b441460b4fefe9227ec0a471334591e57d0d Mon Sep 17 00:00:00 2001
From: xiekeyang
Date: Fri, 18 Nov 2016 11:42:28 +0800
Subject: [PATCH 2/2] change unit test from manifest to layer

Signed-off-by: xiekeyang
---
 image/{manifest_test.go => layer_test.go} | 28 +++++++++++------------
 1 file changed, 13 insertions(+), 15 deletions(-)
 rename image/{manifest_test.go => layer_test.go} (87%)

diff --git a/image/manifest_test.go b/image/layer_test.go
similarity index 87%
rename from image/manifest_test.go
rename to image/layer_test.go
index 81242e1..2e037de 100644
--- a/image/manifest_test.go
+++ b/image/layer_test.go
@@ -26,6 +26,8 @@ import (
 	"path/filepath"
 	"strings"
 	"testing"
+
+	"github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 func TestUnpackLayerDuplicateEntries(t *testing.T) {
@@ -65,7 +67,7 @@ func TestUnpackLayerDuplicateEntries(t *testing.T) {
 	}
 }
 
-func TestUnpackLayer(t *testing.T) {
+func TestUnpackLayerList(t *testing.T) {
 	tmp1, err := ioutil.TempDir("", "test-layer")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -106,13 +108,11 @@ func TestUnpackLayer(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	testManifest := manifest{
-		Layers: []descriptor{descriptor{
-			MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
-			Digest:    fmt.Sprintf("sha256:%s", fmt.Sprintf("%x", h.Sum(nil))),
-		}},
-	}
-	err = testManifest.unpack(newPathWalker(tmp1), filepath.Join(tmp1, "rootfs"))
+	layerList := []descriptor{descriptor{
+		MediaType: v1.MediaTypeImageLayer,
+		Digest:    fmt.Sprintf("sha256:%s", fmt.Sprintf("%x", h.Sum(nil))),
+	}}
+	err = unpackLayerList(newPathWalker(tmp1), layerList, filepath.Join(tmp1, "rootfs"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -167,13 +167,11 @@ func TestUnpackLayerRemovePartialyUnpackedFile(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	testManifest := manifest{
-		Layers: []descriptor{descriptor{
-			MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
-			Digest:    fmt.Sprintf("sha256:%s", fmt.Sprintf("%x", h.Sum(nil))),
-		}},
-	}
-	err = testManifest.unpack(newPathWalker(tmp1), filepath.Join(tmp1, "rootfs"))
+	layerList := []descriptor{descriptor{
+		MediaType: v1.MediaTypeImageLayer,
+		Digest:    fmt.Sprintf("sha256:%s", fmt.Sprintf("%x", h.Sum(nil))),
+	}}
+	err = unpackLayerList(newPathWalker(tmp1), layerList, filepath.Join(tmp1, "rootfs"))
 	if err != nil && !strings.Contains(err.Error(), "duplicate entry for") {
 		t.Fatal(err)
 	}
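For readers following the refactor, the two per-entry rules that unpackLayer enforces on every tar header, rejecting names that would resolve outside the unpack destination and translating AUFS-style ".wh." whiteout entries into deletions, can be exercised on their own. The sketch below is illustrative only and not part of the patch: checkEntry is a hypothetical standalone helper that mirrors those checks using nothing but the standard library, and the example paths are made up.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// checkEntry mirrors the per-entry handling in unpackLayer: it resolves the
// tar entry name under dest, rejects names that resolve outside dest, and
// translates a ".wh.<name>" whiteout entry into the path that should be
// removed. It returns the resolved path and whether the entry is a whiteout.
func checkEntry(dest, name string) (string, bool, error) {
	name = filepath.Clean(name)
	path := filepath.Join(dest, name)

	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return "", false, err
	}
	if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", false, fmt.Errorf("%q is outside of %q", name, dest)
	}

	if strings.HasPrefix(filepath.Base(path), ".wh.") {
		// a whiteout entry marks the named file for deletion from lower layers
		return strings.Replace(path, ".wh.", "", 1), true, nil
	}
	return path, false, nil
}

func main() {
	fmt.Println(checkEntry("/tmp/rootfs", "etc/.wh.hosts"))    // whiteout: delete /tmp/rootfs/etc/hosts
	fmt.Println(checkEntry("/tmp/rootfs", "../../etc/passwd")) // rejected: escapes the destination
}

Keeping these checks inside unpackLayer, rather than in the new unpackLayerList wrapper, means every layer blob the walker finds goes through the same escape and whiteout handling regardless of how it was located.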