diff --git a/cmd/zeta/main.go b/cmd/zeta/main.go index 3b74974..2cb71d6 100644 --- a/cmd/zeta/main.go +++ b/cmd/zeta/main.go @@ -53,6 +53,7 @@ type App struct { LsFiles command.LsFiles `cmd:"ls-files" help:"Show information about files in the index and the working tree"` HashObject command.HashObject `cmd:"hash-object" help:"Compute hash or create object"` MergeFile command.MergeFile `cmd:"merge-file" help:"Run a three-way file merge"` + Show command.Show `cmd:"show" help:"Show various types of objects"` Version command.Version `cmd:"version" help:"Display version information"` Debug bool `name:"debug" help:"Enable debug mode; analyze timing"` } diff --git a/go.mod b/go.mod index 8b902cc..643de43 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,8 @@ require ( github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 github.com/ProtonMail/go-crypto v1.1.3 github.com/aws/aws-sdk-go-v2 v1.32.7 - github.com/aws/aws-sdk-go-v2/config v1.28.6 - github.com/aws/aws-sdk-go-v2/credentials v1.17.47 + github.com/aws/aws-sdk-go-v2/config v1.28.7 + github.com/aws/aws-sdk-go-v2/credentials v1.17.48 github.com/aws/aws-sdk-go-v2/service/s3 v1.71.1 github.com/creack/pty v1.1.24 github.com/danieljoos/wincred v1.2.2 @@ -43,7 +43,7 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect @@ -52,9 +52,9 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 // indirect github.com/aws/smithy-go v1.22.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.5.0 // indirect diff --git a/go.sum b/go.sum index 63b129f..2378143 100644 --- a/go.sum +++ b/go.sum @@ -16,12 +16,12 @@ github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= -github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= -github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 
h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/config v1.28.7 h1:GduUnoTXlhkgnxTD93g1nv4tVPILbdNQOzav+Wpg7AE= +github.com/aws/aws-sdk-go-v2/config v1.28.7/go.mod h1:vZGX6GVkIE8uECSUHB6MWAUsd4ZcG2Yq/dMa4refR3M= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= @@ -40,12 +40,12 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 h1:Hi0KGbrnr57bEH github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7/go.mod h1:wKNgWgExdjjrm4qvfbTorkvocEstaoDl4WCvGfeCy9c= github.com/aws/aws-sdk-go-v2/service/s3 v1.71.1 h1:aOVVZJgWbaH+EJYPvEgkNhCEbXXvH7+oML36oaPK3zE= github.com/aws/aws-sdk-go-v2/service/s3 v1.71.1/go.mod h1:r+xl5yzMk9083rMR+sJ5TYj9Tihvf/l1oxzZXDgGj2Q= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 h1:CvuUmnXI7ebaUAhbJcDy9YQx8wHR69eZ9I7q5hszt/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 h1:F2rBfNAL5UyswqoeWv9zs74N/NanhK16ydHW1pahX6E= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 h1:Xgv/hyNgvLda/M9l9qxXc4UFSgppnRczLxlMs5Ae/QY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= diff --git a/modules/diferenco/text.go b/modules/diferenco/text.go index 4a804a9..dec7968 100644 --- a/modules/diferenco/text.go +++ b/modules/diferenco/text.go @@ -94,17 +94,37 @@ func readRawText(r io.Reader, size int) (string, error) { return unsafe.String(unsafe.SliceData(content), len(content)), nil } -func ReadUnifiedText(r io.Reader, size int64, textConv bool) (content string, charset string, err error) { +func ReadUnifiedText(r io.Reader, size int64, textconv bool) (content string, charset string, err error) { if size > MAX_DIFF_SIZE 
{ return "", "", ErrNonTextContent } - if textConv { + if textconv { return readUnifiedText(r) } content, err = readRawText(r, int(size)) return content, UTF8, err } +func NewUnifiedReaderEx(r io.Reader, textconv bool) (io.Reader, string, error) { + sniffBytes, err := streamio.ReadMax(r, sniffLen) + if err != nil { + return nil, "", err + } + reader := io.MultiReader(bytes.NewReader(sniffBytes), r) + if !textconv { + if bytes.IndexByte(sniffBytes, 0) != -1 { + return reader, BINARY, nil + } + return reader, UTF8, nil + } + charset := detectCharset(sniffBytes) + // binary or UTF-8 not need convert + if charset == BINARY || strings.EqualFold(charset, UTF8) { + return reader, charset, nil + } + return chardet.NewReader(reader, charset), charset, nil +} + func NewUnifiedReader(r io.Reader) (io.Reader, error) { sniffBytes, err := streamio.ReadMax(r, sniffLen) if err != nil { diff --git a/modules/streamio/bytes.go b/modules/streamio/bytes.go index c232a2d..4d70138 100644 --- a/modules/streamio/bytes.go +++ b/modules/streamio/bytes.go @@ -36,7 +36,7 @@ func PutByteSlice(buf *[]byte) { } // GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool. -// Returns a buffer that is resetted and ready for use. +// Returns a buffer that is reset and ready for use. // // After use, the *bytes.Buffer should be put back into the sync.Pool // by calling PutBytesBuffer. diff --git a/modules/streamio/sync.go b/modules/streamio/sync.go index 79d3a57..e795209 100644 --- a/modules/streamio/sync.go +++ b/modules/streamio/sync.go @@ -13,7 +13,7 @@ var bufioReader = sync.Pool{ } // GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool. -// Returns a bufio.Reader that is resetted with reader and ready for use. +// Returns a bufio.Reader that is reset with reader and ready for use. // // After use, the *bufio.Reader should be put back into the sync.Pool // by calling PutBufioReader. @@ -39,7 +39,7 @@ var bufferWriter = sync.Pool{ } // GetBufferWriter returns a *bufio.Writer that is managed by a sync.Pool. -// Returns a bufio.Writer that is resetted with writer and ready for use. +// Returns a bufio.Writer that is reset with writer and ready for use. // // After use, the *bufio.Writer should be put back into the sync.Pool // by calling PutBufferWriter. diff --git a/modules/streamio/zlib.go b/modules/streamio/zlib.go index 5333de7..c0acc63 100644 --- a/modules/streamio/zlib.go +++ b/modules/streamio/zlib.go @@ -35,7 +35,7 @@ type ZLibReader struct { } // GetZlibReader returns a ZLibReader that is managed by a sync.Pool. -// Returns a ZLibReader that is resetted using a dictionary that is +// Returns a ZLibReader that is reset using a dictionary that is // also managed by a sync.Pool. // // After use, the ZLibReader should be put back into the sync.Pool @@ -58,7 +58,7 @@ func PutZlibReader(z *ZLibReader) { } // GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool. -// Returns a writer that is resetted with w and ready for use. +// Returns a writer that is reset with w and ready for use. // // After use, the *zlib.Writer should be put back into the sync.Pool // by calling PutZlibWriter. diff --git a/modules/streamio/zstd.go b/modules/streamio/zstd.go index 420a38c..6634f4f 100644 --- a/modules/streamio/zstd.go +++ b/modules/streamio/zstd.go @@ -31,7 +31,7 @@ type ZstdDecoder struct { } // GetZstdReader returns a ZstdDecoder that is managed by a sync.Pool. 
-// Returns a ZLibReader that is resetted using a dictionary that is
+// Returns a ZstdDecoder that is reset using a dictionary that is
 // also managed by a sync.Pool.
 //
 // After use, the ZstdDecoder should be put back into the sync.Pool
@@ -55,7 +55,7 @@ type ZstdEncoder struct {
 }
 
 // GetZstdWriter returns a *ztsd.Encoder that is managed by a sync.Pool.
-// Returns a writer that is resetted with w and ready for use.
+// Returns a writer that is reset with w and ready for use.
 //
 // After use, the *ztsd.Encoder should be put back into the sync.Pool
 // by calling PutZstdWriter.
diff --git a/modules/zeta/object/commit.go b/modules/zeta/object/commit.go
index 1f1b42d..fbc7a3a 100644
--- a/modules/zeta/object/commit.go
+++ b/modules/zeta/object/commit.go
@@ -313,6 +313,17 @@ func (c *Commit) Root(ctx context.Context) (*Tree, error) {
 	return resolveTree(ctx, c.b, c.Tree)
 }
 
+// File returns the file with the specified "path" in the commit and a
+// nil error if the file exists. If the file does not exist, it returns
+// a nil file and an ErrEntryNotFound error.
+func (c *Commit) File(ctx context.Context, path string) (*File, error) {
+	tree, err := c.Root(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return tree.File(ctx, path)
+}
+
 // StatsContext returns the stats of a commit. Error will be return if context
 // expires. Provided context must be non-nil.
 func (c *Commit) StatsContext(ctx context.Context, m noder.Matcher, opts *PatchOptions) (FileStats, error) {
diff --git a/modules/zeta/object/patch.go b/modules/zeta/object/patch.go
index ebf7f1d..015a6b0 100644
--- a/modules/zeta/object/patch.go
+++ b/modules/zeta/object/patch.go
@@ -8,6 +8,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"path"
 	"strconv"
 	"strings"
 
@@ -21,7 +22,7 @@ var (
 
 type PatchOptions struct {
 	Algorithm diferenco.Algorithm
-	TextConv  bool
+	Textconv  bool
 	Match     func(string) bool
 }
 
@@ -29,6 +30,20 @@ func sizeOverflow(f *File) bool {
 	return f != nil && f.Size > diferenco.MAX_DIFF_SIZE
 }
 
+func PathRenameCombine(from, to string) string {
+	fromPaths := strings.Split(from, "/")
+	toPaths := strings.Split(to, "/")
+	n := min(len(fromPaths), len(toPaths))
+	i := 0
+	for i < n && fromPaths[i] == toPaths[i] {
+		i++
+	}
+	if i == 0 {
+		return fmt.Sprintf("%s => %s", from, to)
+	}
+	return fmt.Sprintf("%s/{%s => %s}", path.Join(fromPaths[0:i]...), path.Join(fromPaths[i:]...), path.Join(toPaths[i:]...))
+}
+
 func fileStatName(from, to *File) string {
 	if from == nil {
 		// New File is created.
@@ -40,7 +55,7 @@ func fileStatName(from, to *File) string {
 	}
 	if from.Path != to.Path {
 		// File is renamed.
- return fmt.Sprintf("%s => %s", from.Path, to.Path) + return PathRenameCombine(from.Path, to.Path) } return from.Path } @@ -63,14 +78,14 @@ func fileStatWithContext(ctx context.Context, opts *PatchOptions, c *Change) (*F if sizeOverflow(from) || sizeOverflow(to) { return s, nil } - fromContent, err := from.UnifiedText(ctx, opts.TextConv) + fromContent, err := from.UnifiedText(ctx, opts.Textconv) if plumbing.IsNoSuchObject(err) || err == diferenco.ErrNonTextContent { return s, nil } if err != nil { return nil, err } - toContent, err := to.UnifiedText(ctx, opts.TextConv) + toContent, err := to.UnifiedText(ctx, opts.Textconv) if plumbing.IsNoSuchObject(err) || err == diferenco.ErrNonTextContent { return s, nil } @@ -121,14 +136,14 @@ func filePatchWithContext(ctx context.Context, opts *PatchOptions, c *Change) (* if sizeOverflow(from) || sizeOverflow(to) { return &diferenco.Unified{From: from.asFile(), To: to.asFile(), IsBinary: true}, nil } - fromContent, err := from.UnifiedText(ctx, opts.TextConv) + fromContent, err := from.UnifiedText(ctx, opts.Textconv) if plumbing.IsNoSuchObject(err) || err == diferenco.ErrNonTextContent { return &diferenco.Unified{From: from.asFile(), To: to.asFile(), IsBinary: true}, nil } if err != nil { return nil, err } - toContent, err := to.UnifiedText(ctx, opts.TextConv) + toContent, err := to.UnifiedText(ctx, opts.Textconv) if plumbing.IsNoSuchObject(err) || err == diferenco.ErrNonTextContent { return &diferenco.Unified{From: from.asFile(), To: to.asFile(), IsBinary: true}, nil } diff --git a/modules/zeta/object/patch_test.go b/modules/zeta/object/patch_test.go new file mode 100644 index 0000000..670e80b --- /dev/null +++ b/modules/zeta/object/patch_test.go @@ -0,0 +1,30 @@ +package object + +import ( + "fmt" + "os" + "testing" +) + +func TestPathRenameCombine(t *testing.T) { + pp := []struct { + A, B string + }{ + { + "a.txt", + "b.txt", + }, + { + "pkg/command/command_merge_file.go", + "pkg/command/merge.go", + }, + { + "pkg/command/command_merge_file.go", + "pkg/merge.go", + }, + } + for _, i := range pp { + d := PathRenameCombine(i.A, i.B) + fmt.Fprintf(os.Stderr, "%s => %s|%s\n", i.A, i.B, d) + } +} diff --git a/modules/zeta/object/tree.go b/modules/zeta/object/tree.go index 3e6337a..cf7c3d8 100644 --- a/modules/zeta/object/tree.go +++ b/modules/zeta/object/tree.go @@ -571,6 +571,16 @@ func resolveTree(ctx context.Context, b Backend, h plumbing.Hash) (*Tree, error) return t, nil } +// File returns the hash of the file identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) File(ctx context.Context, path string) (*File, error) { + e, err := t.FindEntry(ctx, path) + if err != nil { + return nil, &ErrEntryNotFound{entry: path} + } + return newFile(e.Name, path, e.Mode, e.Hash, e.Size, t.b), nil +} + // Diff returns a list of changes between this tree and the provided one func (t *Tree) Diff(to *Tree, m noder.Matcher) (Changes, error) { return t.DiffContext(context.Background(), to, m) diff --git a/pkg/command/command_cat.go b/pkg/command/command_cat.go index bedb5d7..7aef2d5 100644 --- a/pkg/command/command_cat.go +++ b/pkg/command/command_cat.go @@ -10,13 +10,15 @@ import ( ) type Cat struct { - Hash string `arg:"" name:"object" help:"The name of the object to show"` - WriteMax int64 `name:"max" short:"m" optional:"" help:"Blob output size limit; use 0 for unlimited. 
Units: KB, MB, GB, K, M, G" default:"20m" type:"size"` + Object string `arg:"" name:"object" help:"The name of the object to show"` T bool `name:"type" short:"t" help:"Show object type"` DisplaySize bool `name:":" short:"s" help:"Show object size"` JSON bool `name:"json" short:"j" help:"Returns data as JSON; limited to commits, trees, fragments, and tags"` - Textconv bool `name:"textconv" help:"Output text as Unicode; blobs only"` + Textconv bool `name:"textconv" help:"Converting text to Unicode"` + Direct bool `name:"direct" help:"View files directly"` Verify bool `name:"verify" help:"Verify object hash"` + Limit int64 `name:"limit" short:"L" help:"Omits blobs larger than n bytes or units. n may be zero. supported units: KB,MB,GB,K,M,G" default:"-1" type:"size"` + Output string `name:"output" help:"Output to a specific file instead of stdout" placeholder:""` } func (c *Cat) Run(g *Globals) error { @@ -30,12 +32,14 @@ func (c *Cat) Run(g *Globals) error { } defer r.Close() return r.Cat(context.Background(), &zeta.CatOptions{ - Hash: c.Hash, - SizeMax: c.WriteMax, - Type: c.T, - DisplaySize: c.DisplaySize, - Textconv: c.Textconv, - FormatJSON: c.JSON, - Verify: c.Verify, + Object: c.Object, + Limit: c.Limit, + Type: c.T, + PrintSize: c.DisplaySize, + Textconv: c.Textconv, + Direct: c.Direct, + PrintJSON: c.JSON, + Verify: c.Verify, + Output: c.Output, }) } diff --git a/pkg/command/command_diff.go b/pkg/command/command_diff.go index 22ae5dd..1f53a01 100644 --- a/pkg/command/command_diff.go +++ b/pkg/command/command_diff.go @@ -26,17 +26,17 @@ type Diff struct { Z bool `name:":z" short:"z" help:"Output diff-raw with lines terminated with NUL"` Staged bool `name:"staged" help:"Compare the differences between the staging area and "` Cached bool `name:"cached" help:"Compare the differences between the staging area and "` - TextConv bool `name:"textconv" help:"Convert text to Unicode and compare differences"` + Textconv bool `name:"textconv" help:"Converting text to Unicode"` MergeBase string `name:"merge-base" help:"If --merge-base is given, use the common ancestor of and HEAD instead"` - Output string `name:"output" help:"Output to a specific file instead of stdout" placeholder:""` Histogram bool `name:"histogram" help:"Generate a diff using the \"Histogram diff\" algorithm"` ONP bool `name:"onp" help:"Generate a diff using the \"O(NP) diff\" algorithm"` Myers bool `name:"myers" help:"Generate a diff using the \"Myers diff\" algorithm"` Patience bool `name:"patience" help:"Generate a diff using the \"Patience diff\" algorithm"` Minimal bool `name:"minimal" help:"Spend extra time to make sure the smallest possible diff is produced"` DiffAlgorithm string `name:"diff-algorithm" help:"Choose a diff algorithm, supported: histogram|onp|myers|patience|minimal"` - From string `arg:"" optional:"" name:"from" help:"From"` - To string `arg:"" optional:"" name:"to" help:"To"` + Output string `name:"output" help:"Output to a specific file instead of stdout" placeholder:""` + From string `arg:"" optional:"" name:"from" help:""` + To string `arg:"" optional:"" name:"to" help:""` passthroughArgs []string `kong:"-"` } @@ -69,20 +69,18 @@ func (c *Diff) checkAlgorithm() (diferenco.Algorithm, error) { if len(c.DiffAlgorithm) != 0 { return diferenco.AlgorithmFromName(c.DiffAlgorithm) } - if c.Histogram { + switch { + case c.Histogram: return diferenco.Histogram, nil - } - if c.ONP { + case c.ONP: return diferenco.ONP, nil - } - if c.Myers { + case c.Myers: return diferenco.Myers, nil - } - if c.Patience { + case 
c.Patience: return diferenco.Patience, nil - } - if c.Minimal { + case c.Minimal: return diferenco.Minimal, nil + default: } return diferenco.Unspecified, nil } @@ -105,7 +103,7 @@ func (c *Diff) NewOptions() (*zeta.DiffOptions, error) { To: c.To, Staged: c.Staged || c.Cached, MergeBase: c.MergeBase, - TextConv: c.TextConv, + Textconv: c.Textconv, Algorithm: a, } if len(c.To) == 0 { @@ -152,7 +150,7 @@ func (c *Diff) render(u *diferenco.Unified) error { s := u.Stat() name := c.From if c.From != c.To { - name = fmt.Sprintf("%s => %s", c.From, c.To) + name = object.PathRenameCombine(c.From, c.To) } opts.ShowStats(context.Background(), object.FileStats{ object.FileStat{ @@ -186,6 +184,8 @@ func (c *Diff) diffNoIndex(g *Globals) error { die("missing arg, example: zeta diff --no-index from to") return ErrArgRequired } + c.From = cleanPath(c.From) + c.To = cleanPath(c.To) if c.NameOnly || c.NameStatus { return c.nameStatus() } @@ -196,12 +196,12 @@ func (c *Diff) diffNoIndex(g *Globals) error { return err } g.DbgPrint("from %s to %s", c.From, c.To) - from, err := zeta.ReadContent(c.From, c.TextConv) + from, err := zeta.ReadContent(c.From, c.Textconv) if err != nil { diev("zeta diff --no-index hash error: %v", err) return err } - to, err := zeta.ReadContent(c.To, c.TextConv) + to, err := zeta.ReadContent(c.To, c.Textconv) if err != nil && err != diferenco.ErrNonTextContent { diev("zeta diff --no-index read text error: %v", err) return err diff --git a/pkg/command/command_merge.go b/pkg/command/command_merge.go index d240570..04389f0 100644 --- a/pkg/command/command_merge.go +++ b/pkg/command/command_merge.go @@ -23,7 +23,7 @@ type Merge struct { FFOnly bool `name:"ff-only" help:"Abort if fast-forward is not possible"` Squash bool `name:"squash" help:"Create a single commit instead of doing a merge"` AllowUnrelatedHistories bool `name:"allow-unrelated-histories" help:"Allow merging unrelated histories"` - Textconv bool `name:"textconv" help:"Convert text to Unicode before merging"` + Textconv bool `name:"textconv" help:"Converting text to Unicode"` Message []string `name:"message" short:"m" help:"Merge commit message (for a non-fast-forward merge)"` File string `name:"file" short:"F" help:"Read message from file"` Signoff bool `name:"signoff" negatable:"" help:"Add a Signed-off-by trailer" default:"false"` diff --git a/pkg/command/command_merge_file.go b/pkg/command/command_merge_file.go index 234dc10..d6c533e 100644 --- a/pkg/command/command_merge_file.go +++ b/pkg/command/command_merge_file.go @@ -17,9 +17,9 @@ type MergeFile struct { ZDiff3 bool `name:"zdiff3" negatable:"" help:"Use a zealous diff3 based merge"` DiffAlgorithm string `name:"diff-algorithm" help:"Choose a diff algorithm, supported: histogram|onp|myers|patience|minimal"` L []string `name:":L" short:"L" help:"Set labels for file1/orig-file/file2"` - F1 string `arg:"" name:"0" help:"file1"` - O string `arg:"" name:"1" help:"orig-file"` - F2 string `arg:"" name:"2" help:"file2"` + F1 string `arg:"" name:"file1" help:""` + O string `arg:"" name:"orig-file" help:""` + F2 string `arg:"" name:"file2" help:""` } const ( diff --git a/pkg/command/command_merge_tree.go b/pkg/command/command_merge_tree.go index 54d797b..1263a88 100644 --- a/pkg/command/command_merge_tree.go +++ b/pkg/command/command_merge_tree.go @@ -15,7 +15,7 @@ type MergeTree struct { MergeBase string `name:"merge-base" help:"Specify a merge-base for the merge"` AllowUnrelatedHistories bool `name:"allow-unrelated-histories" help:"If branches lack common history, merge-tree 
errors. Use this flag to force merge"` NameOnly bool `name:"name-only" help:"Only output conflict-related file names"` - Textconv bool `name:"textconv" help:"Convert text to Unicode before merging"` + Textconv bool `name:"textconv" help:"Converting text to Unicode"` Z bool `name:":z" short:"z" help:"Terminate entries with NUL byte"` JSON bool `name:"json" help:"Convert conflict results to JSON"` } diff --git a/pkg/command/command_show.go b/pkg/command/command_show.go new file mode 100644 index 0000000..8874554 --- /dev/null +++ b/pkg/command/command_show.go @@ -0,0 +1,84 @@ +package command + +import ( + "context" + "fmt" + "os" + + "github.com/antgroup/hugescm/modules/diferenco" + "github.com/antgroup/hugescm/pkg/zeta" +) + +// merge commit: only show commit metadata +// commit: show commit metadata and diff +// tree: tree path +// tree list +// blob: blob content + +// Show various types of objects +type Show struct { + Textconv bool `name:"textconv" help:"Converting text to Unicode"` + Output string `name:"output" help:"Output to a specific file instead of stdout" placeholder:""` + Histogram bool `name:"histogram" help:"Generate a diff using the \"Histogram diff\" algorithm"` + ONP bool `name:"onp" help:"Generate a diff using the \"O(NP) diff\" algorithm"` + Myers bool `name:"myers" help:"Generate a diff using the \"Myers diff\" algorithm"` + Patience bool `name:"patience" help:"Generate a diff using the \"Patience diff\" algorithm"` + Minimal bool `name:"minimal" help:"Spend extra time to make sure the smallest possible diff is produced"` + DiffAlgorithm string `name:"diff-algorithm" help:"Choose a diff algorithm, supported: histogram|onp|myers|patience|minimal"` + Limit int64 `name:"limit" short:"L" help:"Omits blobs larger than n bytes or units. n may be zero. 
supported units: KB,MB,GB,K,M,G" default:"-1" type:"size"` + Objects []string `arg:"" optional:"" name:"object" help:""` +} + +const ( + showSummaryFormat = `%szeta show [] ...` +) + +func (c *Show) Summary() string { + return fmt.Sprintf(showSummaryFormat, W("Usage: ")) +} + +func (c *Show) checkAlgorithm() (diferenco.Algorithm, error) { + if len(c.DiffAlgorithm) != 0 { + return diferenco.AlgorithmFromName(c.DiffAlgorithm) + } + switch { + case c.Histogram: + return diferenco.Histogram, nil + case c.ONP: + return diferenco.ONP, nil + case c.Myers: + return diferenco.Myers, nil + case c.Patience: + return diferenco.Patience, nil + case c.Minimal: + return diferenco.Minimal, nil + default: + } + return diferenco.Unspecified, nil +} + +func (c *Show) Run(g *Globals) error { + if len(c.Objects) == 0 { + c.Objects = append(c.Objects, "HEAD") + } + r, err := zeta.Open(context.Background(), &zeta.OpenOptions{ + Worktree: g.CWD, + Values: g.Values, + Verbose: g.Verbose, + }) + if err != nil { + return err + } + defer r.Close() + a, err := c.checkAlgorithm() + if err != nil { + fmt.Fprintf(os.Stderr, "parse options error: %v\n", err) + return err + } + return r.Show(context.Background(), &zeta.ShowOptions{ + Objects: c.Objects, + Textconv: c.Textconv, + Limit: c.Limit, + Algorithm: a, + }) +} diff --git a/pkg/command/msic.go b/pkg/command/msic.go index 555cdec..51ec339 100644 --- a/pkg/command/msic.go +++ b/pkg/command/msic.go @@ -171,10 +171,14 @@ func die(m string) { _, _ = os.Stderr.Write(b.Bytes()) } +func cleanPath(p string) string { + return filepath.ToSlash(filepath.Clean(p)) +} + func slashPaths(paths []string) []string { newPaths := make([]string, 0, len(paths)) for _, p := range paths { - newPaths = append(newPaths, filepath.ToSlash(p)) + newPaths = append(newPaths, cleanPath(p)) } return newPaths } diff --git a/pkg/tr/languages/zh-CN.toml b/pkg/tr/languages/zh-CN.toml index ebad9e1..9b8eafd 100644 --- a/pkg/tr/languages/zh-CN.toml +++ b/pkg/tr/languages/zh-CN.toml @@ -16,7 +16,7 @@ "fatal: " = "致命错误:" "hint: " = "提示:" "warning: " = "警告:" -# Checkout +# checkout "Checkout remote, switch branches, or restore worktree files" = "检出远程,切换分支或还原工作区文件" "Remote url or branch" = "远程 URL 或分支" "Destination for the new repository" = "存储库的保存位置" @@ -40,7 +40,7 @@ "Checkout '%s' success.\n" = "成功检出 '%s'。\n" "Checkout one after another, total: %d\n" = "一个接着另一个的检出文件,总计:%d\n" "The repository filesystem is '%s', which may affect zeta's operation." 
= "存储库文件系统为 '%s',可能会影响 zeta 运行。" -# Switch +# switch "Switch branches" = "切换分支" "Branch to switch to and start-point" = "要切换的分支和 start-point" "Create a new branch named starting at before switching to the branch" = "在切换到分支之前,从 开始创建一个名为 的新分支" @@ -54,7 +54,7 @@ "Attempt to checkout from remote when branch is absent" = "当分支不存在时,尝试从远程检出" "couldn't find branch '%s', add '--remote' download and switch to this branch" = "找不到分支 '%s',添加 '--remote' 下载并切换到该分支" "missing branch or commit argument" = "缺少分支或提交参数" -# Fetch +# fetch "Download objects and reference from remote" = "从远程下载对象和引用" "Reference or commit to be downloaded" = "待下载的引用或者提交" "Get complete history" = "获取完整的历史" @@ -76,7 +76,7 @@ "non-fast-forward" = "非快进" "fetch missing object error: %v" = "拉取缺失对象错误:%v" "fetch target '%s' error: %v" = "拉取目标 '%s' 错误:%v" -# Pull +# pull "Fetch from and integrate with remote" = "从远程获取并与其集成" "Specifies which branch to fetch and update" = "指定要获取和更新的分支" "Allow fast-forward" = "允许快进" @@ -89,7 +89,7 @@ "Please enter a commit message to explain why this merge is necessary," = "请输入一个提交信息以解释此合并的必要性,尤其是将一个更新后的上游分支" "especially if it merges an updated upstream into a topic branch." = "合并到主题分支。" "Lines starting with '%c' will be ignored, and an empty message aborts." = "以 '%c' 开始的行将被忽略,而空的提交说明将终止提交。" -# Commit +# commit "Record changes to the repository" = "记录对存储库的更改" "Use the given as the commit message. Concatenate multiple -m options as separate paragraphs" = "使用给定的作为提交说明。多个 -m 选项的值会作为独立段落合并" "Take the commit message from the given file. Use - to read the message from the standard input" = "从给定文件中获取提交消息。 使用 - 从标准输入读取消息" @@ -99,7 +99,7 @@ "Replace the tip of the current branch by creating a new commit" = "通过创建新的提交来替换当前分支的提示" "Aborting commit due to empty commit message." = "终止提交因为提交说明为空。" "Please enter the commit message for your changes. Lines starting\nwith '%c' will be ignored, and an empty message aborts the commit." = "请为您的变更输入提交说明。以 '%c' 开始的行将被忽略,而一个空的提交\n说明将会终止提交。" -# Push +# push "Update remote refs along with associated objects" = "更新远程引用以及关联的对象" "Option to transmit" = "传输选项" "Update remote tag reference" = "更新远程标签引用" @@ -112,7 +112,7 @@ "upload large objects error: %v" = "上传大对象错误:%v" "Push failed: %v" = "推送失败:%v" "parse report error: %v" = "解析报告错误:%v" -# Branch +# branch "List, create, or delete branches" = "列出、创建或删除分支" "Get and set repository or global options" = "获取和设置存储库或全局选项" "Provide contents or details of repository objects" = "提供存储库对象的内容或类型和大小信息" @@ -131,15 +131,16 @@ "cannot delete branch '%s' used by worktree at '%s'" = "无法强制更新被工作区 '%[2]s' 所使用的分支 '%[1]s'" "Branch '%s' has been moved to '%s'\n" = "已经将分支 '%s' 移动到 '%s'\n" "Deleted branch %s (was %s).\n" = "已删除分支 %s(曾为 %s)。\n" -# Cat +# cat "The name of the object to show" = "要显示的对象的名称。" "Show object type" = "显示对象的类型" "Show object size" = "显示对象的大小" "Verify object hash" = "验证对象的哈希" "Blob output size limit; use 0 for unlimited. 
Units: KB, MB, GB, K, M, G" = "输出 blob 的大小限制;设置为 0 无限制。支持单位:KB, MB, GB, K, M, G" "Returns data as JSON; limited to commits, trees, fragments, and tags" = "仅提交、树、片段、标签数据以 JSON 格式返回" -"Output text as Unicode; blobs only" = "输出文本转为 Unicode,仅限 blobs" -# Config +"Converting text to Unicode" = "将文本转为 Unicode" +"View files directly" = "直接查看文件" +# config "Name and value, support: appears in pairs or , eg: zeta config K1=V1 K2=V2" = "名称和值,支持: 这样成对出现或者 ,举例:zeta config K1=V1 K2=V2" "Use system config file" = "使用系统级配置文件" "Only read or write to global ~/.zeta.toml" = "只读取或写入全局配置 ~/.zeta.toml" @@ -152,7 +153,7 @@ "Terminate values with NUL byte" = "终止值是 NUL 字节" "zeta config will ensure that any input or output is valid under the given type constraint(s), support: bool, int, float, date" = "zeta 配置将确保任何输入或输出在给定类型约束下有效, 支持类型: bool, int, float, date" "only one config file at a time" = "一次只能有一个配置文件" -# Tag +# tag "List, create, or delete tags" = "列出、创建或删除标签" "Annotated tag, needs a message" = "附注标签,需要一个说明" "Take the tag message from the given file. Use - to read the message from the standard input" = "从给定文件中获取标签消息。 使用 - 从标准输入读取消息" @@ -166,12 +167,12 @@ "no tag message?" = "无标签说明?" "Write a message for tag:" = "输入一个标签说明:" "Lines starting with '%c' will be ignored." = "以 '%c' 开头的行将被忽略。" -# Log +# log "Show commit logs" = "显示提交日志" "Show the working tree status" = "显示工作树状态" "Give the output in the short-format" = "以短格式给出输出" "Revision range" = "版本范围" -# Status +# status "(use \"zeta restore --staged ...\" to unstage)" = "(使用 \"zeta restore --staged <文件>...\" 以取消暂存)" "Changes not staged for commit" = "尚未暂存以备提交的变更" "(use \"zeta add ...\" to update what will be committed)" = "(使用 \"zeta add <文件>...\" 更新要提交的内容)" @@ -192,7 +193,7 @@ "On branch" = "位于分支" "HEAD detached at" = "头指针分离于" "Changes to be committed:" = "要提交的变更:" -# Version +# version "Display version information" = "显示版本信息" "Also print build options" = "还打印构建选项" "Run \"%s --help\" for more information." = "运行 \"%s --help\" 以获取更多信息。" @@ -207,7 +208,7 @@ "param '%s' must be either -x or +x" = "参数取值 '%s' 必须是 -x 或 +x" "Nothing specified, nothing added." = "没有指定文件,也没有文件被添加。" "hint: Maybe you wanted to say 'zeta add .'?" = "提示:也许您想要执行 'zeta add .'?" -# GC +# gc "Cleanup unnecessary files and optimize the local repository" = "清除不必要的文件和优化本地仓库" "Pack %s objects: loose object %d packed objects %d\n" = "打包 %s 对象:松散对象 %d 打包对象 %d\n" "Pack %s objects: no smaller loose object, skipping packing.\n" = "打包 %s 对象:无较小松散对象,跳过打包。\n" @@ -216,7 +217,7 @@ "completed" = "完成" "Removed duplicate packages: %d, duplicate objects: %d empty dirs: %d\n" = "已删除重复的包:%d 重复对象:%d 空目录:%d\n" "Pruning objects older than specified date (default is 2 weeks ago, configurable with gc.pruneExpire)" = "清理早于指定日期的孤立对象(默认为 2 周前,可通过 gc.pruneExpire 配置)" -# Restore +# restore "Restore files" = "恢复文件" "Restore files completed" = "恢复文件完成" "Which tree-ish to checkout from" = "要检出哪一个树" @@ -227,7 +228,7 @@ "SYNOPSIS" = "概要" "Specify restore location. By default, restores working tree. Use --staged for index only, or both for both." 
= "指定恢复位置。默认情况下,恢复工作树。使用 --staged 仅恢复索引,或同时指定两者以恢复两者。" "you must specify path(s) to restore" = "您必须指定要恢复的路径" -# Reset +# reset "Reset current HEAD to the specified state" = "将当前 HEAD 重置为指定状态" "Reset HEAD and index" = "重置 HEAD 和索引" "Reset only HEAD" = "仅重置 HEAD" @@ -238,7 +239,7 @@ "Unstaged changes after reset:" = "重置后取消暂存的变更:" "Fetch missing objects" = "获取丢失的对象" "is now at" = "现在位于" -# Clean +# clean "Remove untracked files from the working tree" = "从工作树中移除未跟踪的文件" "Remove whole directories" = "删除整个目录" "Remove ignored files, too" = "也删除忽略的文件" @@ -247,7 +248,7 @@ "Would remove" = "将删除" "Removing" = "正删除" "refusing to clean, please specify at least -f or -n" = "拒绝 clean,请至少指定 -f 或者 -n" -# LS-Tree +# ls-tree "List the contents of a tree object" = "列出树对象的内容" "Only show trees" = "只显示树" "Recurse into subtrees" = "递归到子树" @@ -258,7 +259,7 @@ "Use digits to display object names" = "用 位数字显示对象名" "ID of a tree-ish" = "ID 或者 tree 对象哈希" "Given paths, show as match patterns; else, use root as sole argument" = "有路径时,显示为匹配模式;否则,使用根目录作为唯一路径参数" -# Diff +# diff "Show changes between commits, commit and working tree, etc" = "显示提交之间的更改、提交和工作树等" "Compares two given paths on the filesystem" = "比较文件系统上给定的两个路径" "Show only names of changed files" = "仅显示已更改文件的名称" @@ -269,7 +270,6 @@ "Output diff-raw with lines terminated with NUL" = "输出 diff-raw,行以 NUL 结尾" "Compare the differences between the staging area and " = "比较暂存区和 之间的差异" "If --merge-base is given, use the common ancestor of and HEAD instead" = "如果给定 --merge-base,则使用 与 HEAD 的共同祖先" -"Convert text to Unicode and compare differences" = "将文本转变为 Unicode 然后再比较差异" "Output to a specific file instead of stdout" = "输出到特定文件而不是 stdout" "Generate a diff using the \"Histogram diff\" algorithm" = "使用 \"Histogram diff\" 算法生成差异" "Generate a diff using the \"O(NP) diff\" algorithm" = "使用 \"O(NP) diff\" 算法生成差异" @@ -277,13 +277,13 @@ "Generate a diff using the \"Patience diff\" algorithm" = "使用 \"Patience diff\" 算法生成差异" "Choose a diff algorithm, supported: histogram|onp|myers|patience|minimal" = "选择一个 diff 算法,支持:histogram|onp|myers|patience|minimal" "Spend extra time to make sure the smallest possible diff is produced" = "花费额外的时间来确保产生尽可能最小的差异" -# RM +# rm "Remove files from the working tree and from the index" = "从工作树和索引中删除文件" "Override the up-to-date check" = "忽略文件更新状态检查" "Do not list removed files" = "不列出删除的文件" "Only remove from the index" = "只从索引区删除" "Allow recursive removal" = "允许递归删除" -# Merge +# merge "Join two development histories together" = "将两段发展史连在一起" "Merge specific revision into HEAD" = "将特定的版本合并到 HEAD" "Create a single commit instead of doing a merge" = "创建一个单独的提交而不是做一次合并" @@ -299,19 +299,18 @@ "Updating" = "更新" "refusing to merge unrelated histories" = "拒绝合并无关的历史" "No merge message -- not updating HEAD" = "无合并信息 -- 未更新 HEAD" -# Rebase +# rebase "Reapply commits on top of another base tip" = "在另一个 base 之上重新应用提交" "Rebase onto given branch" = "变基到给定的分支" "Abort and checkout the original branch" = "终止并检出原有分支" "Continue" = "继续" "Successfully rebased and updated %s.\n" = "成功变基并更新 %s。\n" "cannot rebase: You have unstaged changes." = "不能变基:您有未暂存的变更。" -# Merge-tree +# merge-tree "Perform merge without touching index or working tree" = "执行合并而不触及索引和工作区" "Specify a merge-base for the merge" = "指定用于合并的合并基线" "If branches lack common history, merge-tree errors. 
Use this flag to force merge" = "如果分支无共同历史,合并会失败,这个标志用来绕过这个限制" "Only output conflict-related file names" = "仅输出冲突相关的文件名" -"Convert text to Unicode before merging" = "将文本转变为 Unicode 然后再执行合并" "Convert conflict results to JSON" = "将冲突结果转换为 JSON" "Auto-merging %s" = "自动合并 %s" "warning: Cannot merge binary files: %s (%s vs. %s)" = "警告: 无法合并二进制文件: %s (%s vs. %s)" @@ -322,7 +321,7 @@ "CONFLICT (modify/delete): %s deleted in %s and modified in %s." = "冲突(修改/删除):%s 在 %s 中被删除,在 %s 中被修改。" "content" = "内容" "add/add" = "添加/添加" -# Stash +# stash "Stash the changes in a dirty working directory away" = "将脏工作目录中的更改暂存起来" "Stash local changes and revert to HEAD" = "暂存本地更改并恢复到 HEAD" "List the stash entries that you currently have" = "列出您当前拥有的暂存条目" @@ -388,6 +387,8 @@ "Use a diff3 based merge" = "使用基于 diff3 的合并" "Use a zealous diff3 based merge" = "使用基于狂热 diff3(zealous diff3)的合并" "Set labels for file1/orig-file/file2" = "为 文件1/初始文件/文件2 设置标签" +# show +"Show various types of objects" = "显示各种类型的对象" # Others "WARNING" = "警告" "not zeta repository" = "不是 zeta 存储库" diff --git a/pkg/zeta/blame.go b/pkg/zeta/blame.go new file mode 100644 index 0000000..cdc8a88 --- /dev/null +++ b/pkg/zeta/blame.go @@ -0,0 +1,573 @@ +// Copyright 2018 Sourced Technologies, S.L. +// SPDX-License-Identifier: Apache-2.0 + +package zeta + +import ( + "bytes" + "container/heap" + "context" + "errors" + "fmt" + "io" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/antgroup/hugescm/modules/diferenco" + "github.com/antgroup/hugescm/modules/plumbing" + "github.com/antgroup/hugescm/modules/zeta/object" +) + +// BlameResult represents the result of a Blame operation. +type BlameResult struct { + // Path is the path of the File that we're blaming. + Path string + // Rev (Revision) is the hash of the specified Commit used to generate this result. + Rev plumbing.Hash + // Lines contains every line with its authorship. + Lines []*Line +} + +func contentLines(content string) []string { + splits := strings.Split(content, "\n") + // remove the last line if it is empty + if splits[len(splits)-1] == "" { + return splits[:len(splits)-1] + } + return splits +} + +// Blame returns a BlameResult with the information about the last author of +// each line from file `path` at commit `c`. +func Blame(ctx context.Context, c *object.Commit, path string) (*BlameResult, error) { + // The file to blame is identified by the input arguments: + // commit and path. commit is a Commit object obtained from a Repository. Path + // represents a path to a specific file contained in the repository. + // + // Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified. + // + // When a diff is found it cannot immediately assume it came from that commit, as it may have come from 1 of its + // parents, so it will first try to resolve those diffs from its parents, if it couldn't find the change in its + // parents then it will assign the change to itself. + // + // When encountering 2 parents that have made the same change to a file it will choose the parent that was merged + // into the current branch first (this is determined by the order of the parents inside the commit). + // + // This currently works on a line by line basis, if performance becomes an issue it could be changed to work with + // hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary. 
+ + b := new(blame) + b.fRev = c + b.path = path + b.q = new(priorityQueue) + + file, err := b.fRev.File(ctx, path) + if err != nil { + return nil, err + } + contents, err := file.UnifiedText(ctx, false) + if err != nil { + return nil, err + } + finalLines := contentLines(contents) + finalLength := len(finalLines) + + needsMap := make([]lineMap, finalLength) + for i := range needsMap { + needsMap[i] = lineMap{i, i, nil, -1} + } + b.q.Push(&queueItem{ + nil, + nil, + c, + path, + contents, + needsMap, + 0, + false, + 0, + }) + items := make([]*queueItem, 0) + for { + items = items[:0] + for { + if b.q.Len() == 0 { + return nil, errors.New("invalid state: no items left on the blame queue") + } + item := b.q.Pop() + items = append(items, item) + next := b.q.Peek() + if next == nil || next.Hash != item.Commit.Hash { + break + } + } + finished, err := b.addBlames(ctx, items) + if err != nil { + return nil, err + } + if finished { + break + } + } + + b.lineToCommit = make([]*object.Commit, finalLength) + for i := range needsMap { + b.lineToCommit[i] = needsMap[i].Commit + } + + lines, err := newLines(finalLines, b.lineToCommit) + if err != nil { + return nil, err + } + + return &BlameResult{ + Path: path, + Rev: c.Hash, + Lines: lines, + }, nil +} + +// Line values represent the contents and author of a line in BlamedResult values. +type Line struct { + // Author is the email address of the last author that modified the line. + Author string + // AuthorName is the name of the last author that modified the line. + AuthorName string + // Text is the original text of the line. + Text string + // Date is when the original text of the line was introduced + Date time.Time + // Hash is the commit hash that introduced the original line + Hash plumbing.Hash +} + +func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line { + return &Line{ + Author: author, + AuthorName: authorName, + Text: text, + Hash: hash, + Date: date, + } +} + +func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { + result := make([]*Line, 0, len(contents)) + for i := range contents { + result = append(result, newLine( + commits[i].Author.Email, commits[i].Author.Name, contents[i], + commits[i].Author.When, commits[i].Hash, + )) + } + + return result, nil +} + +// this struct is internally used by the blame function to hold its +// inputs, outputs and state. +type blame struct { + // the path of the file to blame + path string + // the commit of the final revision of the file to blame + fRev *object.Commit + // resolved lines + lineToCommit []*object.Commit + // queue of commits that need resolving + q *priorityQueue +} + +type lineMap struct { + Orig, Cur int + Commit *object.Commit + FromParentNo int +} + +func (b *blame) addBlames(ctx context.Context, curItems []*queueItem) (bool, error) { + curItem := curItems[0] + + // Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates + // not only if they are all the same. 
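+	// A single queued item needs no merging; the merge below only applies when every
+	// pending item for this commit is identical to the same child.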
+	if len(curItems) == 1 {
+		curItems = nil
+	} else if curItem.IdenticalToChild {
+		allSame := true
+		lenCurItems := len(curItems)
+		lowestParentNo := curItem.ParentNo
+		for i := 1; i < lenCurItems; i++ {
+			if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
+				allSame = false
+				break
+			}
+			lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
+		}
+		if allSame {
+			curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
+			curItems = nil // free the memory
+			curItem.ParentNo = lowestParentNo
+
+			// Now check if we can remove the parent completely
+			for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
+				oldChild := curItem.Child
+				curItem.Child = oldChild.Child
+				curItem.ParentNo = oldChild.ParentNo
+			}
+		}
+	}
+
+	// if we have more than 1 item for this commit, create a single needsMap
+	if len(curItems) > 1 {
+		curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
+		for i, c := range curItems {
+			curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
+		}
+		newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
+		newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
+
+		for i := 1; i < len(curItems); i++ {
+			cur := curItems[i].NeedsMap
+			n := 0 // position in newNeedsMap
+			c := 0 // position in current list
+			for c < len(cur) {
+				if n == len(newNeedsMap) {
+					newNeedsMap = append(newNeedsMap, cur[c:]...)
+					break
+				} else if newNeedsMap[n].Cur == cur[c].Cur {
+					n++
+					c++
+				} else if newNeedsMap[n].Cur < cur[c].Cur {
+					n++
+				} else {
+					newNeedsMap = append(newNeedsMap, cur[c])
+					newPos := len(newNeedsMap) - 1
+					for newPos > n {
+						newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
+						newPos--
+					}
+				}
+			}
+		}
+		curItem.NeedsMap = newNeedsMap
+		curItem.IdenticalToChild = false
+		curItem.Child = nil
+		curItems = nil // free the memory
+	}
+
+	parents, err := parentsContainingPath(ctx, curItem.path, curItem.Commit)
+	if err != nil {
+		return false, err
+	}
+
+	anyPushed := false
+	for parentNo, prev := range parents {
+		currentHash, err := blobHash(ctx, curItem.path, curItem.Commit)
+		if err != nil {
+			return false, err
+		}
+		prevHash, err := blobHash(ctx, prev.Path, prev.Commit)
+		if err != nil {
+			return false, err
+		}
+		if currentHash == prevHash {
+			if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
+				// commit that has 1 parent and 1 child and is the same as both, bypass it completely
+				b.q.Push(&queueItem{
+					Child:            curItem.Child,
+					Commit:           prev.Commit,
+					path:             prev.Path,
+					Contents:         curItem.Contents,
+					NeedsMap:         curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
+					IdenticalToChild: true,
+					ParentNo:         curItem.ParentNo,
+				})
+			} else {
+				b.q.Push(&queueItem{
+					Child:            curItem,
+					Commit:           prev.Commit,
+					path:             prev.Path,
+					Contents:         curItem.Contents,
+					NeedsMap:         append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
+					IdenticalToChild: true,
+					ParentNo:         parentNo,
+				})
+				curItem.numParentsNeedResolving++
+			}
+			anyPushed = true
+			continue
+		}
+
+		// get the contents of the file
+		file, err := prev.Commit.File(ctx, prev.Path)
+		if err != nil {
+			return false, err
+		}
+		prevContents, err := file.UnifiedText(ctx, false)
+		if err != nil {
+			return false, err
+		}
+		u, err := diferenco.DoUnified(ctx, &diferenco.Options{
+			S1: prevContents,
+			S2: curItem.Contents,
+		})
+		if err != nil {
+			return false, err
+		}
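+		// Walk the unified hunks with two cursors: prevl tracks the parent's line number
+		// and curl the child's; equal lines map outstanding needs back onto parent lines.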
+		prevl := -1
+		curl := -1
+		need := 0
+		getFromParent := make([]lineMap, 0)
+	out:
+		for _, h := range u.Hunks {
+			for hl := 0; hl < len(h.Lines); hl++ {
+				switch h.Lines[hl].Kind {
+				case diferenco.Equal:
+					prevl++
+					curl++
+					if curl == curItem.NeedsMap[need].Cur {
+						// add to needs
+						getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
+						// move to next need
+						need++
+						if need >= len(curItem.NeedsMap) {
+							break out
+						}
+					}
+				case diferenco.Insert:
+					curl++
+					if curl == curItem.NeedsMap[need].Cur {
+						// the line we want is added, it may have been added here (or by another parent), skip it for now
+						need++
+						if need >= len(curItem.NeedsMap) {
+							break out
+						}
+					}
+				case diferenco.Delete:
+					prevl++
+					continue out
+				default:
+					return false, errors.New("invalid state: invalid hunk Type")
+				}
+			}
+		}
+
+		if len(getFromParent) > 0 {
+			b.q.Push(&queueItem{
+				curItem,
+				nil,
+				prev.Commit,
+				prev.Path,
+				prevContents,
+				getFromParent,
+				0,
+				false,
+				parentNo,
+			})
+			curItem.numParentsNeedResolving++
+			anyPushed = true
+		}
+	}
+
+	curItem.Contents = "" // no longer needed, free the memory
+
+	if !anyPushed {
+		return finishNeeds(curItem)
+	}
+
+	return false, nil
+}
+
+func finishNeeds(curItem *queueItem) (bool, error) {
+	// any needs left in the needsMap must have come from this revision
+	for i := range curItem.NeedsMap {
+		if curItem.NeedsMap[i].Commit == nil {
+			curItem.NeedsMap[i].Commit = curItem.Commit
+			curItem.NeedsMap[i].FromParentNo = -1
+		}
+	}
+
+	if curItem.Child == nil && curItem.MergedChildren == nil {
+		return true, nil
+	}
+
+	if curItem.MergedChildren == nil {
+		return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
+	}
+
+	for _, ctn := range curItem.MergedChildren {
+		m := 0 // position in merged needs map
+		p := 0 // position in parent needs map
+		for p < len(ctn.NeedsMap) {
+			if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
+				ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
+				m++
+				p++
+			} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
+				p++
+			} else {
+				m++
+			}
+		}
+		finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
+		if finished || err != nil {
+			return finished, err
+		}
+	}
+
+	return false, nil
+}
+
+func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
+	if identicalToChild {
+		for i := range child.NeedsMap {
+			l := &child.NeedsMap[i]
+			if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
+				return false, errors.New("needsMap isn't the same? Why not??")
+			}
+			if l.Commit == nil || parentNo < l.FromParentNo {
+				l.Commit = needsMap[i].Commit
+				l.FromParentNo = parentNo
+			}
+		}
+	} else {
+		i := 0
+	out:
+		for j := range child.NeedsMap {
+			l := &child.NeedsMap[j]
+			for needsMap[i].Orig < l.Cur {
+				i++
+				if i == len(needsMap) {
+					break out
+				}
+			}
+			if l.Cur == needsMap[i].Orig {
+				if l.Commit == nil || parentNo < l.FromParentNo {
+					l.Commit = needsMap[i].Commit
+					l.FromParentNo = parentNo
+				}
+			}
+		}
+	}
+	child.numParentsNeedResolving--
+	if child.numParentsNeedResolving == 0 {
+		finished, err := finishNeeds(child)
+		if finished || err != nil {
+			return finished, err
+		}
+	}
+
+	return false, nil
+}
+
+// String prints the results of a Blame using git-blame's style.
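+// Each line is rendered as "hash (author date lineno) text", with the author
+// column padded to the longest name.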
+func (b BlameResult) String() string {
+	var buf bytes.Buffer
+
+	// max line number length
+	mlnl := len(strconv.Itoa(len(b.Lines)))
+	// max author length
+	mal := b.maxAuthorLength()
+	format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
+
+	for ln := range b.Lines {
+		_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
+			b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
+	}
+	return buf.String()
+}
+
+// utility function to calculate the number of runes needed
+// to print the longest author name in the blame of a file.
+func (b BlameResult) maxAuthorLength() int {
+	m := 0
+	for ln := range b.Lines {
+		m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
+	}
+	return m
+}
+
+type childToNeedsMap struct {
+	Child            *queueItem
+	NeedsMap         []lineMap
+	IdenticalToChild bool
+	ParentNo         int
+}
+
+type queueItem struct {
+	Child                   *queueItem
+	MergedChildren          []childToNeedsMap
+	Commit                  *object.Commit
+	path                    string
+	Contents                string
+	NeedsMap                []lineMap
+	numParentsNeedResolving int
+	IdenticalToChild        bool
+	ParentNo                int
+}
+
+type priorityQueueImp []*queueItem
+
+func (pq *priorityQueueImp) Len() int { return len(*pq) }
+func (pq *priorityQueueImp) Less(i, j int) bool {
+	return !(*pq)[i].Commit.Less((*pq)[j].Commit)
+}
+func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
+func (pq *priorityQueueImp) Push(x any)    { *pq = append(*pq, x.(*queueItem)) }
+func (pq *priorityQueueImp) Pop() any {
+	n := len(*pq)
+	ret := (*pq)[n-1]
+	(*pq)[n-1] = nil // avoid memory leak
+	*pq = (*pq)[0 : n-1]
+
+	return ret
+}
+func (pq *priorityQueueImp) Peek() *object.Commit {
+	if len(*pq) == 0 {
+		return nil
+	}
+	return (*pq)[0].Commit
+}
+
+type priorityQueue priorityQueueImp
+
+func (pq *priorityQueue) Init()    { heap.Init((*priorityQueueImp)(pq)) }
+func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
+func (pq *priorityQueue) Push(c *queueItem) {
+	heap.Push((*priorityQueueImp)(pq), c)
+}
+func (pq *priorityQueue) Pop() *queueItem {
+	return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
+}
+func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
+
+type parentCommit struct {
+	Commit *object.Commit
+	Path   string
+}
+
+func parentsContainingPath(ctx context.Context, path string, c *object.Commit) ([]parentCommit, error) {
+	// TODO: benchmark this method making git.object.Commit.parent public instead of using
+	// an iterator
+	var result []parentCommit
+	iter := c.MakeParents()
+	for {
+		parent, err := iter.Next(ctx)
+		if err == io.EOF {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+		if _, err := parent.File(ctx, path); err == nil {
+			result = append(result, parentCommit{parent, path})
+		}
+	}
+}
+
+func blobHash(ctx context.Context, path string, commit *object.Commit) (plumbing.Hash, error) {
+	file, err := commit.File(ctx, path)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+	return file.Hash, nil
+}
diff --git a/pkg/zeta/blame_test.go b/pkg/zeta/blame_test.go
new file mode 100644
index 0000000..98e8711
--- /dev/null
+++ b/pkg/zeta/blame_test.go
@@ -0,0 +1,34 @@
+package zeta
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/antgroup/hugescm/modules/plumbing"
+)
+
+func TestBlame(t *testing.T) {
+	r, err := Open(context.Background(), &OpenOptions{
+		Worktree: "/private/tmp/hugescm-dev",
+	})
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "open repo error: %v\n", err)
+		return
+	}
+	defer r.Close()
+	cc, err := r.odb.Commit(context.Background(), plumbing.NewHash("4b2982c5c8835dfc3c1a8d0eddca9100e1aee1b7e7b9da44160bc9de99aa0b77"))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "open commit error: %v\n", err)
+		return
+	}
+	b, err := Blame(context.Background(), cc, "pkg/zeta/worktree_diff.go")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "blame error: %v\n", err)
+		return
+	}
+	for _, line := range b.Lines {
+		fmt.Fprintf(os.Stderr, "%s %s %s\n", line.Author, line.Date, line.Text)
+	}
+}
diff --git a/pkg/zeta/cat.go b/pkg/zeta/cat.go
index 7e9d563..d8a1710 100644
--- a/pkg/zeta/cat.go
+++ b/pkg/zeta/cat.go
@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"math"
 	"os"
 	"strings"
 
@@ -19,14 +18,38 @@ import (
 	"github.com/antgroup/hugescm/modules/zeta/object"
 )
 
+const (
+	MAX_SHOW_BINARY_BLOB = 10<<20 - 8
+)
+
 type CatOptions struct {
-	Hash        string
-	SizeMax     int64 // blob limit size
-	Type        bool  // object type
-	DisplaySize bool
-	Textconv    bool
-	FormatJSON  bool
-	Verify      bool
+	Object    string
+	Limit     int64 // blob limit size
+	Type      bool  // object type
+	PrintSize bool
+	PrintJSON bool
+	Verify    bool
+	Textconv  bool
+	Direct    bool
+	Output    string
+}
+
+func (opts *CatOptions) Println(a ...any) error {
+	fd, _, err := opts.NewFD()
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	_, err = fmt.Fprintln(fd, a...)
+	return err
+}
+
+func (opts *CatOptions) NewFD() (io.WriteCloser, bool, error) {
+	if len(opts.Output) == 0 {
+		return &NopWriteCloser{Writer: os.Stdout}, IsTerminal(os.Stdout.Fd()), nil
+	}
+	fd, err := os.Create(opts.Output)
+	return fd, false, err
 }
 
 func catShowError(oid string, err error) error {
@@ -41,18 +64,18 @@ func catShowError(oid string, err error) error {
 	return err
 }
 
-func (r *Repository) catMissingObject(ctx context.Context, oid plumbing.Hash) (*object.Blob, error) {
-	b, err := r.odb.Blob(ctx, oid)
-	if err == nil {
-		return b, nil
-	}
-	if !plumbing.IsNoSuchObject(err) {
-		return nil, err
+func (r *Repository) fetchMissingBlob(ctx context.Context, oid plumbing.Hash) error {
+	if r.odb.Exists(oid, false) {
+		return nil
 	}
 	if !r.promisorEnabled() {
-		return nil, err
+		return plumbing.NoSuchObject(oid)
 	}
-	if err = r.promiseMissingFetch(ctx, oid); err != nil {
+	return r.promiseMissingFetch(ctx, oid)
+}
+
+func (r *Repository) catMissingObject(ctx context.Context, oid plumbing.Hash) (*object.Blob, error) {
+	if err := r.fetchMissingBlob(ctx, oid); err != nil {
 		return nil, err
 	}
 	return r.odb.Blob(ctx, oid)
@@ -64,98 +87,145 @@ func objectSize(a object.Encoder) int {
 	return b.Len()
 }
 
-func (r *Repository) showSize(ctx context.Context, oid plumbing.Hash) (err error) {
+func (r *Repository) printSize(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
 	var a any
+	var err error
 	if a, err = r.odb.Object(ctx, oid); err == nil {
-		if v, ok := a.(object.Encoder); ok {
-			fmt.Fprintf(os.Stdout, "%d\n", objectSize(v))
-			return
+		if v, ok := a.(object.Encoder); ok {
+			return opts.Println(objectSize(v))
 		}
 		// unreachable
-		return
+		return nil
 	}
 	if !plumbing.IsNoSuchObject(err) {
 		fmt.Fprintf(os.Stderr, "cat-file: resolve object '%s' error: %v\n", oid, err)
-		return
+		return err
 	}
 	var b *object.Blob
 	if b, err = r.catMissingObject(ctx, oid); err != nil {
 		return catShowError(oid.String(), err)
 	}
 	defer b.Close()
-	fmt.Fprintf(os.Stdout, "%d\n", b.Size)
+	return opts.Println(b.Size)
+}
+
+func (r *Repository) printType(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
+	a, err := r.odb.Object(ctx, oid)
+	if plumbing.IsNoSuchObject(err) {
diff --git a/pkg/zeta/cat.go b/pkg/zeta/cat.go
index 7e9d563..d8a1710 100644
--- a/pkg/zeta/cat.go
+++ b/pkg/zeta/cat.go
@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"math"
 	"os"
 	"strings"
 
@@ -19,14 +18,38 @@ import (
 	"github.com/antgroup/hugescm/modules/zeta/object"
 )
 
+const (
+	MAX_SHOW_BINARY_BLOB = 10<<20 - 8
+)
+
 type CatOptions struct {
-	Hash        string
-	SizeMax     int64 // blob limit size
-	Type        bool  // object type
-	DisplaySize bool
-	Textconv    bool
-	FormatJSON  bool
-	Verify      bool
+	Object    string
+	Limit     int64 // blob limit size
+	Type      bool  // object type
+	PrintSize bool
+	PrintJSON bool
+	Verify    bool
+	Textconv  bool
+	Direct    bool
+	Output    string
+}
+
+func (opts *CatOptions) Println(a ...any) error {
+	fd, _, err := opts.NewFD()
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	_, err = fmt.Fprintln(fd, a...)
+	return err
+}
+
+func (opts *CatOptions) NewFD() (io.WriteCloser, bool, error) {
+	if len(opts.Output) == 0 {
+		return &NopWriteCloser{Writer: os.Stdout}, IsTerminal(os.Stdout.Fd()), nil
+	}
+	fd, err := os.Create(opts.Output)
+	return fd, false, err
 }
 
 func catShowError(oid string, err error) error {
@@ -41,18 +64,18 @@ func catShowError(oid string, err error) error {
 	return err
 }
 
-func (r *Repository) catMissingObject(ctx context.Context, oid plumbing.Hash) (*object.Blob, error) {
-	b, err := r.odb.Blob(ctx, oid)
-	if err == nil {
-		return b, nil
-	}
-	if !plumbing.IsNoSuchObject(err) {
-		return nil, err
+func (r *Repository) fetchMissingBlob(ctx context.Context, oid plumbing.Hash) error {
+	if r.odb.Exists(oid, false) {
+		return nil
 	}
 	if !r.promisorEnabled() {
-		return nil, err
+		return plumbing.NoSuchObject(oid)
 	}
-	if err = r.promiseMissingFetch(ctx, oid); err != nil {
+	return r.promiseMissingFetch(ctx, oid)
+}
+
+func (r *Repository) catMissingObject(ctx context.Context, oid plumbing.Hash) (*object.Blob, error) {
+	if err := r.fetchMissingBlob(ctx, oid); err != nil {
 		return nil, err
 	}
 	return r.odb.Blob(ctx, oid)
@@ -64,98 +87,146 @@ func objectSize(a object.Encoder) int {
 	return b.Len()
 }
 
-func (r *Repository) showSize(ctx context.Context, oid plumbing.Hash) (err error) {
+func (r *Repository) printSize(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
 	var a any
+	var err error
 	if a, err = r.odb.Object(ctx, oid); err == nil {
-		if v, ok := a.(object.Encoder); ok {
-			fmt.Fprintf(os.Stdout, "%d\n", objectSize(v))
-			return
+		if v, ok := a.(object.Encoder); ok {
+			return opts.Println(objectSize(v))
 		}
 		// unreachable
-		return
+		return nil
 	}
 	if !plumbing.IsNoSuchObject(err) {
 		fmt.Fprintf(os.Stderr, "cat-file: resolve object '%s' error: %v\n", oid, err)
-		return
+		return err
 	}
 	var b *object.Blob
 	if b, err = r.catMissingObject(ctx, oid); err != nil {
 		return catShowError(oid.String(), err)
 	}
 	defer b.Close()
-	fmt.Fprintf(os.Stdout, "%d\n", b.Size)
+	return opts.Println(b.Size)
+}
+
+func (r *Repository) printType(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
+	a, err := r.odb.Object(ctx, oid)
+	if plumbing.IsNoSuchObject(err) {
+		if err := r.fetchMissingBlob(ctx, oid); err == nil {
+			return opts.Println("blob")
+		}
+	}
+	if err != nil {
+		return catShowError(oid.String(), err)
+	}
+	switch a.(type) {
+	case *object.Commit:
+		return opts.Println("commit")
+	case *object.Tag:
+		return opts.Println("tag")
+	case *object.Tree:
+		return opts.Println("tree")
+	case *object.Fragments:
+		return opts.Println("fragments")
+	}
 	return nil
 }
 
-func (r *Repository) catBlob(ctx context.Context, w io.Writer, oid plumbing.Hash, n int64, textconv, verify bool) error {
+const (
+	binaryTruncated = "*** Binary truncated ***"
+)
+
+func (r *Repository) catBlob(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
 	if oid == backend.BLANK_BLOB_HASH {
 		return nil // empty blob, skip
 	}
-	if n <= 0 {
-		n = math.MaxInt64
-	}
 	b, err := r.catMissingObject(ctx, oid)
 	if err != nil {
 		return err
 	}
 	defer b.Close()
-	if verify {
+	fd, outTerm, err := opts.NewFD()
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	if opts.Verify {
 		h := plumbing.NewHasher()
 		if _, err := io.Copy(h, b.Contents); err != nil {
 			return err
 		}
-		fmt.Fprintln(os.Stdout, h.Sum())
+		fmt.Fprintln(fd, h.Sum())
 		return nil
 	}
-	reader := b.Contents
-	if textconv {
-		if reader, err = diferenco.NewUnifiedReader(b.Contents); err != nil {
-			return err
+	reader, charset, err := diferenco.NewUnifiedReaderEx(b.Contents, opts.Textconv)
+	if err != nil {
+		return err
+	}
+	if opts.Limit < 0 {
+		opts.Limit = b.Size
+	}
+	if outTerm && charset == diferenco.BINARY {
+		if opts.Limit > MAX_SHOW_BINARY_BLOB {
+			reader = io.MultiReader(io.LimitReader(reader, MAX_SHOW_BINARY_BLOB), strings.NewReader(binaryTruncated))
+			opts.Limit = int64(MAX_SHOW_BINARY_BLOB + len(binaryTruncated))
 		}
+		return processColor(reader, fd, opts.Limit)
 	}
-	if _, err = io.Copy(w, io.LimitReader(reader, n)); err != nil {
+	if _, err = io.Copy(fd, io.LimitReader(reader, opts.Limit)); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (r *Repository) showType(ctx context.Context, oid plumbing.Hash) error {
-	a, err := r.odb.Object(ctx, oid)
-	if plumbing.IsNoSuchObject(err) {
-		b, err := r.catMissingObject(ctx, oid)
+func (r *Repository) catFragments(ctx context.Context, opts *CatOptions, ff *object.Fragments) error {
+	fd, outTerm, err := opts.NewFD()
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	objects := make([]*object.Blob, 0, len(ff.Entries))
+	defer func() {
+		for _, o := range objects {
+			_ = o.Close()
+		}
+	}()
+	readers := make([]io.Reader, 0, len(ff.Entries))
+	for _, e := range ff.Entries {
+		o, err := r.catMissingObject(ctx, e.Hash)
 		if err != nil {
-			return catShowError(oid.String(), err)
+			return err
 		}
-		defer b.Close()
-		fmt.Fprintln(os.Stdout, "blob")
-		return nil
+		objects = append(objects, o)
+		readers = append(readers, o.Contents)
 	}
-	if err != nil {
-		return catShowError(oid.String(), err)
+	if opts.Limit < 0 {
+		opts.Limit = int64(ff.Size)
 	}
-	switch a.(type) {
-	case *object.Commit:
-		fmt.Fprintln(os.Stdout, "commit")
-	case *object.Tag:
-		fmt.Fprintln(os.Stdout, "tag")
-	case *object.Tree:
-		fmt.Fprintln(os.Stdout, "tree")
-	case *object.Fragments:
-		fmt.Fprintln(os.Stdout, "fragments")
+	// fragments ignore --textconv
+	reader := io.MultiReader(readers...)
+	if outTerm {
+		if opts.Limit > MAX_SHOW_BINARY_BLOB {
+			reader = io.MultiReader(io.LimitReader(reader, MAX_SHOW_BINARY_BLOB), strings.NewReader(binaryTruncated))
+			opts.Limit = int64(MAX_SHOW_BINARY_BLOB + len(binaryTruncated))
+		}
+		return processColor(reader, fd, opts.Limit)
+	}
+	if _, err = io.Copy(fd, io.LimitReader(reader, opts.Limit)); err != nil {
+		return err
 	}
 	return nil
 }
 
 func (r *Repository) catObject(ctx context.Context, opts *CatOptions, oid plumbing.Hash) error {
-	if opts.DisplaySize {
-		return r.showSize(ctx, oid)
+	if opts.PrintSize {
+		return r.printSize(ctx, opts, oid)
 	}
 	if opts.Type {
-		return r.showType(ctx, oid)
+		return r.printType(ctx, opts, oid)
 	}
 	a, err := r.odb.Object(ctx, oid)
 	if plumbing.IsNoSuchObject(err) {
-		return catShowError(oid.String(), r.catBlob(ctx, os.Stdout, oid, opts.SizeMax, opts.Textconv, opts.Verify))
+		return catShowError(oid.String(), r.catBlob(ctx, opts, oid))
 	}
 	if err != nil {
 		return catShowError(oid.String(), err)
@@ -168,11 +239,27 @@ func (r *Repository) catObject(ctx context.Context, opts *CatOptions, oid plumbi
 		}
 		return nil
 	}
-	if opts.FormatJSON {
-		return json.NewEncoder(os.Stdout).Encode(a)
+	if opts.PrintJSON {
+		fd, _, err := opts.NewFD()
+		if err != nil {
+			return err
+		}
+		defer fd.Close()
+		return json.NewEncoder(fd).Encode(a)
+	}
+	if opts.Direct {
+		// only fragments support direct read
+		if ff, ok := a.(*object.Fragments); ok {
+			return r.catFragments(ctx, opts, ff)
+		}
 	}
 	if w, ok := a.(object.Printer); ok {
-		_ = w.Pretty(os.Stdout)
+		fd, _, err := opts.NewFD()
+		if err != nil {
+			return err
+		}
+		defer fd.Close()
+		_ = w.Pretty(fd)
 	}
 	return nil
 }
@@ -187,7 +274,7 @@ func (r *Repository) catBranchOrTag(ctx context.Context, opts *CatOptions, branc
 }
 
 func (r *Repository) Cat(ctx context.Context, opts *CatOptions) error {
-	k, v, ok := strings.Cut(opts.Hash, ":")
+	k, v, ok := strings.Cut(opts.Object, ":")
 	if !ok {
 		return r.catBranchOrTag(ctx, opts, k)
 	}
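catBlob and catFragments cap binary output for terminals with the same two-reader trick: io.LimitReader bounds the payload and io.MultiReader splices the truncation marker in behind it. A tiny standalone sketch with toy numbers standing in for MAX_SHOW_BINARY_BLOB:

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// Cap the stream at limit bytes, then append a human-readable
	// marker, the same shape the code above uses for oversized blobs.
	const limit = 16
	payload := strings.NewReader(strings.Repeat("x", 64))
	r := io.MultiReader(
		io.LimitReader(payload, limit),
		strings.NewReader("\n*** Binary truncated ***\n"),
	)
	_, _ = io.Copy(os.Stdout, r)
}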
diff --git a/pkg/zeta/hastyhex.go b/pkg/zeta/hastyhex.go
new file mode 100644
index 0000000..f13bf19
--- /dev/null
+++ b/pkg/zeta/hastyhex.go
@@ -0,0 +1,118 @@
+package zeta
+
+import (
+	"io"
+	"math"
+)
+
+const (
+	CN byte = 0x37 /* null */
+	CS byte = 0x92 /* space */
+	CP byte = 0x96 /* print */
+	CC byte = 0x95 /* control */
+	CH byte = 0x93 /* high */
+	colorTemplate = "00000000  " +
+		"\x1b[XXm## \x1b[XXm## \x1b[XXm## \x1b[XXm## " +
+		"\x1b[XXm## \x1b[XXm## \x1b[XXm## \x1b[XXm##  " +
+		"\x1b[XXm## \x1b[XXm## \x1b[XXm## \x1b[XXm## " +
+		"\x1b[XXm## \x1b[XXm## \x1b[XXm## \x1b[XXm##  " +
+		"\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm." +
+		"\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm.\x1b[XXm." +
+		"\x1b[0m\n"
+)
+
+var (
+	hex   = []byte("0123456789abcdef")
+	table = []byte{
+		CN, CC, CC, CC, CC, CC, CC, CC, CC, CC, CS, CS, CS, CS, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC,
+		CC, CC, CC, CC, CC, CC, CS, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP,
+		CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP,
+		CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP,
+		CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CC, CH, CH,
+		CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH,
+		CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH,
+		CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH,
+		CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH,
+		CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH,
+	}
+	displayTable = []byte{
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+		0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
+		0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+		0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e,
+		0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+		0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+		0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+	}
+	slots = []int{ /* ANSI-color, hex, ANSI-color, ASCII */
+		12, 15, 142, 145, 20, 23, 148, 151, 28, 31, 154, 157, 36, 39, 160, 163,
+		44, 47, 166, 169, 52, 55, 172, 175, 60, 63, 178, 181, 68, 71, 184, 187,
+		77, 80, 190, 193, 85, 88, 196, 199, 93, 96, 202, 205, 101, 104, 208, 211,
+		109, 112, 214, 217, 117, 120, 220, 223, 125, 128, 226, 229, 133, 136, 232, 235}
+)
+
+func processColor(r io.Reader, w io.Writer, size int64) error {
+	var input [16]byte
+	colortemplate := []byte(colorTemplate)
+	if size < 0 {
+		size = math.MaxInt64
+	}
+	var offset int64
+	for {
+		rn := min(size, 16)
+		n, err := io.ReadFull(r, input[:rn])
+		// A short final read (io.ErrUnexpectedEOF) still carries n bytes; only stop when nothing was read.
+		if err != nil && n == 0 {
+			break
+		}
+		/* Write the offset */
+		for i := 0; i < 8; i++ {
+			colortemplate[i] = hex[(offset>>(28-i*4))&15]
+		}
+		size -= int64(n)
+		/* Fill out the colortemplate */
+		for i := 0; i < 16; i++ {
+			/* Use a fixed loop count instead of "n" to encourage loop
+			 * unrolling by the compiler. Empty bytes will be erased
+			 * later.
+			 */
+			v := input[i]
+			c := table[v]
+			colortemplate[slots[i*4+0]+0] = hex[c>>4]
+			colortemplate[slots[i*4+0]+1] = hex[c&15]
+			colortemplate[slots[i*4+1]+0] = hex[v>>4]
+			colortemplate[slots[i*4+1]+1] = hex[v&15]
+			colortemplate[slots[i*4+2]+0] = hex[c>>4]
+			colortemplate[slots[i*4+2]+1] = hex[c&15]
+			colortemplate[slots[i*4+3]+0] = displayTable[v]
+		}
+		/* Erase any trailing bytes */
+		for i := n; i < 16; i++ {
+			/* This loop is only used once: the last line of output. The
+			 * branch predictor will quickly learn that it's never taken.
+			 */
+			colortemplate[slots[i*4+0]+0] = '0'
+			colortemplate[slots[i*4+0]+1] = '0'
+			colortemplate[slots[i*4+1]+0] = ' '
+			colortemplate[slots[i*4+1]+1] = ' '
+			colortemplate[slots[i*4+2]+0] = '0'
+			colortemplate[slots[i*4+2]+1] = '0'
+			colortemplate[slots[i*4+3]+0] = ' '
+		}
+		if _, err := w.Write(colortemplate); err != nil {
+			return err
+		}
+		offset += 16
+		if n != 16 || size <= 0 {
+			break
+		}
+	}
+	return nil
+}
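hastyhex's 256-entry table above packs one color class per byte value. Decoded into branches, the classification it encodes looks roughly like this (derived by reading the table; note that tab counts as control there, and only \n, \v, \f, \r and the space byte get the whitespace color):

package main

import "fmt"

// classify mirrors the grouping encoded in hastyhex's lookup table.
func classify(b byte) string {
	switch {
	case b == 0x00:
		return "null"
	case b == ' ' || b == '\n' || b == '\v' || b == '\f' || b == '\r':
		return "space"
	case b >= 0x20 && b < 0x7f:
		return "print"
	case b < 0x80:
		return "control" // includes tab and DEL (0x7f)
	default:
		return "high" // >= 0x80
	}
}

func main() {
	for _, b := range []byte{0x00, '\n', '\t', 'A', 0x1b, 0x7f, 0xff} {
		fmt.Printf("%#02x => %s\n", b, classify(b))
	}
}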
+ + "\x1b[0m\n" +) + +var ( + hex = []byte("0123456789abcdef") + table = []byte{ + CN, CC, CC, CC, CC, CC, CC, CC, CC, CC, CS, CS, CS, CS, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC, CC, + CC, CC, CC, CC, CC, CC, CS, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, + CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, + CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, + CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CP, CC, CH, CH, + CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, + CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, + CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, + CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, + CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, CH, + } + displayTable = []byte{ + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, + 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, + 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, + 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + } + slots = []int{ /* ANSI-color, hex, ANSI-color, ASCII */ + 12, 15, 142, 145, 20, 23, 148, 151, 28, 31, 154, 157, 36, 39, 160, 163, + 44, 47, 166, 169, 52, 55, 172, 175, 60, 63, 178, 181, 68, 71, 184, 187, + 77, 80, 190, 193, 85, 88, 196, 199, 93, 96, 202, 205, 101, 104, 208, 211, + 109, 112, 214, 217, 117, 120, 220, 223, 125, 128, 226, 229, 133, 136, 232, 235} +) + +func processColor(r io.Reader, w io.Writer, size int64) error { + var input [16]byte + colortemplate := []byte(colorTemplate) + if size < 0 { + size = math.MaxInt64 + } + var offset int64 + for { + rn := min(size, 16) + n, err := io.ReadFull(r, input[:rn]) + if err != nil { + break + } + /* Write the offset */ + for i := 0; i < 8; i++ { + colortemplate[i] = hex[(offset>>(28-i*4))&15] + } + size -= int64(n) + /* Fill out the colortemplate */ + for i := 0; i < 
diff --git a/pkg/zeta/merge_file.go b/pkg/zeta/merge_file.go
index a933081..fd7e5db 100644
--- a/pkg/zeta/merge_file.go
+++ b/pkg/zeta/merge_file.go
@@ -57,7 +57,7 @@ type MergeFileOptions struct {
 	Style         int
 	DiffAlgorithm string
 	Stdout        bool
-	TextConv      bool
+	Textconv      bool
 }
 
 func (opts *MergeFileOptions) diffAlgorithmFromName(defaultDiffAlgorithm string) diferenco.Algorithm {
diff --git a/pkg/zeta/merge_tree.go b/pkg/zeta/merge_tree.go
index 8765303..15739bf 100644
--- a/pkg/zeta/merge_tree.go
+++ b/pkg/zeta/merge_tree.go
@@ -27,7 +27,7 @@ type MergeTreeOptions struct {
 	AllowUnrelatedHistories, Z, NameOnly, Textconv, JSON bool
 }
 
-func (r *Repository) readMissingText(ctx context.Context, oid plumbing.Hash, textConv bool) (string, string, error) {
+func (r *Repository) readMissingText(ctx context.Context, oid plumbing.Hash, textconv bool) (string, string, error) {
 	br, err := r.odb.Blob(ctx, oid)
 	switch {
 	case err == nil:
@@ -43,7 +43,7 @@ func (r *Repository) readMissingText(ctx context.Context, oid plumbing.Hash, tex
 		return "", "", err
 	}
 	defer br.Close()
-	return diferenco.ReadUnifiedText(br.Contents, br.Size, textConv)
+	return diferenco.ReadUnifiedText(br.Contents, br.Size, textconv)
 }
 
 func (o *MergeTreeOptions) formatJson(result *odb.MergeResult) {
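The textConv→textconv rename sweeps every ReadUnifiedText call site. For orientation, a hypothetical caller (helper name invented), assuming only the (reader, size, textconv) signature and the ErrNonTextContent sentinel visible in this diff:

package zeta

import (
	"fmt"
	"os"

	"github.com/antgroup/hugescm/modules/diferenco"
)

// readWithTextconv is illustrative only, not part of the diff.
func readWithTextconv(p string, textconv bool) (string, error) {
	fd, err := os.Open(p)
	if err != nil {
		return "", err
	}
	defer fd.Close()
	si, err := fd.Stat()
	if err != nil {
		return "", err
	}
	// The second result reports the detected charset; the callers in
	// this diff mostly discard it.
	content, _, err := diferenco.ReadUnifiedText(fd, si.Size(), textconv)
	if err == diferenco.ErrNonTextContent {
		return "", fmt.Errorf("%s: binary content, no text to read", p)
	}
	return content, err
}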
diff --git a/pkg/zeta/misc.go b/pkg/zeta/misc.go
index 44976bd..ca2ce61 100644
--- a/pkg/zeta/misc.go
+++ b/pkg/zeta/misc.go
@@ -47,6 +47,10 @@ var (
 	W = tr.W // translate func wrap
 )
 
+func IsTerminal(fd uintptr) bool {
+	return isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd)
+}
+
 func checkIsTrueColorTerminal() bool {
 	if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) {
 		return false
@@ -342,7 +346,7 @@ type Content struct {
 	IsBinary bool
 }
 
-func ReadContent(p string, textConv bool) (*Content, error) {
+func ReadContent(p string, textconv bool) (*Content, error) {
 	fd, err := os.Open(p)
 	if err != nil {
 		return nil, err
@@ -370,7 +374,7 @@ func ReadContent(p string, textConv bool) (*Content, error) {
 	if _, err := fd.Seek(0, io.SeekStart); err != nil {
 		return nil, err
 	}
-	if fc.Text, _, err = diferenco.ReadUnifiedText(fd, si.Size(), textConv); err != nil {
+	if fc.Text, _, err = diferenco.ReadUnifiedText(fd, si.Size(), textconv); err != nil {
 		if err == diferenco.ErrNonTextContent {
 			fc.IsBinary = true
 			return fc, nil
@@ -380,7 +384,7 @@ func ReadContent(p string, textConv bool) (*Content, error) {
 	return fc, nil
 }
 
-func ReadText(p string, textConv bool) (string, error) {
+func ReadText(p string, textconv bool) (string, error) {
 	fd, err := os.Open(p)
 	if err != nil {
 		return "", err
@@ -390,6 +394,14 @@ func ReadText(p string, textConv bool) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	content, _, err := diferenco.ReadUnifiedText(fd, si.Size(), textConv)
+	content, _, err := diferenco.ReadUnifiedText(fd, si.Size(), textconv)
 	return content, err
 }
+
+type NopWriteCloser struct {
+	io.Writer
+}
+
+func (NopWriteCloser) Close() error {
+	return nil
+}
diff --git a/pkg/zeta/odb/merge.go b/pkg/zeta/odb/merge.go
index d131b42..8b67572 100644
--- a/pkg/zeta/odb/merge.go
+++ b/pkg/zeta/odb/merge.go
@@ -481,13 +481,13 @@ func flatBranchName(s string) string {
 	return b.String()
 }
 
-func (d *ODB) unifiedText(ctx context.Context, oid plumbing.Hash, textConv bool) (string, string, error) {
+func (d *ODB) unifiedText(ctx context.Context, oid plumbing.Hash, textconv bool) (string, string, error) {
 	br, err := d.Blob(ctx, oid)
 	if err != nil {
 		return "", "", err
 	}
 	defer br.Close()
-	return diferenco.ReadUnifiedText(br.Contents, br.Size, textConv)
+	return diferenco.ReadUnifiedText(br.Contents, br.Size, textconv)
 }
 
 // MergeTree: three way merge tree
diff --git a/pkg/zeta/odb/merge_driver.go b/pkg/zeta/odb/merge_driver.go
index 34dbb2c..ea5cc6c 100644
--- a/pkg/zeta/odb/merge_driver.go
+++ b/pkg/zeta/odb/merge_driver.go
@@ -14,7 +14,7 @@ import (
 )
 
 type MergeDriver func(ctx context.Context, o, a, b string, labelO, labelA, labelB string) (string, bool, error)
-type TextGetter func(ctx context.Context, oid plumbing.Hash, textConv bool) (string, string, error)
+type TextGetter func(ctx context.Context, oid plumbing.Hash, textconv bool) (string, string, error)
 
 type mergeOptions struct {
 	O, A, B plumbing.Hash
diff --git a/pkg/zeta/revision.go b/pkg/zeta/revision.go
index 9d34090..f094c0f 100644
--- a/pkg/zeta/revision.go
+++ b/pkg/zeta/revision.go
@@ -221,6 +221,9 @@ func (r *Repository) readTree(ctx context.Context, oid plumbing.Hash, p string)
 	if err != nil {
 		return nil, err
 	}
+	if len(p) == 0 {
+		return root, nil
+	}
 	e, err := root.FindEntry(ctx, p)
 	if err != nil {
 		return nil, err
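NopWriteCloser and IsTerminal exist so one code path can hand back either os.Stdout or a freshly created file as an io.WriteCloser and defer Close unconditionally, which is the pattern CatOptions.NewFD builds on. A sketch of that use (helper names hypothetical):

package zeta

import (
	"fmt"
	"io"
	"os"
)

// stdoutOrFile is an illustrative helper, not part of the diff.
func stdoutOrFile(output string) (io.WriteCloser, bool, error) {
	if len(output) == 0 {
		// NopWriteCloser lets os.Stdout travel as an io.WriteCloser.
		return &NopWriteCloser{Writer: os.Stdout}, IsTerminal(os.Stdout.Fd()), nil
	}
	fd, err := os.Create(output)
	return fd, false, err
}

func exampleUse() error {
	fd, onTerm, err := stdoutOrFile("") // "" means stdout
	if err != nil {
		return err
	}
	defer fd.Close() // a no-op for stdout, a real close for files
	_, err = fmt.Fprintf(fd, "terminal: %v\n", onTerm)
	return err
}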
diff --git a/pkg/zeta/show.go b/pkg/zeta/show.go
new file mode 100644
index 0000000..163820c
--- /dev/null
+++ b/pkg/zeta/show.go
@@ -0,0 +1,126 @@
+package zeta
+
+import (
+	"context"
+	"io"
+	"strings"
+
+	"github.com/antgroup/hugescm/modules/diferenco"
+	"github.com/antgroup/hugescm/modules/plumbing"
+	"github.com/antgroup/hugescm/modules/zeta/object"
+)
+
+type ShowOptions struct {
+	Objects   []string
+	Textconv  bool
+	Algorithm diferenco.Algorithm
+	Limit     int64
+	w         io.Writer
+	useColor  bool
+}
+
+type showObject struct {
+	name string
+	oid  plumbing.Hash
+}
+
+func (r *Repository) parseObject(ctx context.Context, name string) (plumbing.Hash, error) {
+	prefix, p, ok := strings.Cut(name, ":")
+	oid, err := r.Revision(ctx, prefix)
+	if !ok || err != nil {
+		return oid, err
+	}
+	e, err := r.parseEntry(ctx, oid, p)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+	return e.Hash, nil
+}
+
+func (r *Repository) showFetch(ctx context.Context, oid plumbing.Hash) error {
+	if r.odb.Exists(oid, false) || r.odb.Exists(oid, true) {
+		return nil
+	}
+	if !r.promisorEnabled() {
+		return plumbing.NoSuchObject(oid)
+	}
+	return r.promiseMissingFetch(ctx, oid)
+}
+
+func (r *Repository) Show(ctx context.Context, opts *ShowOptions) error {
+	objects := make([]*showObject, 0, len(opts.Objects))
+	for _, o := range opts.Objects {
+		oid, err := r.parseObject(ctx, o)
+		if err != nil {
+			die_error("parse object %s error: %v", o, err)
+			return err
+		}
+		if err := r.showFetch(ctx, oid); err != nil {
+			die_error("search object %s error: %v", oid, err)
+			return err
+		}
+		objects = append(objects, &showObject{name: o, oid: oid})
+	}
+	p := NewPrinter(ctx)
+	defer p.Close()
+	opts.w = p
+	opts.useColor = p.useColor
+	for _, o := range objects {
+		if err := r.showOne(ctx, opts, o); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *Repository) showOne(ctx context.Context, opts *ShowOptions, so *showObject) error {
+	var o any
+	var err error
+	if o, err = r.odb.Object(ctx, so.oid); err != nil {
+		if plumbing.IsNoSuchObject(err) {
+			return r.showBlob(ctx, opts, so)
+		}
+		return catShowError(so.oid.String(), err)
+	}
+	switch a := o.(type) {
+	case *object.Tree:
+	case *object.Commit:
+		return r.showCommit(ctx, opts, so, a)
+	case *object.Tag:
+		return r.showTag(ctx, opts, so, a)
+	case *object.Fragments:
+	}
+	return nil
+}
+
+func (r *Repository) showBlob(ctx context.Context, opts *ShowOptions, so *showObject) error {
+	b, err := r.catMissingObject(ctx, so.oid)
+	if err != nil {
+		return err
+	}
+	defer b.Close()
+	if opts.Limit < 0 {
+		opts.Limit = b.Size
+	}
+	reader, charset, err := diferenco.NewUnifiedReaderEx(b.Contents, opts.Textconv)
+	if err != nil {
+		return err
+	}
+	if opts.useColor && charset == diferenco.BINARY {
+		if opts.Limit > MAX_SHOW_BINARY_BLOB {
+			reader = io.MultiReader(io.LimitReader(reader, MAX_SHOW_BINARY_BLOB), strings.NewReader(binaryTruncated))
+			opts.Limit = int64(MAX_SHOW_BINARY_BLOB + len(binaryTruncated))
+		}
+		return processColor(reader, opts.w, opts.Limit)
+	}
+	_, err = io.Copy(opts.w, io.LimitReader(reader, opts.Limit))
+	return err
+}
+
+func (r *Repository) showCommit(ctx context.Context, opts *ShowOptions, so *showObject, t *object.Commit) error {
+	return nil
+}
+
+func (r *Repository) showTag(ctx context.Context, opts *ShowOptions, so *showObject, t *object.Tag) error {
+	return nil
+}
diff --git a/pkg/zeta/showdiff.go b/pkg/zeta/showdiff.go
index ab9a78d..9d5bdc0 100644
--- a/pkg/zeta/showdiff.go
+++ b/pkg/zeta/showdiff.go
@@ -29,7 +29,7 @@ type DiffOptions struct {
 	From      string
 	To        string
 	PathSpec  []string
-	TextConv  bool
+	Textconv  bool
 	UseColor  bool
 	Way3      bool
 	Algorithm diferenco.Algorithm
@@ -37,7 +37,7 @@ func (opts *DiffOptions) po() *object.PatchOptions {
 	m := NewMatcher(opts.PathSpec)
-	return &object.PatchOptions{TextConv: opts.TextConv, Algorithm: opts.Algorithm, Match: m.Match}
+	return &object.PatchOptions{Textconv: opts.Textconv, Algorithm: opts.Algorithm, Match: m.Match}
 }
 
 func (opts *DiffOptions) ShowChanges(ctx context.Context, changes object.Changes) error {
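parseObject in show.go leans on strings.Cut to split a "<rev>:<path>" spec, falling back to treating the whole argument as a revision when there is no colon. The behavior in isolation (sample specs made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, s := range []string{"master:docs/a.md", "HEAD", "v1.0.0:go.mod"} {
		rev, path, ok := strings.Cut(s, ":")
		if !ok {
			// no colon: the whole argument is a revision
			fmt.Printf("revision=%q (no path)\n", rev)
			continue
		}
		fmt.Printf("revision=%q path=%q\n", rev, path)
	}
}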
diff --git a/pkg/zeta/switch_test.go b/pkg/zeta/switch_test.go
index e2a4ff8..2599f13 100644
--- a/pkg/zeta/switch_test.go
+++ b/pkg/zeta/switch_test.go
@@ -31,5 +31,5 @@ func TestCat(t *testing.T) {
 		return
 	}
 	defer r.Close()
-	_ = r.Cat(context.Background(), &CatOptions{Hash: "2be5d4418893425e546a6146fbda18eac95ea9a7fbb05faab02096738a974a11"})
+	_ = r.Cat(context.Background(), &CatOptions{Object: "2be5d4418893425e546a6146fbda18eac95ea9a7fbb05faab02096738a974a11"})
 }
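With the rename, a richer CatOptions literal reads like this hypothetical call (object spec and output path invented; per catBlob above, a negative Limit means the whole blob):

package zeta

import "context"

// exampleCat is illustrative only, mirroring TestCat above.
func exampleCat(r *Repository) error {
	return r.Cat(context.Background(), &CatOptions{
		Object: "master:docs/README.md", // rev:path, split by strings.Cut in Cat
		Limit:  -1,                      // negative means the whole blob
		Output: "README.out",            // empty would mean stdout
	})
}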
diff --git a/pkg/zeta/worktree_diff.go b/pkg/zeta/worktree_diff.go
index edd0967..5e45eb8 100644
--- a/pkg/zeta/worktree_diff.go
+++ b/pkg/zeta/worktree_diff.go
@@ -18,27 +18,27 @@ import (
 	"github.com/antgroup/hugescm/modules/zeta/object"
 )
 
-func (w *Worktree) openText(p string, size int64, textConv bool) (string, error) {
+func (w *Worktree) openText(p string, size int64, textconv bool) (string, error) {
 	fd, err := w.fs.Open(p)
 	if err != nil {
 		return "", err
 	}
 	defer fd.Close()
-	content, _, err := diferenco.ReadUnifiedText(fd, size, textConv)
+	content, _, err := diferenco.ReadUnifiedText(fd, size, textconv)
 	return content, err
 }
 
-func (w *Worktree) openBlobText(ctx context.Context, oid plumbing.Hash, textConv bool) (string, error) {
+func (w *Worktree) openBlobText(ctx context.Context, oid plumbing.Hash, textconv bool) (string, error) {
 	br, err := w.odb.Blob(ctx, oid)
 	if err != nil {
 		return "", err
 	}
 	defer br.Close()
-	content, _, err := diferenco.ReadUnifiedText(br.Contents, br.Size, textConv)
+	content, _, err := diferenco.ReadUnifiedText(br.Contents, br.Size, textconv)
 	return content, err
 }
 
-func (w *Worktree) readContent(ctx context.Context, p noder.Path, textConv bool) (f *diferenco.File, content string, fragments bool, bin bool, err error) {
+func (w *Worktree) readContent(ctx context.Context, p noder.Path, textconv bool) (f *diferenco.File, content string, fragments bool, bin bool, err error) {
 	if p == nil {
 		return nil, "", false, false, nil
 	}
@@ -49,7 +49,7 @@ func (w *Worktree) readContent(ctx context.Context, p noder.Path, textConv bool)
 	if a.Size() > diferenco.MAX_DIFF_SIZE {
 		return f, "", false, true, nil
 	}
-	content, err = w.openText(name, a.Size(), textConv)
+	content, err = w.openText(name, a.Size(), textconv)
 	if err == diferenco.ErrNonTextContent {
 		return f, "", false, true, nil
 	}
@@ -62,7 +62,7 @@ func (w *Worktree) readContent(ctx context.Context, p noder.Path, textConv bool)
 	if a.Size() > diferenco.MAX_DIFF_SIZE {
 		return f, "", false, true, nil
 	}
-	content, err = w.openBlobText(ctx, a.HashRaw(), textConv)
+	content, err = w.openBlobText(ctx, a.HashRaw(), textconv)
 	// When the current repository uses an incomplete checkout mechanism, we treat these files as binary files, i.e. no differences can be calculated.
 	if err == diferenco.ErrNonTextContent || plumbing.IsNoSuchObject(err) {
 		return f, "", false, true, nil
@@ -76,7 +76,7 @@ func (w *Worktree) readContent(ctx context.Context, p noder.Path, textConv bool)
 	if a.Size() > diferenco.MAX_DIFF_SIZE {
 		return f, "", false, true, nil
 	}
-	content, err = w.openBlobText(ctx, a.HashRaw(), textConv)
+	content, err = w.openBlobText(ctx, a.HashRaw(), textconv)
 	if err == diferenco.ErrNonTextContent || plumbing.IsNoSuchObject(err) {
 		return f, "", false, true, nil
 	}
@@ -108,7 +108,7 @@ func (w *Worktree) filePatchWithContext(ctx context.Context, c *merkletrie.Chang
 }
 
 // getPatchContext: In the object package, there is no patch implementation for worktree diff, so we need
-func (w *Worktree) getPatchContext(ctx context.Context, changes merkletrie.Changes, m *Matcher, textConv bool) ([]*diferenco.Unified, error) {
+func (w *Worktree) getPatchContext(ctx context.Context, changes merkletrie.Changes, m *Matcher, textconv bool) ([]*diferenco.Unified, error) {
 	var filePatches []*diferenco.Unified
 	for _, c := range changes {
 		select {
@@ -120,7 +120,7 @@ func (w *Worktree) getPatchContext(ctx context.Context, changes merkletrie.Chang
 		if !m.Match(name) {
 			continue
 		}
-		p, err := w.filePatchWithContext(ctx, &c, textConv)
+		p, err := w.filePatchWithContext(ctx, &c, textconv)
 		if err != nil {
 			return nil, err
 		}
@@ -174,7 +174,7 @@ func (w *Worktree) fileStatWithContext(ctx context.Context, c *merkletrie.Change
 	return s, nil
 }
 
-func (w *Worktree) getStatsContext(ctx context.Context, changes merkletrie.Changes, m *Matcher, textConv bool) (object.FileStats, error) {
+func (w *Worktree) getStatsContext(ctx context.Context, changes merkletrie.Changes, m *Matcher, textconv bool) (object.FileStats, error) {
 	var fileStats []object.FileStat
 	for _, c := range changes {
 		select {
@@ -186,7 +186,7 @@ func (w *Worktree) getStatsContext(ctx context.Context, changes merkletrie.Chang
 		if !m.Match(name) {
 			continue
 		}
-		s, err := w.fileStatWithContext(ctx, &c, textConv)
+		s, err := w.fileStatWithContext(ctx, &c, textconv)
 		if err != nil {
 			return nil, err
 		}
@@ -202,14 +202,14 @@ func (w *Worktree) showChanges(ctx context.Context, opts *DiffOptions, changes m
 	}
 	m := NewMatcher(opts.PathSpec)
 	if opts.showStatOnly() {
-		fileStats, err := w.getStatsContext(ctx, changes, m, opts.TextConv)
+		fileStats, err := w.getStatsContext(ctx, changes, m, opts.Textconv)
 		if err != nil {
 			return err
 		}
 		return opts.ShowStats(ctx, fileStats)
 	}
 
-	filePatchs, err := w.getPatchContext(ctx, changes, m, opts.TextConv)
+	filePatchs, err := w.getPatchContext(ctx, changes, m, opts.Textconv)
 	if err != nil {
 		return err
 	}
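getPatchContext and getStatsContext both poll the context with a non-blocking select before each change, so a cancelled diff stops between files instead of running to completion. The shape of that loop, reduced to a runnable toy:

package main

import (
	"context"
	"fmt"
	"time"
)

// process mirrors the loop shape above: before each potentially slow
// item, poll ctx so cancellation is observed promptly without blocking.
func process(ctx context.Context, items []string) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		fmt.Println("processed", it)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // ensure the deadline has passed
	if err := process(ctx, []string{"a", "b"}); err != nil {
		fmt.Println("stopped:", err) // stopped: context deadline exceeded
	}
}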