diff --git a/adapter.go b/adapter.go
index 67eaa6b..c311d07 100644
--- a/adapter.go
+++ b/adapter.go
@@ -7,7 +7,6 @@ import (
 	"sync"
 
 	"github.com/CAFxX/httpcompression/contrib/andybalholm/brotli"
-	_brotli "github.com/andybalholm/brotli"
 )
 
 const (
@@ -27,6 +26,7 @@ const (
 	// 200 is a somewhat arbitrary number; in experiments compressing short text/markup-like sequences
 	// with different compressors we saw that for sequences shorter than ~180 bytes the output generated
 	// by the compressor would sometimes be larger than the input.
+	// This default may change between versions.
 	// In general there can be no one-size-fits-all value: you will want to measure if a different
 	// minimum size improves end-to-end performance for your workloads.
 	DefaultMinSize = 200
@@ -119,7 +119,7 @@ func addVaryHeader(h http.Header, value string) {
 func DefaultAdapter(opts ...Option) (func(http.Handler) http.Handler, error) {
 	defaults := []Option{
 		GzipCompressionLevel(gzip.DefaultCompression),
-		BrotliCompressionLevel(_brotli.DefaultCompression),
+		BrotliCompressionLevel(brotli.DefaultCompression),
 		MinSize(DefaultMinSize),
 	}
 	opts = append(defaults, opts...)
@@ -173,7 +173,7 @@ func GzipCompressionLevel(level int) Option {
 // The default is 3 (the same default used in the reference brotli C
 // implementation).
 func BrotliCompressionLevel(level int) Option {
-	c, err := brotli.New(_brotli.WriterOptions{Quality: level})
+	c, err := brotli.New(brotli.Options{Quality: level})
 	if err != nil {
 		return errorOption(err)
 	}
diff --git a/adapter_test.go b/adapter_test.go
index cdd1e8e..7141eae 100644
--- a/adapter_test.go
+++ b/adapter_test.go
@@ -13,9 +13,12 @@ import (
 	"strconv"
 	"testing"
 
+	"github.com/CAFxX/httpcompression/contrib/andybalholm/brotli"
 	"github.com/CAFxX/httpcompression/contrib/klauspost/zstd"
-	"github.com/andybalholm/brotli"
 	"github.com/stretchr/testify/assert"
+
+	ibrotli "github.com/andybalholm/brotli"
+	kpzstd "github.com/klauspost/compress/zstd"
 )
 
 const (
@@ -908,15 +911,20 @@ func (noopHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {}
 func BenchmarkAdapter(b *testing.B) {
 	for _, size := range []int{10, 100, 1000, 10000, 100000} {
 		b.Run(fmt.Sprintf("%d", size), func(b *testing.B) {
-			for _, ae := range []string{"gzip", "br", "zstd"} {
-				b.Run(ae, func(b *testing.B) {
-					b.Run("serial", func(b *testing.B) {
-						benchmark(b, false, size, ae)
-					})
-					b.Run("parallel", func(b *testing.B) {
-						benchmark(b, true, size, ae)
+			for ae, maxq := range map[string]int{"gzip": 9, "br": 11, "zstd": 4} {
+				if size < DefaultMinSize {
+					maxq = 1
+				}
+				for q := 1; q <= maxq; q++ {
+					b.Run(fmt.Sprintf("%s/%d", ae, q), func(b *testing.B) {
+						b.Run("serial", func(b *testing.B) {
+							benchmark(b, false, size, ae, q)
+						})
+						b.Run("parallel", func(b *testing.B) {
+							benchmark(b, true, size, ae, q)
+						})
 					})
-				})
+				}
 			}
 		})
 	}
@@ -934,36 +942,47 @@ func gzipStrLevel(s string, lvl int) []byte {
 
 func brotliStrLevel(s string, lvl int) []byte {
 	var b bytes.Buffer
-	w := brotli.NewWriterLevel(&b, lvl)
+	w := ibrotli.NewWriterLevel(&b, lvl)
 	io.WriteString(w, s)
 	w.Close()
 	return b.Bytes()
 }
 
-func benchmark(b *testing.B, parallel bool, size int, ae string) {
+func benchmark(b *testing.B, parallel bool, size int, ae string, d int) {
 	bin, err := ioutil.ReadFile("testdata/benchmark.json")
 	if err != nil {
 		b.Fatal(err)
 	}
 
-	zenc, _ := zstd.New()
+	var enc CompressorProvider
+	switch ae {
+	case "gzip":
+		enc, err = NewDefaultGzipCompressor(d)
+	case "br":
+		enc, err = brotli.New(brotli.Options{Quality: d})
+	case "zstd":
+		enc, err = zstd.New(kpzstd.WithEncoderLevel(kpzstd.EncoderLevel(d)))
+	}
+	if err != nil {
+		b.Fatal(err)
+	}
+
 	req, _ := http.NewRequest("GET", "/whatever", nil)
 	req.Header.Set("Accept-Encoding", ae)
 	handler := newTestHandler(
 		string(bin[:size]),
-		Compressor(zstd.Encoding, 2, zenc),
+		Compressor(ae, 100, enc),
 	)
 
 	res := httptest.NewRecorder()
 	handler.ServeHTTP(res, req)
-	if size < 20 {
+	if size < DefaultMinSize {
 		if res.Code != 200 || res.Header().Get("Content-Encoding") != "" || res.Body.Len() != size {
-			b.Fatal(res)
+			b.Fatalf("code=%d, accept-encoding=%q, body=%d", res.Code, res.Header().Get("Content-Encoding"), res.Body.Len())
 		}
 	} else {
-		if res.Code != 200 || res.Header().Get("Content-Encoding") != ae || res.Body.Len() < size/10 {
-			b.Fatal(res)
+		if res.Code != 200 || res.Header().Get("Content-Encoding") != ae || res.Body.Len() < size/10 || res.Body.Len() == size {
+			b.Fatalf("code=%d, accept-encoding=%q, body=%d", res.Code, res.Header().Get("Content-Encoding"), res.Body.Len())
 		}
 	}
@@ -974,6 +993,7 @@ func benchmark(b *testing.B, parallel bool, size int, ae string) {
 		for pb.Next() {
 			res.reset()
 			handler.ServeHTTP(res, req)
+			b.ReportMetric(float64(res.b*100)/float64(size), "%")
 		}
 	})
 } else {
@@ -981,12 +1001,14 @@
 		for i := 0; i < b.N; i++ {
 			res.reset()
 			handler.ServeHTTP(res, req)
+			b.ReportMetric(float64(res.b*100)/float64(size), "%")
 		}
 	}
 }
 
 type discardResponseWriter struct {
 	h http.Header
+	b int
 }
 
 func (w *discardResponseWriter) Header() http.Header {
@@ -996,7 +1018,8 @@ func (w *discardResponseWriter) Header() http.Header {
 	return w.h
 }
 
-func (*discardResponseWriter) Write(b []byte) (int, error) {
+func (w *discardResponseWriter) Write(b []byte) (int, error) {
+	w.b += len(b)
 	return len(b), nil
 }
 
@@ -1004,9 +1027,7 @@ func (*discardResponseWriter) WriteHeader(int) {
 }
 
 func (w *discardResponseWriter) reset() {
-	if w.h == nil {
-		return
-	}
+	w.b = 0
 	for k := range w.h {
 		delete(w.h, k)
 	}
diff --git a/contrib/andybalholm/brotli/brotli.go b/contrib/andybalholm/brotli/brotli.go
index f8b01e5..7867af1 100644
--- a/contrib/andybalholm/brotli/brotli.go
+++ b/contrib/andybalholm/brotli/brotli.go
@@ -9,8 +9,8 @@ import (
 )
 
 const (
-	Encoding     = "br"
-	DefaultLevel = brotli.DefaultCompression
+	Encoding           = "br"
+	DefaultCompression = brotli.DefaultCompression
 )
 
 type Options = brotli.WriterOptions
diff --git a/contrib/klauspost/gzip/gzip.go b/contrib/klauspost/gzip/gzip.go
index 17d31c6..ab77f1d 100644
--- a/contrib/klauspost/gzip/gzip.go
+++ b/contrib/klauspost/gzip/gzip.go
@@ -8,8 +8,8 @@ import (
 )
 
 const (
-	Encoding     = "gzip"
-	DefaultLevel = gzip.DefaultCompression
+	Encoding           = "gzip"
+	DefaultCompression = gzip.DefaultCompression
 )
 
 type compressor struct {
diff --git a/contrib/klauspost/pgzip/pgzip.go b/contrib/klauspost/pgzip/pgzip.go
index f9493c3..d9c9f77 100644
--- a/contrib/klauspost/pgzip/pgzip.go
+++ b/contrib/klauspost/pgzip/pgzip.go
@@ -8,8 +8,8 @@ import (
 )
 
 const (
-	Encoding     = "gzip"
-	DefaultLevel = pgzip.DefaultCompression
+	Encoding           = "gzip"
+	DefaultCompression = pgzip.DefaultCompression
 )
 
 type compressor struct {
diff --git a/contrib/klauspost/zstd/zstd.go b/contrib/klauspost/zstd/zstd.go
index 226f84f..9b7b717 100644
--- a/contrib/klauspost/zstd/zstd.go
+++ b/contrib/klauspost/zstd/zstd.go
@@ -7,7 +7,10 @@ import (
 	"github.com/klauspost/compress/zstd"
 )
 
-const Encoding = "zstd"
+const (
+	Encoding           = "zstd"
+	DefaultCompression = zstd.SpeedDefault
+)
 
 type compressor struct {
 	pool sync.Pool
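
For context on the renamed constants above, here is a minimal usage sketch (not part of the diff) of how a caller wires the adapter together after this change. It assumes the root package is imported as httpcompression; the handler, route, and listen address are illustrative only.

package main

import (
	"net/http"

	"github.com/CAFxX/httpcompression"
	"github.com/CAFxX/httpcompression/contrib/andybalholm/brotli"
)

func main() {
	// Plain handler used only for illustration.
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello, httpcompression"))
	})

	// DefaultAdapter already applies these options by default; they are
	// passed explicitly here to show the renamed contrib constant
	// (brotli.DefaultCompression, formerly DefaultLevel) and the exported
	// DefaultMinSize threshold.
	compress, err := httpcompression.DefaultAdapter(
		httpcompression.BrotliCompressionLevel(brotli.DefaultCompression),
		httpcompression.MinSize(httpcompression.DefaultMinSize),
	)
	if err != nil {
		panic(err)
	}

	// Responses larger than the minimum size are compressed when the
	// client sends a matching Accept-Encoding header.
	_ = http.ListenAndServe(":8080", compress(mux))
}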