From 51dd70d343b258e8753de473c861e8eb66caa0b0 Mon Sep 17 00:00:00 2001
From: Sadegh Sohani
Date: Thu, 12 Dec 2024 19:32:20 +0330
Subject: [PATCH] docs: add swagger documentation for APIs (#168)

---
 docs/docs.go | 800 +++++
 docs/swagger.json | 771 +++++
 docs/swagger.yaml | 495 +++
 go.mod | 25 +-
 go.sum | 38 +
 internal/platforms/web/http/http.go | 26 +-
 internal/platforms/web/http/http_handler.go | 160 +
 vendor/github.com/KyleBanks/depth/.gitignore | 16 +
 vendor/github.com/KyleBanks/depth/.travis.yml | 9 +
 vendor/github.com/KyleBanks/depth/LICENSE | 21 +
 vendor/github.com/KyleBanks/depth/Makefile | 32 +
 vendor/github.com/KyleBanks/depth/README.md | 232 ++
 vendor/github.com/KyleBanks/depth/depth.go | 129 +
 vendor/github.com/KyleBanks/depth/pkg.go | 184 ++
 vendor/github.com/ghodss/yaml/.gitignore | 20 +
 vendor/github.com/ghodss/yaml/.travis.yml | 7 +
 vendor/github.com/ghodss/yaml/LICENSE | 50 +
 vendor/github.com/ghodss/yaml/README.md | 121 +
 vendor/github.com/ghodss/yaml/fields.go | 501 +++
 vendor/github.com/ghodss/yaml/yaml.go | 277 ++
 .../go-openapi/jsonpointer/.editorconfig | 26 +
 .../go-openapi/jsonpointer/.gitignore | 1 +
 .../go-openapi/jsonpointer/.golangci.yml | 61 +
 .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 +
 .../github.com/go-openapi/jsonpointer/LICENSE | 202 ++
 .../go-openapi/jsonpointer/README.md | 19 +
 .../go-openapi/jsonpointer/pointer.go | 531 ++++
 .../go-openapi/jsonreference/.gitignore | 1 +
 .../go-openapi/jsonreference/.golangci.yml | 61 +
 .../jsonreference/CODE_OF_CONDUCT.md | 74 +
 .../go-openapi/jsonreference/LICENSE | 202 ++
 .../go-openapi/jsonreference/README.md | 19 +
 .../jsonreference/internal/normalize_url.go | 69 +
 .../go-openapi/jsonreference/reference.go | 158 +
 .../github.com/go-openapi/spec/.editorconfig | 26 +
 vendor/github.com/go-openapi/spec/.gitignore | 1 +
 .../github.com/go-openapi/spec/.golangci.yml | 61 +
 .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 +
 vendor/github.com/go-openapi/spec/LICENSE | 202 ++
 vendor/github.com/go-openapi/spec/README.md | 54 +
 vendor/github.com/go-openapi/spec/cache.go | 98 +
 .../go-openapi/spec/contact_info.go | 57 +
 vendor/github.com/go-openapi/spec/debug.go | 49 +
 vendor/github.com/go-openapi/spec/embed.go | 17 +
 vendor/github.com/go-openapi/spec/errors.go | 19 +
 vendor/github.com/go-openapi/spec/expander.go | 607 ++++
 .../go-openapi/spec/external_docs.go | 24 +
 vendor/github.com/go-openapi/spec/header.go | 203 ++
 vendor/github.com/go-openapi/spec/info.go | 184 ++
 vendor/github.com/go-openapi/spec/items.go | 234 ++
 vendor/github.com/go-openapi/spec/license.go | 56 +
 .../github.com/go-openapi/spec/normalizer.go | 202 ++
 .../go-openapi/spec/normalizer_nonwindows.go | 44 +
 .../go-openapi/spec/normalizer_windows.go | 154 +
 .../github.com/go-openapi/spec/operation.go | 400 +++
 .../github.com/go-openapi/spec/parameter.go | 326 ++
 .../github.com/go-openapi/spec/path_item.go | 87 +
 vendor/github.com/go-openapi/spec/paths.go | 97 +
 .../github.com/go-openapi/spec/properties.go | 91 +
 vendor/github.com/go-openapi/spec/ref.go | 193 ++
 vendor/github.com/go-openapi/spec/resolver.go | 127 +
 vendor/github.com/go-openapi/spec/response.go | 152 +
 .../github.com/go-openapi/spec/responses.go | 140 +
 vendor/github.com/go-openapi/spec/schema.go | 645 ++++
 .../go-openapi/spec/schema_loader.go | 331 ++
 .../spec/schemas/jsonschema-draft-04.json | 149 +
 .../go-openapi/spec/schemas/v2/schema.json | 1607 ++++++++++
 .../go-openapi/spec/security_scheme.go | 170 ++
 vendor/github.com/go-openapi/spec/spec.go | 78 +
 vendor/github.com/go-openapi/spec/swagger.go | 448 +++
 vendor/github.com/go-openapi/spec/tag.go | 75 +
 vendor/github.com/go-openapi/spec/url_go19.go | 11 +
 .../github.com/go-openapi/spec/validations.go | 215 ++
 .../github.com/go-openapi/spec/xml_object.go | 68 +
 .../github.com/go-openapi/swag/.editorconfig | 26 +
 .../github.com/go-openapi/swag/.gitattributes | 2 +
 vendor/github.com/go-openapi/swag/.gitignore | 5 +
 .../github.com/go-openapi/swag/.golangci.yml | 60 +
 .../github.com/go-openapi/swag/BENCHMARK.md | 52 +
 .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 +
 vendor/github.com/go-openapi/swag/LICENSE | 202 ++
 vendor/github.com/go-openapi/swag/README.md | 23 +
 vendor/github.com/go-openapi/swag/convert.go | 208 ++
 .../go-openapi/swag/convert_types.go | 730 +++++
 vendor/github.com/go-openapi/swag/doc.go | 31 +
 vendor/github.com/go-openapi/swag/file.go | 33 +
 .../go-openapi/swag/initialism_index.go | 202 ++
 vendor/github.com/go-openapi/swag/json.go | 312 ++
 vendor/github.com/go-openapi/swag/loading.go | 176 ++
 .../github.com/go-openapi/swag/name_lexem.go | 93 +
 vendor/github.com/go-openapi/swag/net.go | 38 +
 vendor/github.com/go-openapi/swag/path.go | 59 +
 vendor/github.com/go-openapi/swag/split.go | 508 +++
 .../go-openapi/swag/string_bytes.go | 8 +
 vendor/github.com/go-openapi/swag/util.go | 364 +++
 vendor/github.com/go-openapi/swag/yaml.go | 481 +++
 vendor/github.com/josharian/intern/README.md | 5 +
 vendor/github.com/josharian/intern/intern.go | 44 +
 vendor/github.com/josharian/intern/license.md | 21 +
 .../github.com/labstack/echo/v4/CHANGELOG.md | 40 +
 vendor/github.com/labstack/echo/v4/Makefile | 4 +-
 vendor/github.com/labstack/echo/v4/README.md | 7 +-
 vendor/github.com/labstack/echo/v4/bind.go | 106 +-
 vendor/github.com/labstack/echo/v4/binder.go | 9 +-
 vendor/github.com/labstack/echo/v4/context.go | 10 +-
 vendor/github.com/labstack/echo/v4/echo.go | 20 +-
 vendor/github.com/labstack/echo/v4/echo_fs.go | 2 +-
 vendor/github.com/labstack/echo/v4/group.go | 2 +-
 vendor/github.com/labstack/echo/v4/ip.go | 2 +-
 .../github.com/labstack/echo/v4/renderer.go | 29 +
 .../github.com/labstack/echo/v4/response.go | 6 +-
 .../echo/v4/responsecontroller_1.19.go | 44 -
 .../echo/v4/responsecontroller_1.20.go | 20 -
 vendor/github.com/labstack/echo/v4/router.go | 33 +-
 vendor/github.com/mailru/easyjson/LICENSE | 7 +
 .../github.com/mailru/easyjson/buffer/pool.go | 278 ++
 .../mailru/easyjson/jlexer/bytestostr.go | 24 +
 .../easyjson/jlexer/bytestostr_nounsafe.go | 13 +
 .../mailru/easyjson/jlexer/error.go | 15 +
 .../mailru/easyjson/jlexer/lexer.go | 1244 ++++++++
 .../mailru/easyjson/jwriter/writer.go | 405 +++
 .../github.com/swaggo/echo-swagger/.gitignore | 17 +
 .../swaggo/echo-swagger/.goreleaser.yml | 10 +
 vendor/github.com/swaggo/echo-swagger/LICENSE | 21 +
 .../echo-swagger/PULL_REQUEST_TEMPLATE.md | 8 +
 .../github.com/swaggo/echo-swagger/README.md | 89 +
 .../github.com/swaggo/echo-swagger/swagger.go | 311 ++
 vendor/github.com/swaggo/files/v2/.gitmodules | 3 +
 vendor/github.com/swaggo/files/v2/LICENSE | 21 +
 vendor/github.com/swaggo/files/v2/Makefile | 10 +
 vendor/github.com/swaggo/files/v2/README.md | 9 +
 vendor/github.com/swaggo/files/v2/files.go | 12 +
 vendor/github.com/swaggo/swag/.gitignore | 24 +
 vendor/github.com/swaggo/swag/.goreleaser.yml | 30 +
 .../github.com/swaggo/swag/CODE_OF_CONDUCT.md | 46 +
 vendor/github.com/swaggo/swag/CONTRIBUTING.md | 16 +
 vendor/github.com/swaggo/swag/Dockerfile | 36 +
 vendor/github.com/swaggo/swag/Makefile | 78 +
 .../swaggo/swag/PULL_REQUEST_TEMPLATE.md | 8 +
 vendor/github.com/swaggo/swag/README.md | 1004 ++++++
 vendor/github.com/swaggo/swag/README_pt.md | 968 ++++++
 vendor/github.com/swaggo/swag/README_zh-CN.md | 771 +++++
 vendor/github.com/swaggo/swag/const.go | 567 ++++
 vendor/github.com/swaggo/swag/doc.go | 5 +
 vendor/github.com/swaggo/swag/enums.go | 13 +
 vendor/github.com/swaggo/swag/field_parser.go | 686 +++++
 vendor/github.com/swaggo/swag/formatter.go | 182 ++
 vendor/github.com/swaggo/swag/generics.go | 448 +++
 vendor/github.com/swaggo/swag/golist.go | 78 +
 vendor/github.com/swaggo/swag/license | 21 +
 vendor/github.com/swaggo/swag/operation.go | 1249 ++++++++
 vendor/github.com/swaggo/swag/package.go | 187 ++
 vendor/github.com/swaggo/swag/packages.go | 613 ++++
 vendor/github.com/swaggo/swag/parser.go | 1920 ++++++++++++
 vendor/github.com/swaggo/swag/schema.go | 293 ++
 vendor/github.com/swaggo/swag/spec.go | 64 +
 vendor/github.com/swaggo/swag/swagger.go | 72 +
 vendor/github.com/swaggo/swag/types.go | 123 +
 vendor/github.com/swaggo/swag/utils.go | 55 +
 vendor/github.com/swaggo/swag/version.go | 4 +
 vendor/golang.org/x/net/http2/frame.go | 4 +-
 vendor/golang.org/x/net/http2/http2.go | 42 +-
 vendor/golang.org/x/net/http2/server.go | 35 +-
 vendor/golang.org/x/net/http2/transport.go | 137 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 9 +
 .../x/sys/unix/zerrors_linux_386.go | 6 +
 .../x/sys/unix/zerrors_linux_amd64.go | 6 +
 .../x/sys/unix/zerrors_linux_arm.go | 6 +
 .../x/sys/unix/zerrors_linux_arm64.go | 7 +
 .../x/sys/unix/zerrors_linux_loong64.go | 6 +
 .../x/sys/unix/zerrors_linux_mips.go | 6 +
 .../x/sys/unix/zerrors_linux_mips64.go | 6 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 6 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 6 +
 .../x/sys/unix/zerrors_linux_ppc.go | 6 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 6 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 6 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 6 +
 .../x/sys/unix/zerrors_linux_s390x.go | 6 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 6 +
 .../x/sys/unix/ztypes_darwin_amd64.go | 60 +
 .../x/sys/unix/ztypes_darwin_arm64.go | 60 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 20 +-
 .../x/sys/windows/syscall_windows.go | 2 +
 .../golang.org/x/sys/windows/types_windows.go | 1 +
 .../x/sys/windows/zsyscall_windows.go | 28 +-
 vendor/golang.org/x/tools/LICENSE | 27 +
 vendor/golang.org/x/tools/PATENTS | 22 +
 .../x/tools/go/ast/astutil/enclosing.go | 654 ++++
 .../x/tools/go/ast/astutil/imports.go | 490 +++
 .../x/tools/go/ast/astutil/rewrite.go | 486 +++
 .../golang.org/x/tools/go/ast/astutil/util.go | 11 +
 .../x/tools/go/buildutil/allpackages.go | 195 ++
 .../x/tools/go/buildutil/fakecontext.go | 111 +
 .../x/tools/go/buildutil/overlay.go | 101 +
 .../golang.org/x/tools/go/buildutil/tags.go | 100 +
 .../golang.org/x/tools/go/buildutil/util.go | 209 ++
 .../golang.org/x/tools/go/internal/cgo/cgo.go | 219 ++
 .../x/tools/go/internal/cgo/cgo_pkgconfig.go | 42 +
 vendor/golang.org/x/tools/go/loader/doc.go | 202 ++
 vendor/golang.org/x/tools/go/loader/loader.go | 1064 +++++++
 vendor/golang.org/x/tools/go/loader/util.go | 123 +
 vendor/gopkg.in/yaml.v2/.travis.yml | 17 +
 vendor/gopkg.in/yaml.v2/LICENSE | 201 ++
 vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 +
 vendor/gopkg.in/yaml.v2/NOTICE | 13 +
 vendor/gopkg.in/yaml.v2/README.md | 133 +
 vendor/gopkg.in/yaml.v2/apic.go | 744 +++++
 vendor/gopkg.in/yaml.v2/decode.go | 815 +++++
 vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++
 vendor/gopkg.in/yaml.v2/encode.go | 390 +++
 vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++++
 vendor/gopkg.in/yaml.v2/readerc.go | 412 +++
 vendor/gopkg.in/yaml.v2/resolve.go | 258 ++
 vendor/gopkg.in/yaml.v2/scannerc.go | 2711 +++++++++++++++++
 vendor/gopkg.in/yaml.v2/sorter.go | 113 +
 vendor/gopkg.in/yaml.v2/writerc.go | 26 +
 vendor/gopkg.in/yaml.v2/yaml.go | 478 +++
 vendor/gopkg.in/yaml.v2/yamlh.go | 739 +++++
 vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++
 vendor/modules.txt | 59 +-
 221 files changed, 43752 insertions(+), 223 deletions(-)
 create mode 100644 docs/docs.go
 create mode 100644 docs/swagger.json
 create mode 100644 docs/swagger.yaml
 create mode 100644 vendor/github.com/KyleBanks/depth/.gitignore
 create mode 100644 vendor/github.com/KyleBanks/depth/.travis.yml
 create mode 100644 vendor/github.com/KyleBanks/depth/LICENSE
 create mode 100644 vendor/github.com/KyleBanks/depth/Makefile
 create mode 100644 vendor/github.com/KyleBanks/depth/README.md
 create mode 100644 vendor/github.com/KyleBanks/depth/depth.go
 create mode 100644 vendor/github.com/KyleBanks/depth/pkg.go
 create mode 100644 vendor/github.com/ghodss/yaml/.gitignore
 create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
 create mode 100644 vendor/github.com/ghodss/yaml/LICENSE
 create mode 100644 vendor/github.com/ghodss/yaml/README.md
 create mode 100644 vendor/github.com/ghodss/yaml/fields.go
 create mode 100644 vendor/github.com/ghodss/yaml/yaml.go
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
 create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/spec/.gitignore
 create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/spec/LICENSE
 create mode 100644 vendor/github.com/go-openapi/spec/README.md
 create mode 100644 vendor/github.com/go-openapi/spec/cache.go
 create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
 create mode 100644 vendor/github.com/go-openapi/spec/debug.go
 create mode 100644 vendor/github.com/go-openapi/spec/embed.go
 create mode 100644 vendor/github.com/go-openapi/spec/errors.go
 create mode 100644 vendor/github.com/go-openapi/spec/expander.go
 create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
 create mode 100644 vendor/github.com/go-openapi/spec/header.go
 create mode 100644 vendor/github.com/go-openapi/spec/info.go
 create mode 100644 vendor/github.com/go-openapi/spec/items.go
 create mode 100644 vendor/github.com/go-openapi/spec/license.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go
 create mode 100644 vendor/github.com/go-openapi/spec/operation.go
 create mode 100644 vendor/github.com/go-openapi/spec/parameter.go
 create mode 100644 vendor/github.com/go-openapi/spec/path_item.go
 create mode 100644 vendor/github.com/go-openapi/spec/paths.go
 create mode 100644 vendor/github.com/go-openapi/spec/properties.go
 create mode 100644 vendor/github.com/go-openapi/spec/ref.go
 create mode 100644 vendor/github.com/go-openapi/spec/resolver.go
 create mode 100644 vendor/github.com/go-openapi/spec/response.go
 create mode 100644 vendor/github.com/go-openapi/spec/responses.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json
 create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
 create mode 100644 vendor/github.com/go-openapi/spec/spec.go
 create mode 100644 vendor/github.com/go-openapi/spec/swagger.go
 create mode 100644 vendor/github.com/go-openapi/spec/tag.go
 create mode 100644 vendor/github.com/go-openapi/spec/url_go19.go
 create mode 100644 vendor/github.com/go-openapi/spec/validations.go
 create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
 create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/swag/.gitignore
 create mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md
 create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/swag/LICENSE
 create mode 100644 vendor/github.com/go-openapi/swag/README.md
 create mode 100644 vendor/github.com/go-openapi/swag/convert.go
 create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
 create mode 100644 vendor/github.com/go-openapi/swag/doc.go
 create mode 100644 vendor/github.com/go-openapi/swag/file.go
 create mode 100644 vendor/github.com/go-openapi/swag/initialism_index.go
 create mode 100644 vendor/github.com/go-openapi/swag/json.go
 create mode 100644 vendor/github.com/go-openapi/swag/loading.go
 create mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
 create mode 100644 vendor/github.com/go-openapi/swag/net.go
 create mode 100644 vendor/github.com/go-openapi/swag/path.go
 create mode 100644 vendor/github.com/go-openapi/swag/split.go
 create mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go
 create mode 100644 vendor/github.com/go-openapi/swag/util.go
 create mode 100644 vendor/github.com/go-openapi/swag/yaml.go
 create mode 100644 vendor/github.com/josharian/intern/README.md
 create mode 100644 vendor/github.com/josharian/intern/intern.go
 create mode 100644 vendor/github.com/josharian/intern/license.md
 create mode 100644 vendor/github.com/labstack/echo/v4/renderer.go
 delete mode 100644 vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go
 delete mode 100644 vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go
 create mode 100644 vendor/github.com/mailru/easyjson/LICENSE
 create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go
 create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go
 create mode 100644 vendor/github.com/swaggo/echo-swagger/.gitignore
 create mode 100644 vendor/github.com/swaggo/echo-swagger/.goreleaser.yml
 create mode 100644 vendor/github.com/swaggo/echo-swagger/LICENSE
 create mode 100644 vendor/github.com/swaggo/echo-swagger/PULL_REQUEST_TEMPLATE.md
 create mode 100644 vendor/github.com/swaggo/echo-swagger/README.md
 create mode 100644 vendor/github.com/swaggo/echo-swagger/swagger.go
 create mode 100644 vendor/github.com/swaggo/files/v2/.gitmodules
 create mode 100644 vendor/github.com/swaggo/files/v2/LICENSE
 create mode 100644 vendor/github.com/swaggo/files/v2/Makefile
 create mode 100644 vendor/github.com/swaggo/files/v2/README.md
 create mode 100644 vendor/github.com/swaggo/files/v2/files.go
 create mode 100644 vendor/github.com/swaggo/swag/.gitignore
 create mode 100644 vendor/github.com/swaggo/swag/.goreleaser.yml
 create mode 100644 vendor/github.com/swaggo/swag/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/swaggo/swag/CONTRIBUTING.md
 create mode 100644 vendor/github.com/swaggo/swag/Dockerfile
 create mode 100644 vendor/github.com/swaggo/swag/Makefile
 create mode 100644 vendor/github.com/swaggo/swag/PULL_REQUEST_TEMPLATE.md
 create mode 100644 vendor/github.com/swaggo/swag/README.md
 create mode 100644 vendor/github.com/swaggo/swag/README_pt.md
 create mode 100644 vendor/github.com/swaggo/swag/README_zh-CN.md
 create mode 100644 vendor/github.com/swaggo/swag/const.go
 create mode 100644 vendor/github.com/swaggo/swag/doc.go
 create mode 100644 vendor/github.com/swaggo/swag/enums.go
 create mode 100644 vendor/github.com/swaggo/swag/field_parser.go
 create mode 100644 vendor/github.com/swaggo/swag/formatter.go
 create mode 100644 vendor/github.com/swaggo/swag/generics.go
 create mode 100644 vendor/github.com/swaggo/swag/golist.go
 create mode 100644 vendor/github.com/swaggo/swag/license
 create mode 100644 vendor/github.com/swaggo/swag/operation.go
 create mode 100644 vendor/github.com/swaggo/swag/package.go
 create mode 100644 vendor/github.com/swaggo/swag/packages.go
 create mode 100644 vendor/github.com/swaggo/swag/parser.go
 create mode 100644 vendor/github.com/swaggo/swag/schema.go
 create mode 100644 vendor/github.com/swaggo/swag/spec.go
 create mode 100644 vendor/github.com/swaggo/swag/swagger.go
 create mode 100644 vendor/github.com/swaggo/swag/types.go
 create mode 100644 vendor/github.com/swaggo/swag/utils.go
 create mode 100644 vendor/github.com/swaggo/swag/version.go
 create mode 100644 vendor/golang.org/x/tools/LICENSE
 create mode 100644 vendor/golang.org/x/tools/PATENTS
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go
 create mode 100644 vendor/golang.org/x/tools/go/buildutil/allpackages.go
 create mode 100644 vendor/golang.org/x/tools/go/buildutil/fakecontext.go
 create mode 100644 vendor/golang.org/x/tools/go/buildutil/overlay.go
 create mode 100644 vendor/golang.org/x/tools/go/buildutil/tags.go
 create mode 100644 vendor/golang.org/x/tools/go/buildutil/util.go
 create mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo.go
 create mode 100644
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go create mode 100644 vendor/golang.org/x/tools/go/loader/doc.go create mode 100644 vendor/golang.org/x/tools/go/loader/loader.go create mode 100644 vendor/golang.org/x/tools/go/loader/util.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/docs/docs.go b/docs/docs.go new file mode 100644 index 00000000..a6087e3d --- /dev/null +++ b/docs/docs.go @@ -0,0 +1,800 @@ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/calculate/fee": { + "post": { + "description": "Calculate the fee based on the provided amount", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate fee", + "parameters": [ + { + "description": "Calculate Fee Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.CalculateFeeRequest" + } + } + ], + "responses": { + "200": { + "description": "Calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/calculate/help": { + "get": { + "description": "Calculate help information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate help information", + "responses": { + "200": { + "description": "Successful calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/calculate/reward": { + "post": { + "description": "Calculate the reward based on the provided stake and number of days", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate reward", + "parameters": [ + { + "description": "Calculate Reward Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.CalculateRewardRequest" + } + } + ], + "responses": { + "200": { + "description": "Calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/commands": { + "get": { + "description": "Retrieve a list of all available commands", + "produces": [ + "application/json" + ], + "tags": [ + "Commands" + ], + "summary": "Get available 
commands", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.GetCommandsResponse" + } + } + } + } + }, + "/help": { + "get": { + "description": "This endpoint runs the 'help' command and returns the result, including color, title, error message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Help" + ], + "summary": "Executes a help command and returns the result.", + "responses": { + "200": { + "description": "Help command result", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "Result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/market/help": { + "get": { + "description": "This endpoint processes the \"market help\" command and returns relevant details about the market command.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Market" + ], + "summary": "Get help for the market command", + "responses": { + "200": { + "description": "Command help details", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "Result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/market/price": { + "get": { + "description": "Retrieves the current market price information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Market" + ], + "summary": "Get Market Price", + "responses": { + "200": { + "description": "Successfully retrieved the market price", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/network/health": { + "get": { + "description": "Retrieve the health status of the network", + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get network health", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/network/help": { + "get": { + "description": "Provides network help information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Provides network help information", + "responses": { + "200": { + "description": "Successfully retrieved network help", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/network/node-info": { + "post": { + "description": "Retrieves information about a network node based on the provided validator address.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get Network Node Information", + "parameters": [ + { + "description": "Validator Address", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.NetworkNodeInfoRequest" + } + } + ], + "responses": { + "200": { + "description": "The result of the command", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/network/status": { + "post": { + "description": "Retrieves the current status of 
the network node, including command results such as color, title, error message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get network node status", + "parameters": [ + { + "description": "Request body", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object" + } + } + ], + "responses": { + "200": { + "description": "Successfully retrieved network node status", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/phoenix/faucet": { + "post": { + "description": "Initiates a faucet request for the Phoenix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Initiates a faucet request for the Phoenix", + "parameters": [ + { + "description": "Faucet request with address", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.PhoenixFaucetRequest" + } + } + ], + "responses": { + "200": { + "description": "Successful response containing faucet status", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/phoenix/help": { + "get": { + "description": "Executes Phoenix help command.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Executes Phoenix help command", + "responses": { + "200": { + "description": "OK", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/phoenix/status": { + "get": { + "description": "Get Phoenix Status", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Get Phoenix Status", + "responses": { + "200": { + "description": "Status of the Phoenix application", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/claim": { + "post": { + "description": "This endpoint allows users to claim a voucher by providing a valid code and their address.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Claim a voucher using the provided code and address.", + "parameters": [ + { + "description": "Voucher Claim Request", + "name": "voucherClaimRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherClaimRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher claim successful", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/create-one": { + "post": { + "description": "Creates a voucher.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Creates a new voucher", + "parameters": [ + { + "description": "Voucher details", + "name": "voucherCreateOneRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherCreateOneRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher creation successful", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/help": { + "get": { + "description": "Executes the 'voucher help' command and returns the result including color, title, error, 
message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Get help information for the voucher command", + "responses": { + "200": { + "description": "Successful response with command result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/status": { + "post": { + "description": "Accepts a voucher code and returns the status of the voucher, including title, message, error, color, and success information.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Check the status of a voucher", + "parameters": [ + { + "description": "Voucher Status Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherStatusRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher status response", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/vouchers/create-bulk": { + "post": { + "description": "This API allows you to create multiple vouchers.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Create vouchers in bulk", + "parameters": [ + { + "type": "string", + "description": "File containing voucher data", + "name": "file", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "Notification flag (optional)", + "name": "notify", + "in": "formData" + } + ], + "responses": { + "200": { + "description": "Command result containing the status of the bulk voucher creation", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + } + }, + "definitions": { + "command.InputBox": { + "type": "integer", + "enum": [ + 0, + 1, + 2, + 3, + 4 + ], + "x-enum-varnames": [ + "InputBoxText", + "InputBoxNumber", + "InputBoxFile", + "InputBoxAmount", + "InputBoxToggle" + ] + }, + "http.BasicResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + }, + "http.CalculateFeeRequest": { + "type": "object", + "properties": { + "amount": { + "type": "string" + } + } + }, + "http.CalculateRewardRequest": { + "type": "object", + "properties": { + "days": { + "type": "string" + }, + "stake": { + "type": "string" + } + } + }, + "http.CommandArgsResponse": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + }, + "type": { + "$ref": "#/definitions/command.InputBox" + } + } + }, + "http.CommandResponse": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandArgsResponse" + } + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "subCommands": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandResponse" + } + } + } + }, + "http.CommandResult": { + "type": "object", + "properties": { + "color": { + "type": "string" + }, + "error": { + "type": "string" + }, + "message": { + "type": "string" + }, + "successful": { + "type": "boolean" + }, + "title": { + "type": "string" + } + } + }, + "http.GetCommandsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandResponse" + } + } + } + }, + "http.NetworkNodeInfoRequest": { + "type": 
"object", + "properties": { + "validatorAddress": { + "type": "string" + } + } + }, + "http.PhoenixFaucetRequest": { + "type": "object", + "properties": { + "address": { + "type": "string" + } + } + }, + "http.VoucherClaimRequest": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "code": { + "type": "string" + } + } + }, + "http.VoucherCreateOneRequest": { + "type": "object", + "properties": { + "amount": { + "type": "string" + }, + "description": { + "type": "string" + }, + "recipient": { + "type": "string" + }, + "validMonths": { + "type": "string" + } + } + }, + "http.VoucherStatusRequest": { + "type": "object", + "properties": { + "code": { + "type": "string" + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "", + Host: "", + BasePath: "/api/v1", + Schemes: []string{}, + Title: "", + Description: "", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/docs/swagger.json b/docs/swagger.json new file mode 100644 index 00000000..597f52a5 --- /dev/null +++ b/docs/swagger.json @@ -0,0 +1,771 @@ +{ + "swagger": "2.0", + "info": { + "contact": {} + }, + "paths": { + "/calculate/fee": { + "post": { + "description": "Calculate the fee based on the provided amount", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate fee", + "parameters": [ + { + "description": "Calculate Fee Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.CalculateFeeRequest" + } + } + ], + "responses": { + "200": { + "description": "Calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/calculate/help": { + "get": { + "description": "Calculate help information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate help information", + "responses": { + "200": { + "description": "Successful calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/calculate/reward": { + "post": { + "description": "Calculate the reward based on the provided stake and number of days", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Calculate" + ], + "summary": "Calculate reward", + "parameters": [ + { + "description": "Calculate Reward Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.CalculateRewardRequest" + } + } + ], + "responses": { + "200": { + "description": "Calculation result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/commands": { + "get": { + "description": "Retrieve a list of all available commands", + "produces": [ + "application/json" + ], + "tags": [ + "Commands" + ], + "summary": "Get available commands", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.GetCommandsResponse" + } + } + } + } + }, + "/help": { + "get": { + "description": "This endpoint runs the 'help' command and returns the result, including color, title, error message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + 
"Help" + ], + "summary": "Executes a help command and returns the result.", + "responses": { + "200": { + "description": "Help command result", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "Result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/market/help": { + "get": { + "description": "This endpoint processes the \"market help\" command and returns relevant details about the market command.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Market" + ], + "summary": "Get help for the market command", + "responses": { + "200": { + "description": "Command help details", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "Result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/market/price": { + "get": { + "description": "Retrieves the current market price information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Market" + ], + "summary": "Get Market Price", + "responses": { + "200": { + "description": "Successfully retrieved the market price", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/network/health": { + "get": { + "description": "Retrieve the health status of the network", + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get network health", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/network/help": { + "get": { + "description": "Provides network help information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Provides network help information", + "responses": { + "200": { + "description": "Successfully retrieved network help", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/network/node-info": { + "post": { + "description": "Retrieves information about a network node based on the provided validator address.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get Network Node Information", + "parameters": [ + { + "description": "Validator Address", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.NetworkNodeInfoRequest" + } + } + ], + "responses": { + "200": { + "description": "The result of the command", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/network/status": { + "post": { + "description": "Retrieves the current status of the network node, including command results such as color, title, error message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Network" + ], + "summary": "Get network node status", + "parameters": [ + { + "description": "Request body", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object" + } + } + ], + 
"responses": { + "200": { + "description": "Successfully retrieved network node status", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/phoenix/faucet": { + "post": { + "description": "Initiates a faucet request for the Phoenix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Initiates a faucet request for the Phoenix", + "parameters": [ + { + "description": "Faucet request with address", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.PhoenixFaucetRequest" + } + } + ], + "responses": { + "200": { + "description": "Successful response containing faucet status", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/phoenix/help": { + "get": { + "description": "Executes Phoenix help command.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Executes Phoenix help command", + "responses": { + "200": { + "description": "OK", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/http.BasicResponse" + }, + { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + } + ] + } + } + } + } + }, + "/phoenix/status": { + "get": { + "description": "Get Phoenix Status", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Phoenix" + ], + "summary": "Get Phoenix Status", + "responses": { + "200": { + "description": "Status of the Phoenix application", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/claim": { + "post": { + "description": "This endpoint allows users to claim a voucher by providing a valid code and their address.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Claim a voucher using the provided code and address.", + "parameters": [ + { + "description": "Voucher Claim Request", + "name": "voucherClaimRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherClaimRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher claim successful", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/create-one": { + "post": { + "description": "Creates a voucher.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Creates a new voucher", + "parameters": [ + { + "description": "Voucher details", + "name": "voucherCreateOneRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherCreateOneRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher creation successful", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/help": { + "get": { + "description": "Executes the 'voucher help' command and returns the result including color, title, error, message, and success status.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Get help information for the voucher command", + "responses": { + "200": { + "description": "Successful response with command result", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/voucher/status": { + "post": { + 
"description": "Accepts a voucher code and returns the status of the voucher, including title, message, error, color, and success information.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Check the status of a voucher", + "parameters": [ + { + "description": "Voucher Status Request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.VoucherStatusRequest" + } + } + ], + "responses": { + "200": { + "description": "Voucher status response", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + }, + "/vouchers/create-bulk": { + "post": { + "description": "This API allows you to create multiple vouchers.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Voucher" + ], + "summary": "Create vouchers in bulk", + "parameters": [ + { + "type": "string", + "description": "File containing voucher data", + "name": "file", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "Notification flag (optional)", + "name": "notify", + "in": "formData" + } + ], + "responses": { + "200": { + "description": "Command result containing the status of the bulk voucher creation", + "schema": { + "$ref": "#/definitions/http.BasicResponse" + } + } + } + } + } + }, + "definitions": { + "command.InputBox": { + "type": "integer", + "enum": [ + 0, + 1, + 2, + 3, + 4 + ], + "x-enum-varnames": [ + "InputBoxText", + "InputBoxNumber", + "InputBoxFile", + "InputBoxAmount", + "InputBoxToggle" + ] + }, + "http.BasicResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/http.CommandResult" + } + } + }, + "http.CalculateFeeRequest": { + "type": "object", + "properties": { + "amount": { + "type": "string" + } + } + }, + "http.CalculateRewardRequest": { + "type": "object", + "properties": { + "days": { + "type": "string" + }, + "stake": { + "type": "string" + } + } + }, + "http.CommandArgsResponse": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + }, + "type": { + "$ref": "#/definitions/command.InputBox" + } + } + }, + "http.CommandResponse": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandArgsResponse" + } + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "subCommands": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandResponse" + } + } + } + }, + "http.CommandResult": { + "type": "object", + "properties": { + "color": { + "type": "string" + }, + "error": { + "type": "string" + }, + "message": { + "type": "string" + }, + "successful": { + "type": "boolean" + }, + "title": { + "type": "string" + } + } + }, + "http.GetCommandsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/http.CommandResponse" + } + } + } + }, + "http.NetworkNodeInfoRequest": { + "type": "object", + "properties": { + "validatorAddress": { + "type": "string" + } + } + }, + "http.PhoenixFaucetRequest": { + "type": "object", + "properties": { + "address": { + "type": "string" + } + } + }, + "http.VoucherClaimRequest": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "code": { + "type": "string" + } + } + }, + "http.VoucherCreateOneRequest": { + "type": "object", + 
"properties": { + "amount": { + "type": "string" + }, + "description": { + "type": "string" + }, + "recipient": { + "type": "string" + }, + "validMonths": { + "type": "string" + } + } + }, + "http.VoucherStatusRequest": { + "type": "object", + "properties": { + "code": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml new file mode 100644 index 00000000..0543fa00 --- /dev/null +++ b/docs/swagger.yaml @@ -0,0 +1,495 @@ +definitions: + command.InputBox: + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + type: integer + x-enum-varnames: + - InputBoxText + - InputBoxNumber + - InputBoxFile + - InputBoxAmount + - InputBoxToggle + http.BasicResponse: + properties: + result: + $ref: '#/definitions/http.CommandResult' + type: object + http.CalculateFeeRequest: + properties: + amount: + type: string + type: object + http.CalculateRewardRequest: + properties: + days: + type: string + stake: + type: string + type: object + http.CommandArgsResponse: + properties: + description: + type: string + name: + type: string + optional: + type: boolean + type: + $ref: '#/definitions/command.InputBox' + type: object + http.CommandResponse: + properties: + args: + items: + $ref: '#/definitions/http.CommandArgsResponse' + type: array + description: + type: string + name: + type: string + subCommands: + items: + $ref: '#/definitions/http.CommandResponse' + type: array + type: object + http.CommandResult: + properties: + color: + type: string + error: + type: string + message: + type: string + successful: + type: boolean + title: + type: string + type: object + http.GetCommandsResponse: + properties: + data: + items: + $ref: '#/definitions/http.CommandResponse' + type: array + type: object + http.NetworkNodeInfoRequest: + properties: + validatorAddress: + type: string + type: object + http.PhoenixFaucetRequest: + properties: + address: + type: string + type: object + http.VoucherClaimRequest: + properties: + address: + type: string + code: + type: string + type: object + http.VoucherCreateOneRequest: + properties: + amount: + type: string + description: + type: string + recipient: + type: string + validMonths: + type: string + type: object + http.VoucherStatusRequest: + properties: + code: + type: string + type: object +info: + contact: {} +paths: + /calculate/fee: + post: + consumes: + - application/json + description: Calculate the fee based on the provided amount + parameters: + - description: Calculate Fee Request + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.CalculateFeeRequest' + produces: + - application/json + responses: + "200": + description: Calculation result + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Calculate fee + tags: + - Calculate + /calculate/help: + get: + consumes: + - application/json + description: Calculate help information + produces: + - application/json + responses: + "200": + description: Successful calculation result + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Calculate help information + tags: + - Calculate + /calculate/reward: + post: + consumes: + - application/json + description: Calculate the reward based on the provided stake and number of + days + parameters: + - description: Calculate Reward Request + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.CalculateRewardRequest' + produces: + - application/json + responses: + "200": + description: Calculation result + schema: + $ref: '#/definitions/http.BasicResponse' + 
summary: Calculate reward + tags: + - Calculate + /commands: + get: + description: Retrieve a list of all available commands + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.GetCommandsResponse' + summary: Get available commands + tags: + - Commands + /help: + get: + consumes: + - application/json + description: This endpoint runs the 'help' command and returns the result, including + color, title, error message, and success status. + produces: + - application/json + responses: + "200": + description: Help command result + schema: + allOf: + - $ref: '#/definitions/http.BasicResponse' + - properties: + Result: + $ref: '#/definitions/http.CommandResult' + type: object + summary: Executes a help command and returns the result. + tags: + - Help + /market/help: + get: + consumes: + - application/json + description: This endpoint processes the "market help" command and returns relevant + details about the market command. + produces: + - application/json + responses: + "200": + description: Command help details + schema: + allOf: + - $ref: '#/definitions/http.BasicResponse' + - properties: + Result: + $ref: '#/definitions/http.CommandResult' + type: object + summary: Get help for the market command + tags: + - Market + /market/price: + get: + consumes: + - application/json + description: Retrieves the current market price information + produces: + - application/json + responses: + "200": + description: Successfully retrieved the market price + schema: + allOf: + - $ref: '#/definitions/http.BasicResponse' + - properties: + result: + $ref: '#/definitions/http.CommandResult' + type: object + summary: Get Market Price + tags: + - Market + /network/health: + get: + description: Retrieve the health status of the network + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Get network health + tags: + - Network + /network/help: + get: + consumes: + - application/json + description: Provides network help information + produces: + - application/json + responses: + "200": + description: Successfully retrieved network help + schema: + allOf: + - $ref: '#/definitions/http.BasicResponse' + - properties: + result: + $ref: '#/definitions/http.CommandResult' + type: object + summary: Provides network help information + tags: + - Network + /network/node-info: + post: + consumes: + - application/json + description: Retrieves information about a network node based on the provided + validator address. + parameters: + - description: Validator Address + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.NetworkNodeInfoRequest' + produces: + - application/json + responses: + "200": + description: The result of the command + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Get Network Node Information + tags: + - Network + /network/status: + post: + consumes: + - application/json + description: Retrieves the current status of the network node, including command + results such as color, title, error message, and success status. 
+ parameters: + - description: Request body + in: body + name: body + required: true + schema: + type: object + produces: + - application/json + responses: + "200": + description: Successfully retrieved network node status + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Get network node status + tags: + - Network + /phoenix/faucet: + post: + consumes: + - application/json + description: Initiates a faucet request for the Phoenix + parameters: + - description: Faucet request with address + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.PhoenixFaucetRequest' + produces: + - application/json + responses: + "200": + description: Successful response containing faucet status + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Initiates a faucet request for the Phoenix + tags: + - Phoenix + /phoenix/help: + get: + consumes: + - application/json + description: Executes Phoenix help command. + produces: + - application/json + responses: + "200": + description: OK + schema: + allOf: + - $ref: '#/definitions/http.BasicResponse' + - properties: + result: + $ref: '#/definitions/http.CommandResult' + type: object + summary: Executes Phoenix help command + tags: + - Phoenix + /phoenix/status: + get: + consumes: + - application/json + description: Get Phoenix Status + produces: + - application/json + responses: + "200": + description: Status of the Phoenix application + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Get Phoenix Status + tags: + - Phoenix + /voucher/claim: + post: + consumes: + - application/json + description: This endpoint allows users to claim a voucher by providing a valid + code and their address. + parameters: + - description: Voucher Claim Request + in: body + name: voucherClaimRequest + required: true + schema: + $ref: '#/definitions/http.VoucherClaimRequest' + produces: + - application/json + responses: + "200": + description: Voucher claim successful + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Claim a voucher using the provided code and address. + tags: + - Voucher + /voucher/create-one: + post: + consumes: + - application/json + description: Creates a voucher. + parameters: + - description: Voucher details + in: body + name: voucherCreateOneRequest + required: true + schema: + $ref: '#/definitions/http.VoucherCreateOneRequest' + produces: + - application/json + responses: + "200": + description: Voucher creation successful + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Creates a new voucher + tags: + - Voucher + /voucher/help: + get: + consumes: + - application/json + description: Executes the 'voucher help' command and returns the result including + color, title, error, message, and success status. + produces: + - application/json + responses: + "200": + description: Successful response with command result + schema: + $ref: '#/definitions/http.BasicResponse' + summary: Get help information for the voucher command + tags: + - Voucher + /voucher/status: + post: + consumes: + - application/json + description: Accepts a voucher code and returns the status of the voucher, including + title, message, error, color, and success information. 
+      parameters:
+      - description: Voucher Status Request
+        in: body
+        name: request
+        required: true
+        schema:
+          $ref: '#/definitions/http.VoucherStatusRequest'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Voucher status response
+          schema:
+            $ref: '#/definitions/http.BasicResponse'
+      summary: Check the status of a voucher
+      tags:
+      - Voucher
+  /voucher/create-bulk:
+    post:
+      consumes:
+      - application/json
+      description: This API allows you to create multiple vouchers.
+      parameters:
+      - description: File containing voucher data
+        in: formData
+        name: file
+        required: true
+        type: string
+      - description: Notification flag (optional)
+        in: formData
+        name: notify
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Command result containing the status of the bulk voucher creation
+          schema:
+            $ref: '#/definitions/http.BasicResponse'
+      summary: Create vouchers in bulk
+      tags:
+      - Voucher
+swagger: "2.0"
diff --git a/go.mod b/go.mod
index 43d4c211..fab6ac1d 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/go-mail/mail/v2 v2.3.0
 	github.com/h2non/gock v1.2.0
 	github.com/jszwec/csvutil v1.10.0
-	github.com/labstack/echo/v4 v4.12.0
+	github.com/labstack/echo/v4 v4.13.1
 	github.com/pactus-project/pactus v1.6.0
 	github.com/spf13/cobra v1.8.1
 	github.com/stretchr/testify v1.9.0
@@ -19,11 +19,26 @@ require (
 )

 require (
+	github.com/KyleBanks/depth v1.2.1 // indirect
+	github.com/PuerkitoBio/purell v1.2.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/bits-and-blooms/bitset v1.15.0 // indirect
 	github.com/consensys/bavard v0.1.22 // indirect
 	github.com/consensys/gnark-crypto v0.14.0 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
+	github.com/swaggo/echo-swagger v1.4.1 // indirect
+	github.com/swaggo/files/v2 v2.0.1 // indirect
+	github.com/swaggo/swag v1.16.4 // indirect
+	golang.org/x/tools v0.28.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 	rsc.io/tmplfunc v0.0.3 // indirect
 )
@@ -55,11 +70,11 @@ require (
 	github.com/valyala/fasttemplate v1.2.2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	go.uber.org/mock v0.5.0
-	golang.org/x/crypto v0.29.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
 	golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
-	golang.org/x/net v0.31.0 // indirect
-	golang.org/x/sys v0.27.0 // indirect
-	golang.org/x/text v0.20.0
+	golang.org/x/net v0.32.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/text v0.21.0
 	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 	google.golang.org/protobuf v1.35.1
diff --git a/go.sum b/go.sum
index 44794ad5..f1eebc34 100644
--- a/go.sum
+++ b/go.sum
@@ -59,9 +59,15 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/NathanBaulch/protoc-gen-cobra v1.2.1 h1:BOqX9glwicbqDJDGndMnhHhx8psGTSjGdZzRDY1a7A8= github.com/NathanBaulch/protoc-gen-cobra v1.2.1/go.mod h1:ZLPLEPQgV3jP3a7IEp+xxYPk8tF4lhY9ViV0hn6K3iA= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.2.1 h1:QsZ4TjvwiMpat6gBCBxEQI0rcS9ehtkKtSpiUnd9N28= +github.com/PuerkitoBio/purell v1.2.1/go.mod h1:ZwHcC/82TOaovDi//J/804umJFFmbOHPngi8iYYv/Eo= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -155,6 +161,7 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -171,6 +178,14 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-mail/mail/v2 v2.3.0 h1:wha99yf2v3cpUzD1V9ujP404Jbw2uEvs+rBJybkdYcw= github.com/go-mail/mail/v2 v2.3.0/go.mod h1:oE2UK8qebZAjjV1ZYUpY7FPnbi/kIU53l1dmqPRb4go= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= @@ -368,6 
+383,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -405,6 +422,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0= github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM= +github.com/labstack/echo/v4 v4.13.1 h1:q3+CpQlYhJwpRr9+08pX0IdlabTIpnIhrg2AKPSKhFE= +github.com/labstack/echo/v4 v4.13.1/go.mod h1:61j7WN2+bp8V21qerqRs4yVlVTGyOagMBpF0vE7VcmM= github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= @@ -443,6 +462,8 @@ github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5 github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -654,6 +675,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= +github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= +github.com/swaggo/files/v2 v2.0.1 h1:XCVJO/i/VosCDsJu1YLpdejGsGnBE9deRMpjN4pJLHk= +github.com/swaggo/files/v2 v2.0.1/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod 
h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -719,6 +746,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -807,6 +836,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -841,6 +872,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -925,6 +957,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -938,6 +972,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -998,6 +1034,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/platforms/web/http/http.go b/internal/platforms/web/http/http.go index 5c93d8cc..26cd5d24 100644 --- a/internal/platforms/web/http/http.go +++ b/internal/platforms/web/http/http.go @@ -1,10 +1,15 @@ package http import ( + // External packages. "github.com/labstack/echo/v4" + // Internal packages. "github.com/pagu-project/Pagu/config" + // Importing Pagu/docs to include API documentation in the binary. 
+ _ "github.com/pagu-project/Pagu/docs" "github.com/pagu-project/Pagu/internal/engine" "github.com/pagu-project/Pagu/pkg/log" + swagger "github.com/swaggo/echo-swagger" ) type HTTPServer struct { @@ -23,25 +28,26 @@ func NewHTTPServer(be *engine.BotEngine, cfg *config.HTTP) HTTPServer { func (hs *HTTPServer) Start() error { log.Info("Starting HTTP Server", "listen", hs.cfg.Listen) - hs.eServer.GET("/commands", hs.handler.GetCommands) + hs.eServer.GET("/swagger/*", swagger.WrapHandler) + hs.eServer.GET("/api/v1/commands", hs.handler.GetCommands) hs.eServer.POST("/api/v1/calculate/reward", hs.handler.CalculateReward) hs.eServer.POST("/api/v1/calculate/fee", hs.handler.CalculateFee) - hs.eServer.POST("/api/v1/calculate/help", hs.handler.CalculateHelp) - hs.eServer.POST("/api/v1/network/health", hs.handler.NetworkHealth) + hs.eServer.GET("/api/v1/calculate/help", hs.handler.CalculateHelp) + hs.eServer.GET("/api/v1/network/health", hs.handler.NetworkHealth) hs.eServer.POST("/api/v1/network/node-info", hs.handler.NetworkNodeInfo) hs.eServer.POST("/api/v1/network/status", hs.handler.NetworkNodeStatus) - hs.eServer.POST("/api/v1/network/help", hs.handler.NetworkHelp) + hs.eServer.GET("/api/v1/network/help", hs.handler.NetworkHelp) hs.eServer.POST("/api/v1/voucher/claim", hs.handler.VoucherClaim) hs.eServer.POST("/api/v1/voucher/create-one", hs.handler.VoucherCreateOne) hs.eServer.POST("/api/v1/voucher/create-bulk", hs.handler.VoucherCreateBulk) hs.eServer.POST("/api/v1/voucher/status", hs.handler.VoucherStatus) - hs.eServer.POST("/api/v1/voucher/help", hs.handler.VoucherHelp) - hs.eServer.POST("/api/v1/market/price", hs.handler.MarketPrice) - hs.eServer.POST("/api/v1/market/help", hs.handler.MarketHelp) + hs.eServer.GET("/api/v1/voucher/help", hs.handler.VoucherHelp) + hs.eServer.GET("/api/v1/market/price", hs.handler.MarketPrice) + hs.eServer.GET("/api/v1/market/help", hs.handler.MarketHelp) hs.eServer.POST("/api/v1/phoenix/faucet", hs.handler.PhoenixFaucet) - hs.eServer.POST("/api/v1/phoenix/status", hs.handler.PhoenixStatus) - hs.eServer.POST("/api/v1/phoenix/help", hs.handler.PhoenixHelp) - hs.eServer.POST("/api/v1/help", hs.handler.Help) + hs.eServer.GET("/api/v1/phoenix/status", hs.handler.PhoenixStatus) + hs.eServer.GET("/api/v1/phoenix/help", hs.handler.PhoenixHelp) + hs.eServer.GET("/api/v1/help", hs.handler.Help) return hs.eServer.Start(hs.cfg.Listen) } diff --git a/internal/platforms/web/http/http_handler.go b/internal/platforms/web/http/http_handler.go index 1d5a6cbc..e815c536 100644 --- a/internal/platforms/web/http/http_handler.go +++ b/internal/platforms/web/http/http_handler.go @@ -20,6 +20,13 @@ func NewHTTPHandler(be *engine.BotEngine) HTTPHandler { } } +// GetCommands retrieves the list of commands associated with this object. +// @Summary Get available commands +// @Description Retrieve a list of all available commands +// @Tags Commands +// @Produce json +// @Success 200 {object} GetCommandsResponse +// @Router /commands [get]. func (hh *HTTPHandler) GetCommands(c echo.Context) error { log.Info("New request received for GetCommands.") commands := hh.engine.Commands() @@ -32,6 +39,15 @@ func (hh *HTTPHandler) GetCommands(c echo.Context) error { }) } +// CalculateReward calculates the reward for a node. 
+// CalculateReward calculates the reward for a node.
+// @Summary Calculate reward
+// @Description Calculate the reward based on the provided stake and number of days
+// @Tags Calculate
+// @Accept json
+// @Produce json
+// @Param request body CalculateRewardRequest true "Calculate Reward Request"
+// @Success 200 {object} BasicResponse "Calculation result"
+// @Router /calculate/reward [post].
 func (hh *HTTPHandler) CalculateReward(c echo.Context) error {
 	r := new(CalculateRewardRequest)
 	if err := c.Bind(r); err != nil {
@@ -53,6 +69,15 @@ func (hh *HTTPHandler) CalculateReward(c echo.Context) error {
 	})
 }

+// CalculateFee calculates the transaction fee based on the provided amount.
+// @Summary Calculate fee
+// @Description Calculate the fee based on the provided amount
+// @Tags Calculate
+// @Accept json
+// @Produce json
+// @Param request body CalculateFeeRequest true "Calculate Fee Request"
+// @Success 200 {object} BasicResponse "Calculation result"
+// @Router /calculate/fee [post].
 func (hh *HTTPHandler) CalculateFee(c echo.Context) error {
 	r := new(CalculateFeeRequest)
 	if err := c.Bind(r); err != nil {
@@ -73,6 +98,14 @@ func (hh *HTTPHandler) CalculateFee(c echo.Context) error {
 	})
 }

+// CalculateHelp returns help for calculate commands.
+// @Summary Calculate help information
+// @Description Calculate help information
+// @Tags Calculate
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse "Successful calculation result"
+// @Router /calculate/help [get].
 func (hh *HTTPHandler) CalculateHelp(c echo.Context) error {
 	commands := []string{"calculate", "help"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -87,6 +120,13 @@ func (hh *HTTPHandler) CalculateHelp(c echo.Context) error {
 	})
 }

+// NetworkHealth returns the health status of the network.
+// @Summary Get network health
+// @Description Retrieve the health status of the network
+// @Tags Network
+// @Produce json
+// @Success 200 {object} BasicResponse
+// @Router /network/health [get].
 func (hh *HTTPHandler) NetworkHealth(c echo.Context) error {
 	commands := []string{"network", "health"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -101,6 +141,15 @@ func (hh *HTTPHandler) NetworkHealth(c echo.Context) error {
 	})
 }

+// NetworkNodeInfo returns information about a network node.
+// @Summary Get Network Node Information
+// @Description Retrieves information about a network node based on the provided validator address.
+// @Tags Network
+// @Accept json
+// @Produce json
+// @Param request body NetworkNodeInfoRequest true "Validator Address"
+// @Success 200 {object} BasicResponse "The result of the command"
+// @Router /network/node-info [post].
 func (hh *HTTPHandler) NetworkNodeInfo(c echo.Context) error {
 	r := new(NetworkNodeInfoRequest)
 	if err := c.Bind(r); err != nil {
@@ -121,6 +170,15 @@ func (hh *HTTPHandler) NetworkNodeInfo(c echo.Context) error {
 	})
 }

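+// Example (illustrative only): querying the node-info endpoint with curl.
+// The JSON field name below is a placeholder, since NetworkNodeInfoRequest is
+// defined elsewhere in this package; check its binding tags for the real name:
+//
+//	curl -X POST http://localhost:8080/api/v1/network/node-info \
+//	  -H 'Content-Type: application/json' \
+//	  -d '{"validator_address": "<validator address>"}'
+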
+// NetworkNodeStatus returns the status of the network node.
+// @Summary Get network node status
+// @Description Retrieves the current status of the network node.
+// @Tags Network
+// @Accept json
+// @Produce json
+// @Param body object true "Request body"
+// @Success 200 {object} BasicResponse "Successfully retrieved network node status"
+// @Router /network/status [post].
 func (hh *HTTPHandler) NetworkNodeStatus(c echo.Context) error {
 	commands := []string{"network", "status"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -135,6 +193,14 @@ func (hh *HTTPHandler) NetworkNodeStatus(c echo.Context) error {
 	})
 }

+// NetworkHelp returns help data for network commands.
+// @Summary Provides network help information
+// @Description Provides network help information
+// @Tags Network
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse{result=CommandResult} "Successfully retrieved network help"
+// @Router /network/help [get].
 func (hh *HTTPHandler) NetworkHelp(c echo.Context) error {
 	commands := []string{"network", "help"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -149,6 +215,15 @@ func (hh *HTTPHandler) NetworkHelp(c echo.Context) error {
 	})
 }

+// VoucherClaim claims a voucher.
+// @Summary Claim a voucher using the provided code and address.
+// @Description This endpoint allows users to claim a voucher by providing a valid code and their address.
+// @Tags Voucher
+// @Accept json
+// @Produce json
+// @Param voucherClaimRequest body VoucherClaimRequest true "Voucher Claim Request"
+// @Success 200 {object} BasicResponse "Voucher claim successful"
+// @Router /voucher/claim [post].
 func (hh *HTTPHandler) VoucherClaim(c echo.Context) error {
 	r := new(VoucherClaimRequest)
 	if err := c.Bind(r); err != nil {
@@ -170,6 +245,15 @@ func (hh *HTTPHandler) VoucherClaim(c echo.Context) error {
 	})
 }

+// VoucherCreateOne creates a voucher.
+// @Summary Creates a new voucher
+// @Description Creates a voucher.
+// @Tags Voucher
+// @Accept json
+// @Produce json
+// @Param voucherCreateOneRequest body VoucherCreateOneRequest true "Voucher details"
+// @Success 200 {object} BasicResponse "Voucher creation successful"
+// @Router /voucher/create-one [post].
 func (hh *HTTPHandler) VoucherCreateOne(c echo.Context) error {
 	r := new(VoucherCreateOneRequest)
 	if err := c.Bind(r); err != nil {
@@ -193,6 +277,16 @@ func (hh *HTTPHandler) VoucherCreateOne(c echo.Context) error {
 	})
 }

+// VoucherCreateBulk creates vouchers in bulk.
+// @Summary Create vouchers in bulk
+// @Description This API allows you to create multiple vouchers.
+// @Tags Voucher
+// @Accept json
+// @Produce json
+// @Param file formData string true "File containing voucher data"
+// @Param notify formData string false "Notification flag (optional)"
+// @Success 200 {object} BasicResponse "Command result containing the status of the bulk voucher creation"
+// @Router /voucher/create-bulk [post].
 func (hh *HTTPHandler) VoucherCreateBulk(c echo.Context) error {
 	r := new(VoucherCreateBulkRequest)
 	if err := c.Bind(r); err != nil {
@@ -214,6 +308,15 @@ func (hh *HTTPHandler) VoucherCreateBulk(c echo.Context) error {
 	})
 }

+// VoucherStatus checks the status of a voucher.
+// @Summary Check the status of a voucher
+// @Description Accepts a voucher code and returns the status of the voucher.
+// @Tags Voucher
+// @Accept json
+// @Produce json
+// @Param request body VoucherStatusRequest true "Voucher Status Request"
+// @Success 200 {object} BasicResponse "Voucher status response"
+// @Router /voucher/status [post].
 func (hh *HTTPHandler) VoucherStatus(c echo.Context) error {
 	r := new(VoucherStatusRequest)
 	if err := c.Bind(r); err != nil {
@@ -234,6 +337,14 @@ func (hh *HTTPHandler) VoucherStatus(c echo.Context) error {
 	})
 }

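+// Example (illustrative only): unlike the JSON endpoints, the bulk-creation
+// endpoint above consumes multipart form data. The file name and the expected
+// layout of its contents are assumptions here; VoucherCreateBulkRequest,
+// defined elsewhere in this package, is authoritative:
+//
+//	curl -X POST http://localhost:8080/api/v1/voucher/create-bulk \
+//	  -F 'file=@vouchers.csv' \
+//	  -F 'notify=true'
+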
+// VoucherHelp returns help information for the voucher command.
+// @Summary Get help information for the voucher command
+// @Description Executes the 'voucher help' command and returns the result.
+// @Tags Voucher
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse "Successful response with command result"
+// @Router /voucher/help [get].
 func (hh *HTTPHandler) VoucherHelp(c echo.Context) error {
 	commands := []string{"voucher", "help"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -248,6 +359,14 @@ func (hh *HTTPHandler) VoucherHelp(c echo.Context) error {
 	})
 }

+// MarketPrice returns the current market price.
+// @Summary Get Market Price
+// @Description Retrieves the current market price information
+// @Tags Market
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse{result=CommandResult} "Successfully retrieved the market price"
+// @Router /market/price [get].
 func (hh *HTTPHandler) MarketPrice(c echo.Context) error {
 	commands := []string{"market", "price"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -262,6 +381,14 @@ func (hh *HTTPHandler) MarketPrice(c echo.Context) error {
 	})
 }

+// MarketHelp returns market help information.
+// @Summary Get help for the market command
+// @Description This endpoint processes the "market help" command and returns relevant details about the market command.
+// @Tags Market
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse{Result=CommandResult} "Command help details"
+// @Router /market/help [get].
 func (hh *HTTPHandler) MarketHelp(c echo.Context) error {
 	commands := []string{"market", "help"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -276,6 +403,15 @@ func (hh *HTTPHandler) MarketHelp(c echo.Context) error {
 	})
 }

+// PhoenixFaucet initiates a faucet request for the Phoenix.
+// @Summary Initiates a faucet request for the Phoenix
+// @Description Initiates a faucet request for the Phoenix
+// @Tags Phoenix
+// @Accept json
+// @Produce json
+// @Param request body PhoenixFaucetRequest true "Faucet request with address"
+// @Success 200 {object} BasicResponse "Successful response containing faucet status"
+// @Router /phoenix/faucet [post].
 func (hh *HTTPHandler) PhoenixFaucet(c echo.Context) error {
 	r := new(PhoenixFaucetRequest)
 	if err := c.Bind(r); err != nil {
@@ -296,6 +432,14 @@ func (hh *HTTPHandler) PhoenixFaucet(c echo.Context) error {
 	})
 }

+// PhoenixStatus returns the Phoenix status.
+// @Summary Get Phoenix Status
+// @Description Get Phoenix Status
+// @Tags Phoenix
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse "Status of the Phoenix application"
+// @Router /phoenix/status [get].
 func (hh *HTTPHandler) PhoenixStatus(c echo.Context) error {
 	commands := []string{"phoenix", "status"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -310,6 +454,14 @@ func (hh *HTTPHandler) PhoenixStatus(c echo.Context) error {
 	})
 }

+// PhoenixHelp returns Phoenix help information.
+// @Summary Executes Phoenix help command
+// @Description Executes Phoenix help command.
+// @Tags Phoenix
+// @Accept json
+// @Produce json
+// @Success 200 {object} BasicResponse{result=CommandResult}
+// @Router /phoenix/help [get].
 func (hh *HTTPHandler) PhoenixHelp(c echo.Context) error {
 	commands := []string{"phoenix", "help"}
 	cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil)
@@ -324,6 +476,14 @@ func (hh *HTTPHandler) PhoenixHelp(c echo.Context) error {
 	})
 }

+// Help returns help information.
+// @Summary Executes a help command and returns the result. +// @Description This endpoint runs the 'help' command and returns the result. +// @Tags Help +// @Accept json +// @Produce json +// @Success 200 {object} BasicResponse{Result=CommandResult} "Help command result" +// @Router /help [get]. func (hh *HTTPHandler) Help(c echo.Context) error { commands := []string{"help"} cmdResult := hh.engine.Run(entity.AppIDHTTP, c.RealIP(), commands, nil) diff --git a/vendor/github.com/KyleBanks/depth/.gitignore b/vendor/github.com/KyleBanks/depth/.gitignore new file mode 100644 index 00000000..8c2e171e --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +bin/ diff --git a/vendor/github.com/KyleBanks/depth/.travis.yml b/vendor/github.com/KyleBanks/depth/.travis.yml new file mode 100644 index 00000000..ab506f93 --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/.travis.yml @@ -0,0 +1,9 @@ +language: go +sudo: false +go: + - 1.9.x +before_install: + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +#script: go test $(go list ./... | grep -v vendor/) diff --git a/vendor/github.com/KyleBanks/depth/LICENSE b/vendor/github.com/KyleBanks/depth/LICENSE new file mode 100644 index 00000000..070b426b --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Kyle Banks + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/KyleBanks/depth/Makefile b/vendor/github.com/KyleBanks/depth/Makefile new file mode 100644 index 00000000..acd35bb4 --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/Makefile @@ -0,0 +1,32 @@ +VERSION = 1.2.1 + +RELEASE_PKG = ./cmd/depth +INSTALL_PKG = $(RELEASE_PKG) + + +# Remote includes require 'mmake' +# github.com/tj/mmake +include github.com/KyleBanks/make/go/install +include github.com/KyleBanks/make/go/sanity +include github.com/KyleBanks/make/go/release +include github.com/KyleBanks/make/go/bench +include github.com/KyleBanks/make/git/precommit + +# Runs a number of depth commands as examples of what's possible. 
+example: | install + depth github.com/KyleBanks/depth/cmd/depth strings ./ + + depth -internal strings + + depth -json github.com/KyleBanks/depth/cmd/depth + + depth -test github.com/KyleBanks/depth/cmd/depth + + depth -test -internal strings + + depth -test -internal -max 3 strings + + depth . + + depth ./cmd/depth +.PHONY: example diff --git a/vendor/github.com/KyleBanks/depth/README.md b/vendor/github.com/KyleBanks/depth/README.md new file mode 100644 index 00000000..0de954fd --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/README.md @@ -0,0 +1,232 @@ +# depth + +[![GoDoc](https://godoc.org/github.com/KyleBanks/depth?status.svg)](https://godoc.org/github.com/KyleBanks/depth)  +[![Build Status](https://travis-ci.org/KyleBanks/depth.svg?branch=master)](https://travis-ci.org/KyleBanks/depth)  +[![Go Report Card](https://goreportcard.com/badge/github.com/KyleBanks/depth)](https://goreportcard.com/report/github.com/KyleBanks/depth)  +[![Coverage Status](https://coveralls.io/repos/github/KyleBanks/depth/badge.svg?branch=master)](https://coveralls.io/github/KyleBanks/depth?branch=master) + +`depth` is tool to retrieve and visualize Go source code dependency trees. + +## Install + +Download the appropriate binary for your platform from the [Releases](https://github.com/KyleBanks/depth/releases) page, or: + +```sh +go get github.com/KyleBanks/depth/cmd/depth +``` + +## Usage + +`depth` can be used as a standalone command-line application, or as a package within your own project. + +### Command-Line + +Simply execute `depth` with one or more package names to visualize. You can use the fully qualified import path of the package, like so: + +```sh +$ depth github.com/KyleBanks/depth/cmd/depth +github.com/KyleBanks/depth/cmd/depth + ├ encoding/json + ├ flag + ├ fmt + ├ io + ├ log + ├ os + ├ strings + └ github.com/KyleBanks/depth + ├ fmt + ├ go/build + ├ path + ├ sort + └ strings +12 dependencies (11 internal, 1 external, 0 testing). +``` + +Or you can use a relative path, for example: + +```sh +$ depth . +$ depth ./cmd/depth +$ depth ../ +``` + +You can also use `depth` on the Go standard library: + +```sh +$ depth strings +strings + ├ errors + ├ io + ├ unicode + └ unicode/utf8 +5 dependencies (5 internal, 0 external, 0 testing). +``` + +Visualizing multiple packages at a time is supported by simply naming the packages you'd like to visualize: + +```sh +$ depth strings github.com/KyleBanks/depth +strings + ├ errors + ├ io + ├ unicode + └ unicode/utf8 +5 dependencies (5 internal, 0 external, 0 testing). +github.com/KyleBanks/depth + ├ fmt + ├ go/build + ├ path + ├ sort + └ strings +7 dependencies (7 internal, 0 external, 0 testing). +``` + +#### `-internal` + +By default, `depth` only resolves the top level of dependencies for standard library packages, however you can use the `-internal` flag to visualize all internal dependencies: + +```sh +$ depth -internal strings +strings + ├ errors + ├ io + ├ errors + └ sync + ├ internal/race + └ unsafe + ├ runtime + ├ runtime/internal/atomic + └ unsafe + ├ runtime/internal/sys + └ unsafe + ├ sync/atomic + └ unsafe + └ unsafe + ├ unicode + └ unicode/utf8 +12 dependencies (12 internal, 0 external, 0 testing). +``` + +#### `-max` + +The `-max` flag limits the dependency tree to the maximum depth provided. 
For example, if you supply `-max 1` on the `depth` package, your output would look like so: + +``` +$ depth -max 1 github.com/KyleBanks/depth/cmd/depth +github.com/KyleBanks/depth/cmd/depth + ├ encoding/json + ├ flag + ├ fmt + ├ io + ├ log + ├ os + ├ strings + └ github.com/KyleBanks/depth +7 dependencies (6 internal, 1 external, 0 testing). +``` + +The `-max` flag is particularly useful in conjunction with the `-internal` flag which can lead to very deep dependency trees. + +#### `-test` + +By default, `depth` ignores dependencies that are only required for testing. However, you can view test dependencies using the `-test` flag: + +```sh +$ depth -test strings +strings + ├ bytes + ├ errors + ├ fmt + ├ io + ├ io/ioutil + ├ math/rand + ├ reflect + ├ sync + ├ testing + ├ unicode + ├ unicode/utf8 + └ unsafe +13 dependencies (13 internal, 0 external, 8 testing). +``` + +#### `-explain target-package` + +The `-explain` flag instructs `depth` to print import chains in which the +`target-package` is found: + +```sh +$ depth -explain strings github.com/KyleBanks/depth/cmd/depth +github.com/KyleBanks/depth/cmd/depth -> strings +github.com/KyleBanks/depth/cmd/depth -> github.com/KyleBanks/depth -> strings +``` + +#### `-json` + +The `-json` flag instructs `depth` to output dependencies in JSON format: + +```sh +$ depth -json github.com/KyleBanks/depth/cmd/depth +{ + "name": "github.com/KyleBanks/depth/cmd/depth", + "deps": [ + { + "name": "encoding/json", + "internal": true, + "deps": null + }, + ... + { + "name": "github.com/KyleBanks/depth", + "internal": false, + "deps": [ + { + "name": "go/build", + "internal": true, + "deps": null + }, + ... + ] + } + ] +} +``` + +### Integrating With Your Project + +The `depth` package can easily be used to retrieve the dependency tree for a particular package in your own project. For example, here's how you would retrieve the dependency tree for the `strings` package: + +```go +import "github.com/KyleBanks/depth" + +var t depth.Tree +err := t.Resolve("strings") +if err != nil { + log.Fatal(err) +} + +// Output: "'strings' has 4 dependencies." +log.Printf("'%v' has %v dependencies.", t.Root.Name, len(t.Root.Deps)) +``` + +For additional customization, simply set the appropriate flags on the `Tree` before resolving: + +```go +import "github.com/KyleBanks/depth" + +t := depth.Tree { + ResolveInternal: true, + ResolveTest: true, + MaxDepth: 10, +} + + +err := t.Resolve("strings") +``` + +## Author + +`depth` was developed by [Kyle Banks](https://twitter.com/kylewbanks). + +## License + +`depth` is available under the [MIT](./LICENSE) license. diff --git a/vendor/github.com/KyleBanks/depth/depth.go b/vendor/github.com/KyleBanks/depth/depth.go new file mode 100644 index 00000000..115c28ac --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/depth.go @@ -0,0 +1,129 @@ +// Package depth provides the ability to traverse and retrieve Go source code dependencies in the form of +// internal and external packages. +// +// For example, the dependencies of the stdlib `strings` package can be resolved like so: +// +// import "github.com/KyleBanks/depth" +// +// var t depth.Tree +// err := t.Resolve("strings") +// if err != nil { +// log.Fatal(err) +// } +// +// // Output: "strings has 4 dependencies." 
+// log.Printf("%v has %v dependencies.", t.Root.Name, len(t.Root.Deps)) +// +// For additional customization, simply set the appropriate flags on the `Tree` before resolving: +// +// import "github.com/KyleBanks/depth" +// +// t := depth.Tree { +// ResolveInternal: true, +// ResolveTest: true, +// MaxDepth: 10, +// } +// err := t.Resolve("strings") +package depth + +import ( + "errors" + "go/build" + "os" +) + +// ErrRootPkgNotResolved is returned when the root Pkg of the Tree cannot be resolved, +// typically because it does not exist. +var ErrRootPkgNotResolved = errors.New("unable to resolve root package") + +// Importer defines a type that can import a package and return its details. +type Importer interface { + Import(name, srcDir string, im build.ImportMode) (*build.Package, error) +} + +// Tree represents the top level of a Pkg and the configuration used to +// initialize and represent its contents. +type Tree struct { + Root *Pkg + + ResolveInternal bool + ResolveTest bool + MaxDepth int + + Importer Importer + + importCache map[string]struct{} +} + +// Resolve recursively finds all dependencies for the root Pkg name provided, +// and the packages it depends on. +func (t *Tree) Resolve(name string) error { + pwd, err := os.Getwd() + if err != nil { + return err + } + + t.Root = &Pkg{ + Name: name, + Tree: t, + SrcDir: pwd, + Test: false, + } + + // Reset the import cache each time to ensure a reused Tree doesn't + // reuse the same cache. + t.importCache = nil + + // Allow custom importers, but use build.Default if none is provided. + if t.Importer == nil { + t.Importer = &build.Default + } + + t.Root.Resolve(t.Importer) + if !t.Root.Resolved { + return ErrRootPkgNotResolved + } + + return nil +} + +// shouldResolveInternal determines if internal packages should be further resolved beyond the +// current parent. +// +// For example, if the parent Pkg is `github.com/foo/bar` and true is returned, all the +// internal dependencies it relies on will be resolved. If for example `strings` is one of those +// dependencies, and it is passed as the parent here, false may be returned and its internal +// dependencies will not be resolved. +func (t *Tree) shouldResolveInternal(parent *Pkg) bool { + if t.ResolveInternal { + return true + } + + return parent == t.Root +} + +// isAtMaxDepth returns true when the depth of the Pkg provided is at or beyond the maximum +// depth allowed by the tree. +// +// If the Tree has a MaxDepth of zero, true is never returned. +func (t *Tree) isAtMaxDepth(p *Pkg) bool { + if t.MaxDepth == 0 { + return false + } + + return p.depth() >= t.MaxDepth +} + +// hasSeenImport returns true if the import name provided has already been seen within the tree. +// This function only returns false for a name once. +func (t *Tree) hasSeenImport(name string) bool { + if t.importCache == nil { + t.importCache = make(map[string]struct{}) + } + + if _, ok := t.importCache[name]; ok { + return true + } + t.importCache[name] = struct{}{} + return false +} diff --git a/vendor/github.com/KyleBanks/depth/pkg.go b/vendor/github.com/KyleBanks/depth/pkg.go new file mode 100644 index 00000000..1d54d717 --- /dev/null +++ b/vendor/github.com/KyleBanks/depth/pkg.go @@ -0,0 +1,184 @@ +package depth + +import ( + "bytes" + "go/build" + "path" + "sort" + "strings" +) + +// Pkg represents a Go source package, and its dependencies. 
+type Pkg struct { + Name string `json:"name"` + SrcDir string `json:"-"` + + Internal bool `json:"internal"` + Resolved bool `json:"resolved"` + Test bool `json:"-"` + + Tree *Tree `json:"-"` + Parent *Pkg `json:"-"` + Deps []Pkg `json:"deps"` + + Raw *build.Package `json:"-"` +} + +// Resolve recursively finds all dependencies for the Pkg and the packages it depends on. +func (p *Pkg) Resolve(i Importer) { + // Resolved is always true, regardless of if we skip the import, + // it is only false if there is an error while importing. + p.Resolved = true + + name := p.cleanName() + if name == "" { + return + } + + // Stop resolving imports if we've reached max depth or found a duplicate. + var importMode build.ImportMode + if p.Tree.hasSeenImport(name) || p.Tree.isAtMaxDepth(p) { + importMode = build.FindOnly + } + + pkg, err := i.Import(name, p.SrcDir, importMode) + if err != nil { + // TODO: Check the error type? + p.Resolved = false + return + } + p.Raw = pkg + + // Update the name with the fully qualified import path. + p.Name = pkg.ImportPath + + // If this is an internal dependency, we may need to skip it. + if pkg.Goroot { + p.Internal = true + if !p.Tree.shouldResolveInternal(p) { + return + } + } + + //first we set the regular dependencies, then we add the test dependencies + //sharing the same set. This allows us to mark all test-only deps linearly + unique := make(map[string]struct{}) + p.setDeps(i, pkg.Imports, pkg.Dir, unique, false) + if p.Tree.ResolveTest { + p.setDeps(i, append(pkg.TestImports, pkg.XTestImports...), pkg.Dir, unique, true) + } +} + +// setDeps takes a slice of import paths and the source directory they are relative to, +// and creates the Deps of the Pkg. Each dependency is also further resolved prior to being added +// to the Pkg. +func (p *Pkg) setDeps(i Importer, imports []string, srcDir string, unique map[string]struct{}, isTest bool) { + for _, imp := range imports { + // Mostly for testing files where cyclic imports are allowed. + if imp == p.Name { + continue + } + + // Skip duplicates. + if _, ok := unique[imp]; ok { + continue + } + unique[imp] = struct{}{} + + p.addDep(i, imp, srcDir, isTest) + } + + sort.Sort(byInternalAndName(p.Deps)) +} + +// addDep creates a Pkg and it's dependencies from an imported package name. +func (p *Pkg) addDep(i Importer, name string, srcDir string, isTest bool) { + dep := Pkg{ + Name: name, + SrcDir: srcDir, + Tree: p.Tree, + Parent: p, + Test: isTest, + } + dep.Resolve(i) + + p.Deps = append(p.Deps, dep) +} + +// isParent goes recursively up the chain of Pkgs to determine if the name provided is ever a +// parent of the current Pkg. +func (p *Pkg) isParent(name string) bool { + if p.Parent == nil { + return false + } + + if p.Parent.Name == name { + return true + } + + return p.Parent.isParent(name) +} + +// depth returns the depth of the Pkg within the Tree. +func (p *Pkg) depth() int { + if p.Parent == nil { + return 0 + } + + return p.Parent.depth() + 1 +} + +// cleanName returns a cleaned version of the Pkg name used for resolving dependencies. +// +// If an empty string is returned, dependencies should not be resolved. +func (p *Pkg) cleanName() string { + name := p.Name + + // C 'package' cannot be resolved. 
+ if name == "C" { + return "" + } + + // Internal golang_org/* packages must be prefixed with vendor/ + // + // Thanks to @davecheney for this: + // https://github.com/davecheney/graphpkg/blob/master/main.go#L46 + if strings.HasPrefix(name, "golang_org") { + name = path.Join("vendor", name) + } + + return name +} + +// String returns a string representation of the Pkg containing the Pkg name and status. +func (p *Pkg) String() string { + b := bytes.NewBufferString(p.Name) + + if !p.Resolved { + b.Write([]byte(" (unresolved)")) + } + + return b.String() +} + +// byInternalAndName ensures a slice of Pkgs are sorted such that the internal stdlib +// packages are always above external packages (ie. github.com/whatever). +type byInternalAndName []Pkg + +func (b byInternalAndName) Len() int { + return len(b) +} + +func (b byInternalAndName) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byInternalAndName) Less(i, j int) bool { + if b[i].Internal && !b[j].Internal { + return true + } else if !b[i].Internal && b[j].Internal { + return false + } + + return b[i].Name < b[j].Name +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000..e256a31e --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000..0e9d6edc --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000..7805d36d --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000..0200f75b --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. 
+ +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000..58600740 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. 
+type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
+ nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
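+	// kelvin and smallLongEss are the only non-ASCII runes that ASCII letters
+	// case-fold to; foldFunc below special-cases 'k'/'K' and 's'/'S' for them.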
+ kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. 
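+// For example, the tag `name,omitempty,string` is split into the name "name"
+// and the options "omitempty,string".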
+func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 00000000..4fb4054a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). 
+	// So, convert the YAML-compatible object to a JSON-compatible object,
+	// failing with an error if irrecoverable incompatibilities happen along
+	// the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce. Otherwise return it as-is.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value is between 32-bit
+				// and 64-bit. Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Stolen from go-yaml to use the same conversion to string as
+				// the go-yaml library uses to convert float to string when
+				// Marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+ jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 00000000..3152da69 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 00000000..769c2440 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 00000000..22f8d21c --- /dev/null +++ 
b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
new file mode 100644
index 00000000..0108f1d5
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -0,0 +1,19 @@
+# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
+
+An implementation of JSON Pointer - Go language
+
+## Status
+Completed YES
+
+Tested YES
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+Section 4 (Evaluation) of the draft referenced above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
new file mode 100644
index 00000000..d970c7cf
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -0,0 +1,531 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author sigu-399
+// author-github https://github.com/sigu-399
+// author-mail sigu.399@gmail.com
+//
+// repository-name jsonpointer
+// repository-desc An implementation of JSON Pointer - Go language
+//
+// description Main and unique file.
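+//
+// Example pointers: "" addresses the whole document, "/foo/0" the first
+// element of the array under key "foo", and "/a~1b" the key "a/b"
+// ("/" is escaped as "~1" and "~" as "~0").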
+//
+// created 25-02-2013

package jsonpointer

import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+const (
+	emptyPointer     = ``
+	pointerSeparator = `/`
+
+	invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+	notFound     = `Can't find the pointer in the document`
+)
+
+var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
+var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
+
+// JSONPointable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONPointable interface {
+	JSONLookup(string) (any, error)
+}
+
+// JSONSetable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONSetable interface {
+	JSONSet(string, any) error
+}
+
+// New creates a new json pointer for the given string
+func New(jsonPointerString string) (Pointer, error) {
+
+	var p Pointer
+	err := p.parse(jsonPointerString)
+	return p, err
+
+}
+
+// Pointer is the json pointer representation
+type Pointer struct {
+	referenceTokens []string
+}
+
+// "Constructor", parses the given string JSON pointer
+func (p *Pointer) parse(jsonPointerString string) error {
+
+	var err error
+
+	if jsonPointerString != emptyPointer {
+		if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
+			err = errors.New(invalidStart)
+		} else {
+			referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
+			p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
+		}
+	}
+
+	return err
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
+	return p.get(document, swag.DefaultJSONNameProvider)
+}
+
+// Set uses the pointer to set a value in a JSON document
+func (p *Pointer) Set(document any, value any) (any, error) {
+	return document, p.set(document, value, swag.DefaultJSONNameProvider)
+}
+
+// GetForToken gets a value for a json pointer token 1 level deep
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
+	return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+// SetForToken sets a value for a json pointer token 1 level deep
+func SetForToken(document any, decodedToken string, value any) (any, error) {
+	return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+func isNil(input any) bool {
+	if input == nil {
+		return true
+	}
+
+	kind := reflect.TypeOf(input).Kind()
+	switch kind { //nolint:exhaustive
+	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+		return reflect.ValueOf(input).IsNil()
+	default:
+		return false
+	}
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
+	rValue := reflect.Indirect(reflect.ValueOf(node))
+	kind := rValue.Kind()
+	if isNil(node) {
+		return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken)
+	}
+
+	switch typed := node.(type) {
+	case JSONPointable:
+		r, err := typed.JSONLookup(decodedToken)
+		if err != nil {
+			return nil, kind, err
+		}
+		return r, kind, nil
+	case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+		return getSingleImpl(*typed, decodedToken, nameProvider)
+	}
+
+	switch kind { //nolint:exhaustive
+	case reflect.Struct:
+		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+		if !ok {
+			return nil, kind, fmt.Errorf("object has no 
field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + switch rValue.Kind() { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node = r + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return errors.New("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if 
rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + + switch kind { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + 
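+				// A nested container here is a value, not a key; drain it
+				// whole so only top-level keys are compared against the token.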
if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. +func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 00000000..769c2440 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml new file mode 100644 index 00000000..22f8d21c --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters 
+ - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 00000000..c7fc2049 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,19 @@ +# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) + +An implementation of JSON Reference - Go language + +## Status +Feature complete. 
Stable API + +## Dependencies +* https://github.com/go-openapi/jsonpointer + +## References + +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 00000000..f0610cf1 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,69 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 00000000..cfdef03e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,158 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. 
+// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. +// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + internal.NormalizeURL(parsed) + + r.referenceURL = parsed + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 00000000..3152da69 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 00000000..f47cb204 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1 @@ +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 00000000..22f8d21c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 00000000..7fd2810c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,54 @@ +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages ? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 00000000..122993b4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 00000000..2f7bb219 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 00000000..fc889f6d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. 
+var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 00000000..1f428475 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,17 @@ +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 00000000..6992c7ba --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 00000000..b81a5699 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,607 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. 
+// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. This is a file, not a directory + SkipSchemas bool // do not expand schemas, just paths, parameters and responses + ContinueOnError bool // continue expanding even after an error is found + PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document + AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs +} + +func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { + if opts != nil { + clone := *opts // shallow clone to avoid internal changes to be propagated to the caller + if clone.RelativeBase != "" { + clone.RelativeBase = normalizeBase(clone.RelativeBase) + } + // if the relative base is empty, let the schema loader choose a pseudo root document + return &clone + } + return &ExpandOptions{} +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(spec, options, nil, nil) + + specBasePath := options.RelativeBase + + if !options.SkipSchemas { + for key, definition := range spec.Definitions { + parentRefs := make([]string, 0, 10) + parentRefs = append(parentRefs, "#/definitions/"+key) + + def, err := expandSchema(definition, parentRefs, resolver, specBasePath) + if resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + pth := spec.Paths.Paths[key] + if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = pth + } + } + + return nil +} + +const rootBase = ".root" + +// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry +// for further $ref resolution +func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + + if root == nil { + // ensure that we never leave a nil root: always cache the root base pseudo-document + cachedRoot, found := cache.Get(normalizedBase) + if found && cachedRoot != nil { + // the cache is already preloaded with a root + return normalizedBase + } + + root = map[string]interface{}{} + } + + cache.Set(normalizedBase, root) + + return normalizedBase +} + +// ExpandSchema expands the refs in the schema object with reference to the root object. +// +// go-openapi/validate uses this function. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandSchemaWithBasePath to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. 
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. 
We leave them as ref. + // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + var t *Schema + err := resolver.Resolve(&target.Ref, &t, basePath) + if resolver.shouldStopOnError(err) { + return nil, err + } + + if t == nil { + // guard for when continuing on error + return &target, nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + if pathItem.Ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) + basePath = transitiveResolver.updateBasePath(resolver, basePath) + resolver = transitiveResolver + } + + pathItem.Ref = Ref{} + for i := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses == nil { + return nil + } + + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandResponse to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, sch, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil && sch == nil { // nothing to do + return nil + } + + parentRefs := make([]string, 0, 10) + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) + } + + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + } + } + + // $ref expansion or rebasing is performed by expandSchema below + if ref != nil { + *ref = Ref{} + } + + // expand schema + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 00000000..88add91b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 00000000..9dfd17b1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
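As a usage sketch of the exported expansion helpers above (ExpandResponse, ExpandParameter and their *WithRoot variants): the file name and document layout below are invented for illustration, not part of the patch.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	// read a document fragment containing a response object (hypothetical file)
    	raw, err := os.ReadFile("response-fragment.json")
    	if err != nil {
    		panic(err)
    	}

    	var resp spec.Response
    	if err := json.Unmarshal(raw, &resp); err != nil {
    		panic(err)
    	}

    	// resolve every $ref inside the response, relative to the fragment's location
    	if err := spec.ExpandResponse(&resp, "response-fragment.json"); err != nil {
    		panic(err)
    	}

    	fmt.Println(resp.Description)
    }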
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 00000000..582f0fd4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
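The fluent builders on Header above compose naturally, and the custom MarshalJSON flattens the embedded structs into a single JSON object. A minimal sketch (the header semantics are invented):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	h := spec.ResponseHeader().
    		WithDescription("requests remaining in the current window").
    		Typed("integer", "int32")

    	b, _ := json.Marshal(h)
    	fmt.Println(string(b))
    	// {"type":"integer","format":"int32","description":"requests remaining in the current window"}
    }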
+ +package spec + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetInt gets an int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + +// GetBool gets a bool value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string slice value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]interface{}) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
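The Extensions map lower-cases keys on Add, and the typed getters look keys up case-insensitively; note that GetInt only understands string and float64 values (the two forms a JSON round-trip produces). A sketch with an invented extension name:

    package main

    import (
    	"fmt"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	ext := spec.Extensions{}
    	ext.Add("X-Request-Limit", "100") // stored under "x-request-limit"
    	ext.Add("X-Internal", true)

    	if v, ok := ext.GetInt("x-request-limit"); ok {
    		fmt.Println(v) // 100 (parsed from the string form)
    	}
    	if v, ok := ext.GetBool("X-INTERNAL"); ok {
    		fmt.Println(v) // true
    	}
    }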
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 00000000..e2afb213 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 00000000..b42f8036 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 00000000..e8b60099 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+// +// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: +// 'file:///c:/path/file.yaml' +// +// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or +// 'file:///c:\folder\File.json'. +// +// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" +// is attempted. +// +// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). +func normalizeURI(refPath, base string) string { + refURL, err := parseURL(refPath) + if err != nil { + specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) + refURL, refPath = repairURI(refPath) + } + + fixWindowsURI(refURL, refPath) // noop on non-windows OS + + refURL.Path = path.Clean(refURL.Path) + if refURL.Path == "." { + refURL.Path = "" + } + + r := MustCreateRef(refURL.String()) + if r.IsCanonical() { + return refURL.String() + } + + baseURL, _ := parseURL(base) + if path.IsAbs(refURL.Path) { + baseURL.Path = refURL.Path + } else if refURL.Path != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + + return baseURL.String() +} + +// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. +// +// When calling this, we assume that: +// * $ref is a canonical URI +// * originalRelativeBase is a canonical URI +// +// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. +// In this case, expansion stops and normally renders the internal canonical $ref. +// +// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. +// +// There is a special case for schemas that are anchored with an "id": +// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document. +// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. +func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { + debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + // short circuit: $ref to current doc + return *ref + } + + if id != "" { + idBaseURL, err := parseURL(id) + if err == nil { // if the schema id is not usable as a URI, ignore it + if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "") + // $ref relative to the ID of the schema in the root document + return ref + } + } + } + + originalRelativeBaseURL, _ := parseURL(originalRelativeBase) + + r, _ := rebase(ref, originalRelativeBaseURL, false) + + return r +} + +func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { + var newBase url.URL + + u := ref.GetURL() + + if u.Scheme != v.Scheme || u.Host != v.Host { + return *ref, false + } + + docPath := v.Path + v.Path = path.Dir(v.Path) + + if v.Path == "."
{ + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if strings.HasPrefix(u.Path, docPath) { + newBase.Path = strings.TrimPrefix(u.Path, docPath) + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 00000000..f19f1a8f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. 
+// +// The parameter must be a path, not an URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(_ *url.URL, _ string) { +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 00000000..a66c532d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,154 @@ +// -build windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +} + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. +// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". 
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 00000000..a69cca88 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,400 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
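The net effect of normalizeBase and normalizeURI above is easiest to state by example. Since both helpers are unexported, the following is a hypothetical in-package test sketch (unix-style paths assumed):

    package spec

    import "fmt"

    func ExampleNormalizeURI() {
    	// anchor a local path as a canonical file URI
    	base := normalizeBase("/tmp/base/spec.json")
    	fmt.Println(base)

    	// resolve a relative $ref against that base, keeping the fragment
    	ref := normalizeURI("./sub/file.json#/definitions/X", base)
    	fmt.Println(ref)
    	// Output:
    	// file:///tmp/base/spec.json
    	// file:///tmp/base/sub/file.json#/definitions/X
    }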
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present, must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marshaller here to handle a special case related to +// the Security field. We need to preserve a zero-length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this operation instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this operation object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance.
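The nil-versus-empty distinction handled by this custom marshaller matters in Swagger 2.0, where an empty security array on an operation overrides any global security requirement. A sketch of both outputs:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	var op spec.Operation

    	b, _ := json.Marshal(op)
    	fmt.Println(string(b)) // {} : Security is nil, the field is omitted

    	op.Security = []map[string][]string{} // empty, but non-nil
    	b, _ = json.Marshal(op)
    	fmt.Println(string(b)) // {"security":[]}
    }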
+// It expects an ID as parameter, but passing an empty ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params, the external documents will be removed. +// When you pass a non-empty string for either value, it will be set on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprecated +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation; when a parameter already exists +// for that location and name, it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation.
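Chained together, the builders above read almost like the resulting spec. A sketch with invented names:

    package main

    import (
    	"fmt"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	op := spec.NewOperation("listItems").
    		WithSummary("List items").
    		WithTags("items").
    		WithProduces("application/json").
    		WithExternalDocs("full reference", "https://example.com/docs/items").
    		SecuredWith("api_key") // scheme name, no scopes

    	fmt.Println(op.ID, op.Tags, op.Security)
    }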
+// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = 
append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 00000000..bd4f1cdb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,326 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. 
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 00000000..68fc8e90 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
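The constructors above cover each parameter location; combined with the shared fluent setters, a short sketch (names are invented, and the body schema is left empty here):

    package main

    import (
    	"fmt"

    	"github.com/go-openapi/spec"
    )

    func main() {
    	id := spec.PathParam("itemId").Typed("string", "uuid")
    	limit := spec.QueryParam("limit").Typed("integer", "int32").WithDefault(20)
    	item := spec.BodyParam("item", &spec.Schema{}) // schema elided in this sketch

    	// path parameters are always required; WithDefault implies optional
    	fmt.Println(id.Required, limit.Required, item.In) // true false body
    }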
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 00000000..9dc82a29 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 00000000..91d2435f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,91 @@ +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Name string + Schema +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the name schemas +// of the OrderSchemaItems slice, keeping the original order of the slice. 
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 00000000..b0ef9bd9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
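A short sketch of the x-order property ordering implemented by properties.go above; the property names are illustrative, and the numeric extension values are assumed to be float64, as they would arrive from JSON decoding:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	id := *spec.Int64Property()
	id.AddExtension("x-order", 1.0) // float64, as a JSON-decoded number would be
	name := *spec.StringProperty()
	name.AddExtension("x-order", 2.0)

	props := spec.SchemaProperties{"name": name, "id": id}
	b, err := json.Marshal(props)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "id" is emitted before "name", regardless of map order
}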
+
+package spec
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+ Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+ return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+ return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+ jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+ if r.String() == "" {
+ return ""
+ }
+
+ u := *r.GetURL()
+ u.Fragment = ""
+ return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+ if r.String() == "" {
+ return true
+ }
+
+ v := r.RemoteURI()
+ if v == "" {
+ return true
+ }
+
+ if r.HasFullURL {
+ //nolint:noctx,gosec
+ rr, err := http.Get(v)
+ if err != nil {
+ return false
+ }
+ defer rr.Body.Close()
+
+ return rr.StatusCode/100 == 2
+ }
+
+ if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+ return false
+ }
+
+ // check for local file
+ pth := v
+ if r.HasURLPathOnly {
+ base := "."
+ if len(basepaths) > 0 {
+ base = filepath.Dir(filepath.Join(basepaths...))
+ }
+ p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+ if e != nil {
+ return false
+ }
+ pth = p
+ }
+
+ fi, err := os.Stat(filepath.ToSlash(pth))
+ if err != nil {
+ return false
+ }
+
+ return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ ref, err := r.Ref.Inherits(child.Ref)
+ if err != nil {
+ return nil, err
+ }
+ return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object
+// returns an error when the reference uri is an invalid uri
+func NewRef(refURI string) (Ref, error) {
+ ref, err := jsonreference.New(refURI)
+ if err != nil {
+ return Ref{}, err
+ }
+ return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
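+//
+// A typical use (illustrative fragment; the ref target is hypothetical):
+//
+//	ref := MustCreateRef("#/definitions/User") // panics if the URI is malformed
+//	b, _ := ref.MarshalJSON()                  // {"$ref":"#/definitions/User"}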
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 00000000..47d1ee13 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) {
+ return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
+ result := new(Response)
+
+ err := resolveAnyWithBase(root, &ref, result, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+ return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item against a context root and base path
+func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+ result := new(PathItem)
+
+ if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolvePathItem resolves a path item against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
+func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+ return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref are forbidden in response headers.
+func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+ result := new(Items)
+
+ if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveItems resolves a parameter items reference against a context root and base path.
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+ return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 00000000..0340b60d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+ Description string `json:"description"`
+ Schema *Schema `json:"schema,omitempty"`
+ Headers map[string]Header `json:"headers,omitempty"`
+ Examples map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` + }{ + Description: r.ResponseProps.Description, + Schema: r.ResponseProps.Schema, + Examples: r.ResponseProps.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+ r.Schema = schema
+ return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+ if header == nil {
+ return r.RemoveHeader(name)
+ }
+ if r.Headers == nil {
+ r.Headers = make(map[string]Header)
+ }
+ r.Headers[name] = *header
+ return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+ delete(r.Headers, name)
+ return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+ if r.Examples == nil {
+ r.Examples = make(map[string]interface{})
+ }
+ r.Examples[mediaType] = example
+ return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 00000000..16c3076f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,140 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected from the documentation to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 00000000..4e9be857 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,645 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
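A hedged sketch of the Response and Responses builders defined in response.go and responses.go above; the $ref targets and the description are illustrative assumptions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A 200 response whose body is an array of a hypothetical "item" definition.
	ok := spec.NewResponse().
		WithDescription("a page of results").
		WithSchema(spec.ArrayProperty(spec.RefProperty("#/definitions/item")))

	all := spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default:             spec.ResponseRef("#/responses/genericError"),
			StatusCodeResponses: map[int]spec.Response{200: *ok},
		},
	}

	b, err := json.Marshal(all)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"200":{...},"default":{"$ref":"#/responses/genericError"}}
}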
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON 
unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+ return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+ s.MaxLength = &max
+ return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+ s.MinLength = &min
+ return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+ s.Pattern = pattern
+ return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+ s.MultipleOf = &number
+ return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+ s.Maximum = &max
+ s.ExclusiveMaximum = exclusive
+ return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+ s.Minimum = &min
+ s.ExclusiveMinimum = exclusive
+ return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+ s.Enum = append([]interface{}{}, values...)
+ return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+ s.MaxItems = &size
+ return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+ s.MinItems = &size
+ return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+ s.UniqueItems = true
+ return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+ s.UniqueItems = false
+ return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+ s.AllOf = append(s.AllOf, schemas...)
+ return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+ s.Discriminator = discriminator
+ return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+ s.ReadOnly = true
+ return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+ s.ReadOnly = false
+ return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+ s.Example = example
+ return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params, the external documents will be removed.
+// When you pass a non-empty string for either value, those values will be set on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
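+//
+// For example (illustrative values):
+//
+//	s.WithExternalDocs("find more info here", "https://example.com/docs") // sets both fields
+//	s.WithExternalDocs("", "")                                            // removes the external docs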
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// SetValidations defines all schema validations. +// +// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
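+//
+// For example (an illustrative pattern constraint):
+//
+//	s.SetValidations(SchemaValidations{
+//		CommonValidations: CommonValidations{Pattern: "^[a-z]+$"},
+//	})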
+func (s *Schema) SetValidations(val SchemaValidations) {
+ s.Maximum = val.Maximum
+ s.ExclusiveMaximum = val.ExclusiveMaximum
+ s.Minimum = val.Minimum
+ s.ExclusiveMinimum = val.ExclusiveMinimum
+ s.MaxLength = val.MaxLength
+ s.MinLength = val.MinLength
+ s.Pattern = val.Pattern
+ s.MaxItems = val.MaxItems
+ s.MinItems = val.MinItems
+ s.UniqueItems = val.UniqueItems
+ s.MultipleOf = val.MultipleOf
+ s.Enum = val.Enum
+ s.MinProperties = val.MinProperties
+ s.MaxProperties = val.MaxProperties
+ s.PatternProperties = val.PatternProperties
+}
+
+// WithValidations is a fluent method to set schema validations
+func (s *Schema) WithValidations(val SchemaValidations) *Schema {
+ s.SetValidations(val)
+ return s
+}
+
+// Validations returns a clone of the validations for this schema
+func (s Schema) Validations() SchemaValidations {
+ return SchemaValidations{
+ CommonValidations: CommonValidations{
+ Maximum: s.Maximum,
+ ExclusiveMaximum: s.ExclusiveMaximum,
+ Minimum: s.Minimum,
+ ExclusiveMinimum: s.ExclusiveMinimum,
+ MaxLength: s.MaxLength,
+ MinLength: s.MinLength,
+ Pattern: s.Pattern,
+ MaxItems: s.MaxItems,
+ MinItems: s.MinItems,
+ UniqueItems: s.UniqueItems,
+ MultipleOf: s.MultipleOf,
+ Enum: s.Enum,
+ },
+ MinProperties: s.MinProperties,
+ MaxProperties: s.MaxProperties,
+ PatternProperties: s.PatternProperties,
+ }
+}
+
+// MarshalJSON marshal this to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("schema props %v", err)
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, fmt.Errorf("vendor props %v", err)
+ }
+ b3, err := s.Ref.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("ref prop %v", err)
+ }
+ b4, err := s.Schema.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("schema prop %v", err)
+ }
+ b5, err := json.Marshal(s.SwaggerSchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("common validations %v", err)
+ }
+ var b6 []byte
+ if s.ExtraProps != nil {
+ jj, err := json.Marshal(s.ExtraProps)
+ if err != nil {
+ return nil, fmt.Errorf("extra props %v", err)
+ }
+ b6 = jj
+ }
+ return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this schema from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+ props := struct {
+ SchemaProps
+ SwaggerSchemaProps
+ }{}
+ if err := json.Unmarshal(data, &props); err != nil {
+ return err
+ }
+
+ sch := Schema{
+ SchemaProps: props.SchemaProps,
+ SwaggerSchemaProps: props.SwaggerSchemaProps,
+ }
+
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ _ = sch.Ref.fromMap(d)
+ _ = sch.Schema.fromMap(d)
+
+ delete(d, "$ref")
+ delete(d, "$schema")
+ for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+ delete(d, pn)
+ }
+
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if sch.Extensions == nil {
+ sch.Extensions = map[string]interface{}{}
+ }
+ sch.Extensions[k] = vv
+ continue
+ }
+ if sch.ExtraProps == nil {
+ sch.ExtraProps = map[string]interface{}{}
+ }
+ sch.ExtraProps[k] = vv
+ }
+
+ *s = sch
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 00000000..0059b99a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,331 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil 
{ + return nil + } + + var ( + res interface{} + data interface{} + err error + ) + + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeRef(ref, basePath) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + var err error + pth := toFetch.String() + normalized := normalizeBase(pth) + debugLog("loading doc from: %s", normalized) + + data, fromCache := r.cache.Get(normalized) + if fromCache { + return data, toFetch, fromCache, nil + } + + b, err := r.context.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + var doc interface{} + if err := json.Unmarshal(b, &doc); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, doc) + + return doc, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target. +// +// Resolve is not in charge of following references: it only resolves ref by following its URL. +// +// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. +// +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. 
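+ // (passing a non-empty ExpandOptions.RelativeBase skips this defaulting)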
+ expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 00000000..bcbb8474 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + 
"oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 00000000..ebe10ed3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 00000000..9d0bdae9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // when not oauth2, empty AuthorizationURL should be omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 00000000..876aa127 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
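An aside for readers of this patch: the security scheme constructors vendored above compose with AddScope. A minimal usage sketch follows; the authorization URL and scope name are invented for illustration, and the package main wrapper is mine.

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/go-openapi/spec"
)

func main() {
    // Declare an implicit-flow OAuth2 scheme and attach a scope to it.
    scheme := spec.OAuth2Implicit("https://example.org/oauth/authorize")
    scheme.AddScope("read:pets", "read access to pets")

    // Per the MarshalJSON above, implicit and accessCode flows keep
    // authorizationUrl even when empty; other scheme types omit it.
    b, err := json.MarshalIndent(scheme, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}
```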
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+    // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+    SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+    // JSONSchemaURL the url for the json schema
+    JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+    d, e := JSONSchemaDraft04()
+    if e != nil {
+        panic(e)
+    }
+    return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft04
+func JSONSchemaDraft04() (*Schema, error) {
+    b, err := jsonschemaDraft04JSONBytes()
+    if err != nil {
+        return nil, err
+    }
+
+    schema := new(Schema)
+    if err := json.Unmarshal(b, schema); err != nil {
+        return nil, err
+    }
+    return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+    d, e := Swagger20Schema()
+    if e != nil {
+        panic(e)
+    }
+    return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+    b, err := v2SchemaJSONBytes()
+    if err != nil {
+        return nil, err
+    }
+
+    schema := new(Schema)
+    if err := json.Unmarshal(b, schema); err != nil {
+        return nil, err
+    }
+    return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 00000000..1590fd17
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+    "bytes"
+    "encoding/gob"
+    "encoding/json"
+    "fmt"
+    "strconv"
+
+    "github.com/go-openapi/jsonpointer"
+    "github.com/go-openapi/swag"
+)
+
+// Swagger this is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
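A short sketch of the loaders just added: since the schemas are embedded assets, the Must* variants are reasonable at package init time. The wrapper program below is illustrative only.

```go
package main

import (
    "fmt"

    "github.com/go-openapi/spec"
)

func main() {
    // Load the embedded Swagger 2.0 meta-schema; this panics only if the
    // vendored asset is corrupt.
    schema := spec.MustLoadSwagger20Schema()
    fmt.Println(schema.ID)

    // The draft-04 JSON Schema ships alongside it.
    draft4, err := spec.JSONSchemaDraft04()
    if err != nil {
        panic(err)
    }
    fmt.Println(draft4.ID)
}
```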
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) > 0 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !bytes.Equal(data, []byte("false")) + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + 
var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 00000000..faa3d3de --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
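The StringOrArray and SchemaOrArray types above share one idea: inspect the first byte of the payload and pick a decoding strategy. A minimal sketch of the observable behavior, with sample JSON inputs of my own choosing:

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/go-openapi/spec"
)

func main() {
    // "type" in a schema may be a bare string or an array of strings;
    // StringOrArray absorbs both shapes.
    var t spec.StringOrArray
    _ = json.Unmarshal([]byte(`"string"`), &t)
    fmt.Println(t.Contains("string")) // true

    _ = json.Unmarshal([]byte(`["string","null"]`), &t)
    fmt.Println(t.Contains("null")) // true

    // SchemaOrArray applies the same first-byte dispatch for "items".
    var items spec.SchemaOrArray
    _ = json.Unmarshal([]byte(`{"type":"integer"}`), &items)
    fmt.Println(items.Len()) // 1
}
```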
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 00000000..5bdfe40b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,11 @@ +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 00000000..6360a8ea --- /dev/null +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,215 @@ +package spec + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored. +func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value interface{} +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, interface{})) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. 
+func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
+func (v CommonValidations) Validations() SchemaValidations {
+    return SchemaValidations{
+        CommonValidations: v,
+    }
+}
+
+// HasNumberValidations indicates if the validations are for numbers or integers
+func (v CommonValidations) HasNumberValidations() bool {
+    return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil
+}
+
+// HasStringValidations indicates if the validations are for strings
+func (v CommonValidations) HasStringValidations() bool {
+    return v.MaxLength != nil || v.MinLength != nil || v.Pattern != ""
+}
+
+// HasArrayValidations indicates if the validations are for arrays
+func (v CommonValidations) HasArrayValidations() bool {
+    return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems
+}
+
+// HasEnum indicates if the validation includes some enum constraint
+func (v CommonValidations) HasEnum() bool {
+    return len(v.Enum) > 0
+}
+
+// SchemaValidations describes the validation properties of a schema
+//
+// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change
+// in the exported members: all initializers using literals would fail.
+type SchemaValidations struct {
+    CommonValidations
+
+    PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+    MaxProperties     *int64           `json:"maxProperties,omitempty"`
+    MinProperties     *int64           `json:"minProperties,omitempty"`
+}
+
+// HasObjectValidations indicates if the validations are for objects
+func (v SchemaValidations) HasObjectValidations() bool {
+    return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil
+}
+
+// SetValidations for schema validations
+func (v *SchemaValidations) SetValidations(val SchemaValidations) {
+    v.CommonValidations.SetValidations(val)
+    v.PatternProperties = val.PatternProperties
+    v.MaxProperties = val.MaxProperties
+    v.MinProperties = val.MinProperties
+}
+
+// Validations for a schema
+func (v SchemaValidations) Validations() SchemaValidations {
+    val := v.CommonValidations.Validations()
+    val.PatternProperties = v.PatternProperties
+    val.MinProperties = v.MinProperties
+    val.MaxProperties = v.MaxProperties
+    return val
+}
+
+// ClearObjectValidations clears all object validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) {
+    done := make(clearedValidations, 0, 3)
+    defer func() {
+        done.apply(cbs)
+    }()
+
+    if v.MaxProperties != nil {
+        done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties})
+        v.MaxProperties = nil
+    }
+    if v.MinProperties != nil {
+        done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties})
+        v.MinProperties = nil
+    }
+    if v.PatternProperties != nil {
+        done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties})
+        v.PatternProperties = nil
+    }
+}
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 00000000..945a4670
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
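To tie the validations.go helpers together, a small sketch combining SetValidations, the callback-based clearing, and the Has* predicates. All sample values are hypothetical:

```go
package main

import (
    "fmt"

    "github.com/go-openapi/spec"
)

func main() {
    max := 100.0
    minProps := int64(1)

    var v spec.SchemaValidations
    v.SetValidations(spec.SchemaValidations{
        CommonValidations: spec.CommonValidations{Maximum: &max, Pattern: `^\d+$`},
        MinProperties:     &minProps,
    })

    // Drop the number validations, recording which ones were cleared.
    v.ClearNumberValidations(func(name string, _ interface{}) {
        fmt.Println("cleared:", name) // cleared: maximum
    })

    fmt.Println(v.HasNumberValidations()) // false
    fmt.Println(v.HasStringValidations()) // true: the pattern survives
    fmt.Println(v.HasObjectValidations()) // true: minProperties survives
}
```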
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject is a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+	Name      string `json:"name,omitempty"`
+	Namespace string `json:"namespace,omitempty"`
+	Prefix    string `json:"prefix,omitempty"`
+	Attribute bool   `json:"attribute,omitempty"`
+	Wrapped   bool   `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+	x.Name = name
+	return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+	x.Namespace = namespace
+	return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+	x.Prefix = prefix
+	return x
+}
+
+// AsAttribute flags this object as an xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+	x.Attribute = true
+	return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+	x.Attribute = false
+	return x
+}
+
+// AsWrapped flags this object as wrapped; this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+	x.Wrapped = true
+	return x
+}
+
+// AsUnwrapped flags this object as unwrapped
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+	x.Wrapped = false
+	return x
+}
diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches either package.json or .travis.yml exactly
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes
new file mode 100644
index 00000000..49ad5276
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.gitattributes
@@ -0,0 +1,2 @@
+# gofmt always uses LF, whereas Git uses CRLF on Windows.
+*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore new file mode 100644 index 00000000..c4b1b64f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +vendor +Godeps +.idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 00000000..80e2be00 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,60 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 3 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 00000000..e7f28ed6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
new file mode 100644
index 00000000..a7292229
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -0,0 +1,23 @@
+# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag)
+
+Contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+* convert between values and pointers for builtin types
+* convert from string to builtin types (wraps strconv)
+* fast json concatenation
+* search in path
+* load from file or http
+* name mangling
+
+
+This repo has only a few dependencies outside of the standard library:
+
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go
new file mode 100644
index 00000000..fc085aeb
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert.go
@@ -0,0 +1,208 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package swag + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + fa := math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) + + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + switch { + case f == g: // best case + return true + case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case + return true + case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon +} + +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } +} + +// ConvertBool turn a string into a boolean +func ConvertBool(str string) (bool, error) { + _, ok := evaluatesAsTrue[strings.ToLower(str)] + return ok, nil +} + +// ConvertFloat32 turn a string into a float32 +func ConvertFloat32(str string) (float32, error) { + f, err := strconv.ParseFloat(str, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// ConvertFloat64 turn a string into a float64 +func ConvertFloat64(str string) (float64, error) { + return strconv.ParseFloat(str, 64) +} + +// ConvertInt8 turn a string into an int8 +func ConvertInt8(str string) (int8, error) { + i, err := strconv.ParseInt(str, 10, 8) + if err != nil { + return 0, err + } + return int8(i), nil +} + +// ConvertInt16 turn a string into an int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turn a string into an int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turn a string into an int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turn a string into an uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turn a string into an uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turn a string into an uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turn a string into an uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// FormatBool turns a boolean into a string +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} + +// FormatFloat32 turns a float32 into a string +func FormatFloat32(value float32) 
string { + return strconv.FormatFloat(float64(value), 'f', -1, 32) +} + +// FormatFloat64 turns a float64 into a string +func FormatFloat64(value float64) string { + return strconv.FormatFloat(value, 'f', -1, 64) +} + +// FormatInt8 turns an int8 into a string +func FormatInt8(value int8) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt16 turns an int16 into a string +func FormatInt16(value int16) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt32 turns an int32 into a string +func FormatInt32(value int32) string { + return strconv.Itoa(int(value)) +} + +// FormatInt64 turns an int64 into a string +func FormatInt64(value int64) string { + return strconv.FormatInt(value, 10) +} + +// FormatUint8 turns an uint8 into a string +func FormatUint8(value uint8) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint16 turns an uint16 into a string +func FormatUint16(value uint16) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint32 turns an uint32 into a string +func FormatUint32(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint64 turns an uint64 into a string +func FormatUint64(value uint64) string { + return strconv.FormatUint(value, 10) +} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go new file mode 100644 index 00000000..c49cc473 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -0,0 +1,730 @@ +package swag + +import "time" + +// This file was taken from the aws go sdk + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
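+//
+// For example, BoolValue(Bool(true)) returns true, while BoolValue(nil)
+// safely returns false instead of panicking; the same nil-guard pattern
+// applies to all the XxxValue accessors in this file.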
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to of the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to of the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Uint returns a pointer to of the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. +func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values into a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers into a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values into a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers into a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to of the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to of the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to of the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
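+//
+// For example, TimeValue(nil) returns the zero time.Time{}, so callers can
+// test the result with IsZero() rather than checking the pointer themselves.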
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 00000000..55094cb7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,31 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling + +This repo has only few dependencies outside of the standard library: + + - YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 00000000..16accc55 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. 
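+//
+// It bundles a (multipart.File, *multipart.FileHeader) pair, such as the one
+// returned by (*http.Request).FormFile, and the Read/Close methods below let
+// it act as an io.ReadCloser.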
+type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 00000000..20a359bb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + 
commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. +type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go new file mode 100644 index 00000000..7e9902ca --- /dev/null +++ b/vendor/github.com/go-openapi/swag/json.go @@ -0,0 +1,312 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "encoding/json" + "log" + "reflect" + "strings" + "sync" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +// DefaultJSONNameProvider the default cache for types +var DefaultJSONNameProvider = NewNameProvider() + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +type ejMarshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +type ejUnmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler +// so it takes the fastest option available. 
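+//
+// Concretely: a value implementing easyjson's MarshalEasyJSON is serialized
+// through that fast path, a json.Marshaler through its MarshalJSON method,
+// and anything else falls back to the reflection-based json.Marshal.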
+func WriteJSON(data interface{}) ([]byte, error) { + if d, ok := data.(ejMarshaler); ok { + jw := new(jwriter.Writer) + d.MarshalEasyJSON(jw) + return jw.BuildBytes() + } + if d, ok := data.(json.Marshaler); ok { + return d.MarshalJSON() + } + return json.Marshal(data) +} + +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler +// so it takes the fastest option available +func ReadJSON(data []byte, value interface{}) error { + trimmedData := bytes.Trim(data, "\x00") + if d, ok := value.(ejUnmarshaler); ok { + jl := &jlexer.Lexer{Data: trimmedData} + d.UnmarshalEasyJSON(jl) + return jl.Error() + } + if d, ok := value.(json.Unmarshaler); ok { + return d.UnmarshalJSON(trimmedData) + } + return json.Unmarshal(trimmedData, value) +} + +// DynamicJSONToStruct converts an untyped json structure into a struct +func DynamicJSONToStruct(data interface{}, target interface{}) error { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := WriteJSON(data) + if err != nil { + return err + } + return ReadJSON(b, target) +} + +// ConcatJSON concatenates multiple json objects efficiently +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + if len(b) < 3 { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + if err := buf.WriteByte(comma); err != nil { + log.Println(err) + } + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + if _, err := buf.Write(b[idx : len(b)-1]); err != nil { + log.Println(err) + } + } else { // last one, strip only the leading bracket + if _, err := buf.Write(b[idx:]); err != nil { + log.Println(err) + } + } + a++ + } + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 { + if err := buf.WriteByte(opening); err != nil { + log.Println(err) + } + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + return buf.Bytes() +} + +// ToDynamicJSON turns an object into a properly JSON typed structure +func ToDynamicJSON(data interface{}) interface{} { + // TODO: convert straight to a json typed map (mergo + iterate?) 
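+	// for now, round-trip through encoding/json: marshal the input and
+	// unmarshal it into an untyped interface{} (errors are only logged)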
+ b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + var res interface{} + if err := json.Unmarshal(b, &res); err != nil { + log.Println(err) + } + return res +} + +// FromDynamicJSON turns an object into a properly JSON typed structure +func FromDynamicJSON(data, target interface{}) error { + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + return json.Unmarshal(b, target) +} + +// NameProvider represents an object capable of translating from go property names +// to json property names +// This type is thread-safe. +type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = 
n.makeNameIndex(tpe)
+	}
+	nme, ok := names.jsonNames[name]
+	return nme, ok
+}
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
new file mode 100644
index 00000000..783442fd
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -0,0 +1,176 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// LoadHTTPTimeout the default timeout for load requests
+var LoadHTTPTimeout = 30 * time.Second
+
+// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth
+var LoadHTTPBasicAuthUsername = ""
+
+// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth
+var LoadHTTPBasicAuthPassword = ""
+
+// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests
+var LoadHTTPCustomHeaders = map[string]string{}
+
+// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+	return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
+}
+
+// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
+// timeout arg allows for per request overriding of the request timeout
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+	return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
+}
+
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file` becomes `/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { + return remote + } + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) + if err != nil { + return nil, err + } + + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) + } + } + + return local(filepath.FromSlash(upth)) + } +} + +func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { + return func(path string) ([]byte, error) { + client := &http.Client{Timeout: timeout} + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx + if err != nil { + return nil, err + } + + if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { + req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + } + + for key, val := range LoadHTTPCustomHeaders { + req.Header.Set(key, val) + } + + resp, err := client.Do(req) + defer func() { + if resp != nil { + if e := resp.Body.Close(); e != nil { + log.Println(e) + } + } + }() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } + + return io.ReadAll(resp.Body) + } +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go new file mode 100644 index 00000000..8bb64ac3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -0,0 +1,93 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "unicode" + "unicode/utf8" +) + +type ( + lexemKind uint8 + + nameLexem struct { + original string + matchedInitialism string + kind lexemKind + } +) + +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, + original: original, + } +} + +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) + + for i, orig := range l.original { + if i == 0 { + first = orig + continue + } + + if i > 0 { + rest = l.original[i:] + break + } + } + + if len(l.original) > 1 { + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() + } + + return l.original +} + +func (l nameLexem) GetOriginal() string { + return l.original +} + +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go new file mode 100644 index 00000000..821235f8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/net.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// The port is -1 when there is no port to be found +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go new file mode 100644 index 00000000..941bd017 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/path.go @@ -0,0 +1,59 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + // GOPATHKey represents the env key for gopath + GOPATHKey = "GOPATH" +) + +// FindInSearchPath finds a package in a provided lists of paths +func FindInSearchPath(searchPath, pkg string) string { + pathsList := filepath.SplitList(searchPath) + for _, path := range pathsList { + if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { + if _, err := os.Stat(evaluatedPath); err == nil { + return evaluatedPath + } + } + } + return "" +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +func FindInGoSearchPath(pkg string) string { + return FindInSearchPath(FullGoSearchPath(), pkg) +} + +// FullGoSearchPath gets the search paths for finding packages +func FullGoSearchPath() string { + allPaths := os.Getenv(GOPATHKey) + if allPaths == "" { + allPaths = filepath.Join(os.Getenv("HOME"), "go") + } + if allPaths != "" { + allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") + } else { + allPaths = runtime.GOROOT() + } + return allPaths +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go new file mode 100644 index 00000000..274727a8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/split.go @@ -0,0 +1,508 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "sync" + "unicode" + "unicode/utf8" +) + +type ( + splitter struct { + initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. 
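+	//
+	// For illustration, every pool below follows the same borrow/redeem
+	// discipline:
+	//
+	//	buf := poolOfBuffers.BorrowBuffer(64)
+	//	defer poolOfBuffers.RedeemBuffer(buf)
+	//
+	// Borrowing resets the recycled object before handing it out; redeeming
+	// returns it to the underlying sync.Pool for later reuse.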
+ + matchesPool struct { + *sync.Pool + } + + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } +) + +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options +func split(str string) []string { + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) + + for _, lexem := range *lexems { + result = append(result, lexem.GetOriginal()) + } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) + + return result + +} + +func newSplitter(options ...splitterOption) splitter { + s := splitter{ + postSplitInitialismCheck: false, + initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, + } + + for _, option := range options { + option(&s) + } + + return s +} + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) { + s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) + } + + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { + nameRunes := []rune(name) + matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + + return s.mapMatchesToNameLexems(nameRunes, matches) +} + +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches 
*initialismMatches + + for currentRunePosition, currentRune := range nameRunes { + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() + + // check current initialism matches + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } + + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + *newMatches = append(*newMatches, match) + } + } + + // check for new initialism matches + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ + start: currentRunePosition, + body: initialismRunes, + complete: false, + }) + } + } + + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } + matches = newMatches + } + + // up to the caller to redeem this last slice + return matches +} + +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() + + var lastAcceptedMatch initialismMatch + for _, match := range *matches { + if !match.complete { + continue + } + + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + + continue + } + + if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { + continue + } + + middle := nameRunes[lastAcceptedMatch.end+1 : match.start] + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + } + + // we have not found any accepted matches + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + s.appendBrokenDownCasualString(nameLexems, rest) + } + + poolOfMatches.RedeemMatches(matches) + + return nameLexems +} + +func (s splitter) breakInitialism(original string) nameLexem { + return newInitialismNameLexem(original, original) +} + +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() + + addCasualNameLexem := func(original string) { + *segments = append(*segments, newCasualNameLexem(original)) + } + + addInitialismNameLexem := func(original, match string) { + *segments = append(*segments, 
newInitialismNameLexem(original, match)) + } + + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + + return + } + } + + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem + } + + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() + } + + if replace != "" { + addNameLexem(replace) + } + + continue + } + + if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() + } + + continue + } + + if unicode.IsUpper(rn) { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + } + currentSegment.Reset() + } + + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } + + i += size + } + + return true +} diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 00000000..90745d5c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. 
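+// The result aliases the string's backing array, so callers must treat it as
+// read-only: mutating it would break Go's string immutability guarantee.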
+func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go new file mode 100644 index 00000000..5051401c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/util.go @@ -0,0 +1,364 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// The prefix function is assumed to return a string that starts with an upper case letter. +// +// e.g. to help convert "123" into "{prefix}123" +// +// The default is to prefix with "X" +var GoNamePrefixFunc func(string) string + +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in + } + + return GoNamePrefixFunc(name) + in +} + +const ( + // collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = "," + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = "," + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} + +// Removes leading whitespaces +func trim(str string) string { + return strings.TrimSpace(str) +} + +// Shortcut to strings.ToUpper() +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// Shortcut to strings.ToLower() +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// Camelize an uppercased word +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + + for pos, ru := range []rune(word) { + if pos > 0 { + 
camelized.WriteRune(unicode.ToLower(ru)) + } else { + camelized.WriteRune(unicode.ToUpper(ru)) + } + } + return camelized.String() +} + +// ToFileName lowercases and underscores a go type name +func ToFileName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + + return strings.Join(out, "_") +} + +// ToCommandName lowercases and underscores a go type name +func ToCommandName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human series of words +func ToHumanNameLower(name string) string { + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) + + for _, w := range *in { + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) + } else { + out = append(out, trim(w.GetOriginal())) + } + } + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized +func ToHumanNameTitle(name string) string { + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) + if !w.IsInitialism() { + out = append(out, Camelize(original)) + } else { + out = append(out, original) + } + } + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToJSONName camelcases a name which can be underscored or pascal cased +func ToJSONName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, Camelize(trim(w))) + } + return strings.Join(out, "") +} + +// ToVarName camelcases a name which can be underscored or pascal cased +func ToVarName(name string) string { + res := ToGoName(name) + if isInitialism(res) { + return lower(res) + } + if len(res) <= 1 { + return lower(res) + } + return lower(res[:1]) + res[1:] +} + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes +func ToGoName(name string) string { + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + 
result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } + + for _, lexem := range lexemes[1:] { + goName := lexem.GetUnsafeGoName() + + // to support old behavior + if lexem.IsInitialism() { + goName = upper(goName) + } + result.WriteString(goName) + } + + return result.String() +} + +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + for _, a := range coll { + if strings.EqualFold(a, item) { + return true + } + } + return false +} + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + + // continue with slightly more complex reflection + switch v.Kind() { //nolint:exhaustive + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + default: + return false + } +} + +// CommandLineOptionsGroup represents a group of user-defined command line options +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go new file mode 100644 index 00000000..f59e0259 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -0,0 +1,481 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
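+
+// An illustrative sketch (not upstream text) of how this file's entry points compose:
+//
+//	doc, _ := BytesToYAMLDoc([]byte("a: 1")) // *yaml.Node, key order preserved
+//	raw, _ := YAMLToJSON(doc)                // json.RawMessage(`{"a":1}`)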
+ +package swag + +import ( + "encoding/json" + "errors" + "fmt" + "path/filepath" + "reflect" + "sort" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" + yaml "gopkg.in/yaml.v3" +) + +// YAMLMatcher matches yaml +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +func YAMLToJSON(data interface{}) (json.RawMessage, error) { + jm, err := transformData(data) + if err != nil { + return nil, err + } + b, err := WriteJSON(jm) + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document +func BytesToYAMLDoc(data []byte) (interface{}, error) { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, errors.New("only YAML documents that are objects are supported") + } + return &document, nil +} + +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting integer content: %w", node.Value, err)
+		}
+		return i, nil
+	case yamlFloatScalar:
+		f, err := strconv.ParseFloat(node.Value, 64)
+		if err != nil {
+			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err)
+		}
+		return f, nil
+	case yamlTimestamp:
+		return node.Value, nil
+	case yamlNull:
+		return nil, nil //nolint:nilnil
+	default:
+		return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
+	}
+}
+
+func yamlStringScalarC(node *yaml.Node) (string, error) {
+	if node.Kind != yaml.ScalarNode {
+		return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind)
+	}
+	switch node.LongTag() {
+	case yamlStringScalar, yamlIntScalar, yamlFloatScalar:
+		return node.Value, nil
+	default:
+		return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag())
+	}
+}
+
+// JSONMapSlice represents a JSON object, with the order of keys maintained
+type JSONMapSlice []JSONMapItem
+
+// MarshalJSON renders a JSONMapSlice as JSON
+func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
+	w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
+	s.MarshalEasyJSON(w)
+	return w.BuildBytes()
+}
+
+// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON
+func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) {
+	w.RawByte('{')
+
+	ln := len(s)
+	last := ln - 1
+	for i := 0; i < ln; i++ {
+		s[i].MarshalEasyJSON(w)
+		if i != last { // not the last item: write a separating comma
+			w.RawByte(',')
+		}
+	}
+
+	w.RawByte('}')
+}
+
+// UnmarshalJSON makes a JSONMapSlice from JSON
+func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
+	l := jlexer.Lexer{Data: data}
+	s.UnmarshalEasyJSON(&l)
+	return l.Error()
+}
+
+// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON
+func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	if in.IsNull() {
+		in.Skip()
+		return
+	}
+
+	var result JSONMapSlice
+	in.Delim('{')
+	for !in.IsDelim('}') {
+		var mi JSONMapItem
+		mi.UnmarshalEasyJSON(in)
+		result = append(result, mi)
+	}
+	*s = result
+}
+
+func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
+	var n yaml.Node
+	n.Kind = yaml.DocumentNode
+	var nodes []*yaml.Node
+	for _, item := range s {
+		nn, err := json2yaml(item.Value)
+		if err != nil {
+			return nil, err
+		}
+		ns := []*yaml.Node{
+			{
+				Kind:  yaml.ScalarNode,
+				Tag:   yamlStringScalar,
+				Value: item.Key,
+			},
+			nn,
+		}
+		nodes = append(nodes, ns...)
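+		// yaml.v3 mapping nodes keep alternating key/value entries in
+		// Content, so each item contributes two consecutive nodes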
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) + } +} + +// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice +type JSONMapItem struct { + Key string + Value interface{} +} + +// MarshalJSON renders a JSONMapItem as JSON +func (s JSONMapItem) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON +func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { + w.String(s.Key) + w.RawByte(':') + w.Raw(WriteJSON(s.Value)) +} + +// UnmarshalJSON makes a JSONMapItem from JSON +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON +func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { + key := in.UnsafeString() + in.WantColon() + value := in.Interface() + in.WantComma() + s.Key = key + s.Value = value +} + +func transformData(input interface{}) (out interface{}, err error) { + format := func(t interface{}) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 
10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T", k) + } + } + + switch in := input.(type) { + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) + case map[interface{}]interface{}: + o := make(JSONMapSlice, 0, len(in)) + for ke, va := range in { + var nmi JSONMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []interface{}: + len1 := len(in) + o := make([]interface{}, len1) + for i := 0; i < len1; i++ { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json +func YAMLDoc(path string) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path) + if err != nil { + return nil, err + } + + data, err := YAMLToJSON(yamlDoc) + if err != nil { + return nil, err + } + + return data, nil +} + +// YAMLData loads a yaml document from either http or a file +func YAMLData(path string) (interface{}, error) { + data, err := LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + + return BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md new file mode 100644 index 00000000..ffc44b21 --- /dev/null +++ b/vendor/github.com/josharian/intern/README.md @@ -0,0 +1,5 @@ +Docs: https://godoc.org/github.com/josharian/intern + +See also [Go issue 5160](https://golang.org/issue/5160). + +License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go new file mode 100644 index 00000000..7acb1fe9 --- /dev/null +++ b/vendor/github.com/josharian/intern/intern.go @@ -0,0 +1,44 @@ +// Package intern interns strings. +// Interning is best effort only. +// Interned strings may be removed automatically +// at any time without notification. +// All functions may be called concurrently +// with themselves and each other. +package intern + +import "sync" + +var ( + pool sync.Pool = sync.Pool{ + New: func() interface{} { + return make(map[string]string) + }, + } +) + +// String returns s, interned. +func String(s string) string { + m := pool.Get().(map[string]string) + c, ok := m[s] + if ok { + pool.Put(m) + return c + } + m[s] = s + pool.Put(m) + return s +} + +// Bytes returns b converted to a string, interned. 
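+// The m[string(b)] lookup below does not allocate: the compiler recognizes a
+// []byte-to-string conversion used only as a map key. Only a miss pays for
+// the string allocation.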
+func Bytes(b []byte) string {
+	m := pool.Get().(map[string]string)
+	c, ok := m[string(b)]
+	if ok {
+		pool.Put(m)
+		return c
+	}
+	s := string(b)
+	m[s] = s
+	pool.Put(m)
+	return s
+}
diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/josharian/intern/license.md
new file mode 100644
index 00000000..353d3055
--- /dev/null
+++ b/vendor/github.com/josharian/intern/license.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Josh Bleecher Snyder
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/labstack/echo/v4/CHANGELOG.md b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
index de3857ff..55f24418 100644
--- a/vendor/github.com/labstack/echo/v4/CHANGELOG.md
+++ b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
@@ -1,5 +1,45 @@
 # Changelog
 
+## v4.13.1 - 2024-12-11
+
+**Fixes**
+
+* Fix BindBody ignoring `Transfer-Encoding: chunked` requests by @178inaba in https://github.com/labstack/echo/pull/2717
+
+
+## v4.13.0 - 2024-12-04
+
+**BREAKING CHANGE** JWT Middleware Removed from Core: use [labstack/echo-jwt](https://github.com/labstack/echo-jwt) instead
+
+The JWT middleware has been **removed from Echo core** due to another security vulnerability, [CVE-2024-51744](https://nvd.nist.gov/vuln/detail/CVE-2024-51744). For more details, refer to issue [#2699](https://github.com/labstack/echo/issues/2699). A drop-in replacement is available in the [labstack/echo-jwt](https://github.com/labstack/echo-jwt) repository.
+
+**Important**: Direct assignments like `token := c.Get("user").(*jwt.Token)` will now cause a panic due to an invalid cast. Update your code accordingly. Replace the current `"github.com/golang-jwt/jwt"` imports in your handlers with the new middleware version using `"github.com/golang-jwt/jwt/v5"`.
+
+Background:
+
+The version of `golang-jwt/jwt` (v3.2.2) previously used in Echo core has been in an unmaintained state for some time. This is not the first vulnerability affecting this library; earlier issues were addressed in [PR #1946](https://github.com/labstack/echo/pull/1946).
+JWT middleware was marked as deprecated in Echo core as of [v4.10.0](https://github.com/labstack/echo/releases/tag/v4.10.0) on 2022-12-27. If you did not notice that, consider leveraging tools like [Staticcheck](https://staticcheck.dev/) to catch such deprecations earlier in your dev/CI flow. For bonus points - check out [gosec](https://github.com/securego/gosec).
+
+We sincerely apologize for any inconvenience caused by this change.
While we strive to maintain backward compatibility within Echo core, recurring security issues with third-party dependencies have forced this decision. + +**Enhancements** + +* remove jwt middleware by @stevenwhitehead in https://github.com/labstack/echo/pull/2701 +* optimization: struct alignment by @behnambm in https://github.com/labstack/echo/pull/2636 +* bind: Maintain backwards compatibility for map[string]interface{} binding by @thesaltree in https://github.com/labstack/echo/pull/2656 +* Add Go 1.23 to CI by @aldas in https://github.com/labstack/echo/pull/2675 +* improve `MultipartForm` test by @martinyonatann in https://github.com/labstack/echo/pull/2682 +* `bind` : add support of multipart multi files by @martinyonatann in https://github.com/labstack/echo/pull/2684 +* Add TemplateRenderer struct to ease creating renderers for `html/template` and `text/template` packages. by @aldas in https://github.com/labstack/echo/pull/2690 +* Refactor TestBasicAuth to utilize table-driven test format by @ErikOlson in https://github.com/labstack/echo/pull/2688 +* Remove broken header by @aldas in https://github.com/labstack/echo/pull/2705 +* fix(bind body): content-length can be -1 by @phamvinhdat in https://github.com/labstack/echo/pull/2710 +* CORS middleware should compile allowOrigin regexp at creation by @aldas in https://github.com/labstack/echo/pull/2709 +* Shorten Github issue template and add test example by @aldas in https://github.com/labstack/echo/pull/2711 + + ## v4.12.0 - 2024-04-15 **Security** diff --git a/vendor/github.com/labstack/echo/v4/Makefile b/vendor/github.com/labstack/echo/v4/Makefile index f9e5afb0..c07d8bb4 100644 --- a/vendor/github.com/labstack/echo/v4/Makefile +++ b/vendor/github.com/labstack/echo/v4/Makefile @@ -31,6 +31,6 @@ benchmark: ## Run benchmarks help: ## Display this help screen @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -goversion ?= "1.19" -test_version: ## Run tests inside Docker with given version (defaults to 1.19 oldest supported). Example: make test_version goversion=1.19 +goversion ?= "1.20" +test_version: ## Run tests inside Docker with given version (defaults to 1.20 oldest supported). 
Example: make test_version goversion=1.20 @docker run --rm -it -v $(shell pwd):/project golang:$(goversion) /bin/sh -c "cd /project && make init check" diff --git a/vendor/github.com/labstack/echo/v4/README.md b/vendor/github.com/labstack/echo/v4/README.md index 351ba3c5..5381898d 100644 --- a/vendor/github.com/labstack/echo/v4/README.md +++ b/vendor/github.com/labstack/echo/v4/README.md @@ -1,5 +1,3 @@ - - [![Sourcegraph](https://sourcegraph.com/github.com/labstack/echo/-/badge.svg?style=flat-square)](https://sourcegraph.com/github.com/labstack/echo?badge) [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/labstack/echo/v4) [![Go Report Card](https://goreportcard.com/badge/github.com/labstack/echo?style=flat-square)](https://goreportcard.com/report/github.com/labstack/echo) @@ -77,6 +75,7 @@ package main import ( "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" + "log/slog" "net/http" ) @@ -92,7 +91,9 @@ func main() { e.GET("/", hello) // Start server - e.Logger.Fatal(e.Start(":1323")) + if err := e.Start(":8080"); err != nil && !errors.Is(err, http.ErrServerClosed) { + slog.Error("failed to start server", "error", err) + } } // Handler diff --git a/vendor/github.com/labstack/echo/v4/bind.go b/vendor/github.com/labstack/echo/v4/bind.go index 507def3e..ed7ca324 100644 --- a/vendor/github.com/labstack/echo/v4/bind.go +++ b/vendor/github.com/labstack/echo/v4/bind.go @@ -8,6 +8,7 @@ import ( "encoding/xml" "errors" "fmt" + "mime/multipart" "net/http" "reflect" "strconv" @@ -45,7 +46,7 @@ func (b *DefaultBinder) BindPathParams(c Context, i interface{}) error { for i, name := range names { params[name] = []string{values[i]} } - if err := b.bindData(i, params, "param"); err != nil { + if err := b.bindData(i, params, "param", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -53,7 +54,7 @@ func (b *DefaultBinder) BindPathParams(c Context, i interface{}) error { // BindQueryParams binds query params to bindable object func (b *DefaultBinder) BindQueryParams(c Context, i interface{}) error { - if err := b.bindData(i, c.QueryParams(), "query"); err != nil { + if err := b.bindData(i, c.QueryParams(), "query", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -70,9 +71,12 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { return } - ctype := req.Header.Get(HeaderContentType) - switch { - case strings.HasPrefix(ctype, MIMEApplicationJSON): + // mediatype is found like `mime.ParseMediaType()` does it + base, _, _ := strings.Cut(req.Header.Get(HeaderContentType), ";") + mediatype := strings.TrimSpace(base) + + switch mediatype { + case MIMEApplicationJSON: if err = c.Echo().JSONSerializer.Deserialize(c, i); err != nil { switch err.(type) { case *HTTPError: @@ -81,7 +85,7 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } } - case strings.HasPrefix(ctype, MIMEApplicationXML), strings.HasPrefix(ctype, MIMETextXML): + case MIMEApplicationXML, MIMETextXML: if err = xml.NewDecoder(req.Body).Decode(i); err != nil { if ute, ok := err.(*xml.UnsupportedTypeError); ok { return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported type error: type=%v, error=%v", ute.Type, ute.Error())).SetInternal(err) @@ -90,12 +94,20 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err 
error) { } return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } - case strings.HasPrefix(ctype, MIMEApplicationForm), strings.HasPrefix(ctype, MIMEMultipartForm): + case MIMEApplicationForm: params, err := c.FormParams() if err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } - if err = b.bindData(i, params, "form"); err != nil { + if err = b.bindData(i, params, "form", nil); err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) + } + case MIMEMultipartForm: + params, err := c.MultipartForm() + if err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) + } + if err = b.bindData(i, params.Value, "form", params.File); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } default: @@ -106,7 +118,7 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { // BindHeaders binds HTTP headers to a bindable object func (b *DefaultBinder) BindHeaders(c Context, i interface{}) error { - if err := b.bindData(i, c.Request().Header, "header"); err != nil { + if err := b.bindData(i, c.Request().Header, "header", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -132,10 +144,11 @@ func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) { } // bindData will bind data ONLY fields in destination struct that have EXPLICIT tag -func (b *DefaultBinder) bindData(destination interface{}, data map[string][]string, tag string) error { - if destination == nil || len(data) == 0 { +func (b *DefaultBinder) bindData(destination interface{}, data map[string][]string, tag string, dataFiles map[string][]*multipart.FileHeader) error { + if destination == nil || (len(data) == 0 && len(dataFiles) == 0) { return nil } + hasFiles := len(dataFiles) > 0 typ := reflect.TypeOf(destination).Elem() val := reflect.ValueOf(destination).Elem() @@ -159,6 +172,10 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri for k, v := range data { if isElemString { val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v[0])) + } else if isElemInterface { + // To maintain backward compatibility, we always bind to the first string value + // and not the slice of strings when dealing with map[string]interface{}{} + val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v[0])) } else { val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v)) } @@ -175,7 +192,7 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri return errors.New("binding element must be a struct") } - for i := 0; i < typ.NumField(); i++ { + for i := 0; i < typ.NumField(); i++ { // iterate over all destination fields typeField := typ.Field(i) structField := val.Field(i) if typeField.Anonymous { @@ -194,10 +211,10 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri } if inputFieldName == "" { - // If tag is nil, we inspect if the field is a not BindUnmarshaler struct and try to bind data into it (might contains fields with tags). + // If tag is nil, we inspect if the field is a not BindUnmarshaler struct and try to bind data into it (might contain fields with tags). 
// structs that implement BindUnmarshaler are bound only when they have explicit tag if _, ok := structField.Addr().Interface().(BindUnmarshaler); !ok && structFieldKind == reflect.Struct { - if err := b.bindData(structField.Addr().Interface(), data, tag); err != nil { + if err := b.bindData(structField.Addr().Interface(), data, tag, dataFiles); err != nil { return err } } @@ -205,10 +222,20 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri continue } + if hasFiles { + if ok, err := isFieldMultipartFile(structField.Type()); err != nil { + return err + } else if ok { + if ok := setMultipartFileHeaderTypes(structField, inputFieldName, dataFiles); ok { + continue + } + } + } + inputValue, exists := data[inputFieldName] if !exists { - // Go json.Unmarshal supports case insensitive binding. However the - // url params are bound case sensitive which is inconsistent. To + // Go json.Unmarshal supports case-insensitive binding. However the + // url params are bound case-sensitive which is inconsistent. To // fix this we must check all of the map values in a // case-insensitive search. for k, v := range data { @@ -390,3 +417,50 @@ func setFloatField(value string, bitSize int, field reflect.Value) error { } return err } + +var ( + // NOT supported by bind as you can NOT check easily empty struct being actual file or not + multipartFileHeaderType = reflect.TypeOf(multipart.FileHeader{}) + // supported by bind as you can check by nil value if file existed or not + multipartFileHeaderPointerType = reflect.TypeOf(&multipart.FileHeader{}) + multipartFileHeaderSliceType = reflect.TypeOf([]multipart.FileHeader(nil)) + multipartFileHeaderPointerSliceType = reflect.TypeOf([]*multipart.FileHeader(nil)) +) + +func isFieldMultipartFile(field reflect.Type) (bool, error) { + switch field { + case multipartFileHeaderPointerType, + multipartFileHeaderSliceType, + multipartFileHeaderPointerSliceType: + return true, nil + case multipartFileHeaderType: + return true, errors.New("binding to multipart.FileHeader struct is not supported, use pointer to struct") + default: + return false, nil + } +} + +func setMultipartFileHeaderTypes(structField reflect.Value, inputFieldName string, files map[string][]*multipart.FileHeader) bool { + fileHeaders := files[inputFieldName] + if len(fileHeaders) == 0 { + return false + } + + result := true + switch structField.Type() { + case multipartFileHeaderPointerSliceType: + structField.Set(reflect.ValueOf(fileHeaders)) + case multipartFileHeaderSliceType: + headers := make([]multipart.FileHeader, len(fileHeaders)) + for i, fileHeader := range fileHeaders { + headers[i] = *fileHeader + } + structField.Set(reflect.ValueOf(headers)) + case multipartFileHeaderPointerType: + structField.Set(reflect.ValueOf(fileHeaders[0])) + default: + result = false + } + + return result +} diff --git a/vendor/github.com/labstack/echo/v4/binder.go b/vendor/github.com/labstack/echo/v4/binder.go index ebabeaf9..da15ae82 100644 --- a/vendor/github.com/labstack/echo/v4/binder.go +++ b/vendor/github.com/labstack/echo/v4/binder.go @@ -69,9 +69,9 @@ import ( type BindingError struct { // Field is the field name where value binding failed Field string `json:"field"` + *HTTPError // Values of parameter that failed to bind. 
Values []string `json:"-"` - *HTTPError } // NewBindingError creates new instance of binding error @@ -94,16 +94,15 @@ func (be *BindingError) Error() string { // ValueBinder provides utility methods for binding query or path parameter to various Go built-in types type ValueBinder struct { - // failFast is flag for binding methods to return without attempting to bind when previous binding already failed - failFast bool - errors []error - // ValueFunc is used to get single parameter (first) value from request ValueFunc func(sourceParam string) string // ValuesFunc is used to get all values for parameter from request. i.e. `/api/search?ids=1&ids=2` ValuesFunc func(sourceParam string) []string // ErrorFunc is used to create errors. Allows you to use your own error type, that for example marshals to your specific json response ErrorFunc func(sourceParam string, values []string, message interface{}, internalError error) error + errors []error + // failFast is flag for binding methods to return without attempting to bind when previous binding already failed + failFast bool } // QueryParamsBinder creates query parameter value binder diff --git a/vendor/github.com/labstack/echo/v4/context.go b/vendor/github.com/labstack/echo/v4/context.go index 4edaa2ee..f5dd5a69 100644 --- a/vendor/github.com/labstack/echo/v4/context.go +++ b/vendor/github.com/labstack/echo/v4/context.go @@ -200,31 +200,31 @@ type Context interface { } type context struct { + logger Logger request *http.Request response *Response query url.Values echo *Echo - logger Logger store Map lock sync.RWMutex // following fields are set by Router + handler HandlerFunc // path is route path that Router matched. It is empty string where there is no route match. // Route registered with RouteNotFound is considered as a match and path therefore is not empty. path string - // pnames length is tied to param count for the matched route - pnames []string - // Usually echo.Echo is sizing pvalues but there could be user created middlewares that decide to // overwrite parameter by calling SetParamNames + SetParamValues. // When echo.Echo allocated that slice it length/capacity is tied to echo.Echo.maxParam value. // // It is important that pvalues size is always equal or bigger to pnames length. pvalues []string - handler HandlerFunc + + // pnames length is tied to param count for the matched route + pnames []string } const ( diff --git a/vendor/github.com/labstack/echo/v4/echo.go b/vendor/github.com/labstack/echo/v4/echo.go index ab66b0da..c86ce699 100644 --- a/vendor/github.com/labstack/echo/v4/echo.go +++ b/vendor/github.com/labstack/echo/v4/echo.go @@ -45,7 +45,6 @@ import ( "encoding/json" "errors" "fmt" - "io" stdLog "log" "net" "net/http" @@ -91,10 +90,6 @@ type Echo struct { Listener net.Listener TLSListener net.Listener AutoTLSManager autocert.Manager - DisableHTTP2 bool - Debug bool - HideBanner bool - HidePort bool HTTPErrorHandler HTTPErrorHandler Binder Binder JSONSerializer JSONSerializer @@ -106,6 +101,10 @@ type Echo struct { // OnAddRouteHandler is called when Echo adds new route to specific host router. OnAddRouteHandler func(host string, route Route, handler HandlerFunc, middleware []MiddlewareFunc) + DisableHTTP2 bool + Debug bool + HideBanner bool + HidePort bool } // Route contains a handler and information for matching against requests. @@ -117,9 +116,9 @@ type Route struct { // HTTPError represents an error that occurred while handling a request. 
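// Note that when marshaled to JSON only Message is emitted (e.g. {"message":"Not Found"}); Code and Internal are excluded via `json:"-"`.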
type HTTPError struct { - Code int `json:"-"` - Message interface{} `json:"message"` Internal error `json:"-"` // Stores the error returned by an external dependency + Message interface{} `json:"message"` + Code int `json:"-"` } // MiddlewareFunc defines a function to process middleware. @@ -142,11 +141,6 @@ type JSONSerializer interface { Deserialize(c Context, i interface{}) error } -// Renderer is the interface that wraps the Render function. -type Renderer interface { - Render(io.Writer, string, interface{}, Context) error -} - // Map defines a generic map of type `map[string]interface{}`. type Map map[string]interface{} @@ -265,7 +259,7 @@ const ( const ( // Version of Echo - Version = "4.12.0" + Version = "4.13.1" website = "https://echo.labstack.com" // http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo banner = ` diff --git a/vendor/github.com/labstack/echo/v4/echo_fs.go b/vendor/github.com/labstack/echo/v4/echo_fs.go index a7b231f3..0ffc4b0b 100644 --- a/vendor/github.com/labstack/echo/v4/echo_fs.go +++ b/vendor/github.com/labstack/echo/v4/echo_fs.go @@ -102,8 +102,8 @@ func StaticFileHandler(file string, filesystem fs.FS) HandlerFunc { // traverse up from current executable run path. // NB: private because you really should use fs.FS implementation instances type defaultFS struct { - prefix string fs fs.FS + prefix string } func newDefaultFS() *defaultFS { diff --git a/vendor/github.com/labstack/echo/v4/group.go b/vendor/github.com/labstack/echo/v4/group.go index eca25c94..cb37b123 100644 --- a/vendor/github.com/labstack/echo/v4/group.go +++ b/vendor/github.com/labstack/echo/v4/group.go @@ -14,8 +14,8 @@ type Group struct { common host string prefix string - middleware []MiddlewareFunc echo *Echo + middleware []MiddlewareFunc } // Use implements `Echo#Use()` for sub-routes within the Group. diff --git a/vendor/github.com/labstack/echo/v4/ip.go b/vendor/github.com/labstack/echo/v4/ip.go index 6aed8d60..393a6c2d 100644 --- a/vendor/github.com/labstack/echo/v4/ip.go +++ b/vendor/github.com/labstack/echo/v4/ip.go @@ -134,10 +134,10 @@ Private IPv6 address ranges: */ type ipChecker struct { + trustExtraRanges []*net.IPNet trustLoopback bool trustLinkLocal bool trustPrivateNet bool - trustExtraRanges []*net.IPNet } // TrustOption is config for which IP address to trust diff --git a/vendor/github.com/labstack/echo/v4/renderer.go b/vendor/github.com/labstack/echo/v4/renderer.go new file mode 100644 index 00000000..44e038f3 --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/renderer.go @@ -0,0 +1,29 @@ +package echo + +import "io" + +// Renderer is the interface that wraps the Render function. +type Renderer interface { + Render(io.Writer, string, interface{}, Context) error +} + +// TemplateRenderer is helper to ease creating renderers for `html/template` and `text/template` packages. +// Example usage: +// +// e.Renderer = &echo.TemplateRenderer{ +// Template: template.Must(template.ParseGlob("templates/*.html")), +// } +// +// e.Renderer = &echo.TemplateRenderer{ +// Template: template.Must(template.New("hello").Parse("Hello, {{.}}!")), +// } +type TemplateRenderer struct { + Template interface { + ExecuteTemplate(wr io.Writer, name string, data any) error + } +} + +// Render renders the template with given data. 
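+// A handler typically reaches this indirectly through Echo's context, e.g. c.Render(http.StatusOK, "hello", "World").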
+func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c Context) error { + return t.Template.ExecuteTemplate(w, name, data) +} diff --git a/vendor/github.com/labstack/echo/v4/response.go b/vendor/github.com/labstack/echo/v4/response.go index a795ce36..0f174536 100644 --- a/vendor/github.com/labstack/echo/v4/response.go +++ b/vendor/github.com/labstack/echo/v4/response.go @@ -14,10 +14,10 @@ import ( // by an HTTP handler to construct an HTTP response. // See: https://golang.org/pkg/net/http/#ResponseWriter type Response struct { + Writer http.ResponseWriter echo *Echo beforeFuncs []func() afterFuncs []func() - Writer http.ResponseWriter Status int Size int64 Committed bool @@ -86,7 +86,7 @@ func (r *Response) Write(b []byte) (n int, err error) { // buffered data to the client. // See [http.Flusher](https://golang.org/pkg/net/http/#Flusher) func (r *Response) Flush() { - err := responseControllerFlush(r.Writer) + err := http.NewResponseController(r.Writer).Flush() if err != nil && errors.Is(err, http.ErrNotSupported) { panic(errors.New("response writer flushing is not supported")) } @@ -96,7 +96,7 @@ func (r *Response) Flush() { // take over the connection. // See [http.Hijacker](https://golang.org/pkg/net/http/#Hijacker) func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return responseControllerHijack(r.Writer) + return http.NewResponseController(r.Writer).Hijack() } // Unwrap returns the original http.ResponseWriter. diff --git a/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go b/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go deleted file mode 100644 index 782dab3a..00000000 --- a/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build !go1.20 - -package echo - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerFlush(rw http.ResponseWriter) error { - for { - switch t := rw.(type) { - case interface{ FlushError() error }: - return t.FlushError() - case http.Flusher: - t.Flush() - return nil - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return fmt.Errorf("%w", http.ErrNotSupported) - } - } -} - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - for { - switch t := rw.(type) { - case http.Hijacker: - return t.Hijack() - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return nil, nil, fmt.Errorf("%w", http.ErrNotSupported) - } - } -} diff --git a/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go b/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go deleted file mode 100644 index 6d77c07f..00000000 --- a/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build go1.20 - -package echo - -import ( - "bufio" - "net" - "net/http" -) - -func responseControllerFlush(rw http.ResponseWriter) error { - return http.NewResponseController(rw).Flush() -} - -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - return http.NewResponseController(rw).Hijack() -} diff --git 
a/vendor/github.com/labstack/echo/v4/router.go b/vendor/github.com/labstack/echo/v4/router.go index 03267315..49b56966 100644 --- a/vendor/github.com/labstack/echo/v4/router.go +++ b/vendor/github.com/labstack/echo/v4/router.go @@ -18,32 +18,31 @@ type Router struct { } type node struct { - kind kind - label byte - prefix string - parent *node - staticChildren children - originalPath string - methods *routeMethods - paramChild *node - anyChild *node - paramsCount int + methods *routeMethods + parent *node + paramChild *node + anyChild *node + // notFoundHandler is handler registered with RouteNotFound method and is executed for 404 cases + notFoundHandler *routeMethod + prefix string + originalPath string + staticChildren children + paramsCount int + label byte + kind kind // isLeaf indicates that node does not have child routes isLeaf bool // isHandler indicates that node has at least one handler registered to it isHandler bool - - // notFoundHandler is handler registered with RouteNotFound method and is executed for 404 cases - notFoundHandler *routeMethod } type kind uint8 type children []*node type routeMethod struct { + handler HandlerFunc ppath string pnames []string - handler HandlerFunc } type routeMethods struct { @@ -242,18 +241,18 @@ func (r *Router) insert(method, path string, h HandlerFunc) { if i == lcpIndex { // path node is last fragment of route path. ie. `/users/:id` - r.insertNode(method, path[:i], paramKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path[:i], paramKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } else { r.insertNode(method, path[:i], paramKind, routeMethod{}) } } else if path[i] == '*' { r.insertNode(method, path[:i], staticKind, routeMethod{}) pnames = append(pnames, "*") - r.insertNode(method, path[:i+1], anyKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path[:i+1], anyKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } } - r.insertNode(method, path, staticKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path, staticKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } func (r *Router) insertNode(method, path string, t kind, rm routeMethod) { diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 00000000..fbff658f --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
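For orientation, a minimal sketch of how the chunked buffer vendored below is driven — illustrative only, and assuming nothing beyond the `buffer` API shown in this patch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer // the zero value is ready to use
	b.AppendString("hello, ")
	b.AppendBytes([]byte("world"))
	fmt.Println(b.Size()) // 12, summed across all chunks
	// DumpTo writes every chunk to the writer, returns the chunks
	// to the reuse pool, and resets the buffer.
	if _, err := b.DumpTo(os.Stdout); err != nil {
		panic(err)
	}
}
```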
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 00000000..598a54af --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,278 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "net" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size >= config.PooledSize { + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) < s { + b.ensureSpaceSlow(s) + } +} + +func (b *Buffer) ensureSpaceSlow(s int) { + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + b.EnsureSpace(1) + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendBytesSlow(data) + } +} + +func (b *Buffer) appendBytesSlow(data []byte) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendString appends a string to buffer. +func (b *Buffer) AppendString(data string) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) 
// fast path + } else { + b.appendStringSlow(data) + } +} + +func (b *Buffer) appendStringSlow(data string) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + bufs := net.Buffers(b.bufs) + if len(b.Buf) > 0 { + bufs = append(bufs, b.Buf) + } + n, err := bufs.WriteTo(w) + + for _, buf := range b.bufs { + putBuf(buf) + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return int(n), err +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enough, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 00000000..ff7b27c5 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. 
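+// It is the unsafe fast path; builds tagged easyjson_nounsafe or appengine use the plain-copy fallback in bytestostr_nounsafe.go instead.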
+// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 00000000..864d1be6 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. +func bytesToStr(data []byte) string { + return string(data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 00000000..e90ec40d --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 00000000..b5f5e261 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1244 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/josharian/intern" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValueCloned bool // true if byteValue was allocated and does not refer to original json body + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. 
+ wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. + for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. 
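+// Scanning is deliberately permissive (digits, at most one '.', at most one exponent); strict validation is deferred to strconv when the token is converted.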
+func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. +func findStringLen(data []byte) (isValid bool, length int) { + for { + idx := bytes.IndexByte(data, '"') + if idx == -1 { + return false, len(data) + } + if idx == 0 || (idx > 0 && data[idx-1] != '\\') { + return true, length + idx + } + + // count \\\\\\\ sequences. even number of slashes means quote is not really escaped + cnt := 1 + for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { + cnt++ + } + if cnt%2 == 0 { + return true, length + idx + } + + length += idx + 1 + data = data[idx+1:] + } +} + +// unescapeStringToken performs unescaping of string token. +// if no escaping is needed, original string is returned, otherwise - a new one allocated +func (r *Lexer) unescapeStringToken() (err error) { + data := r.token.byteValue + var unescapedData []byte + + for { + i := bytes.IndexByte(data, '\\') + if i == -1 { + break + } + + escapedRune, escapedBytes, err := decodeEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return err + } + + if unescapedData == nil { + unescapedData = make([]byte, 0, len(r.token.byteValue)) + } + + var d [4]byte + s := utf8.EncodeRune(d[:], escapedRune) + unescapedData = append(unescapedData, data[:i]...) + unescapedData = append(unescapedData, d[:s]...) + + data = data[i+escapedBytes:] + } + + if unescapedData != nil { + r.token.byteValue = append(unescapedData, data...) + r.token.byteValueCloned = true + } + return +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// decodeEscape processes a single escape sequence and returns number of bytes processed. 
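+// For example `\n` consumes 2 bytes, `\u00e9` consumes 6, and a surrogate pair such as `\ud83d\ude00` consumes 12.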
+func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { + if len(data) < 2 { + return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") + } + + c := data[1] + switch c { + case '"', '/', '\\': + return rune(c), 2, nil + case 'b': + return '\b', 2, nil + case 'f': + return '\f', 2, nil + case 'n': + return '\n', 2, nil + case 'r': + return '\r', 2, nil + case 't': + return '\t', 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + return rr, read, nil + } + + return 0, 0, errors.New("incorrectly escaped bytes") +} + +// fetchString scans a string literal token. +func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + isValid, length := findStringLen(data) + if !isValid { + r.pos += length + r.errParse("unterminated string literal") + return + } + r.token.byteValue = data[:length] + r.pos += length + 1 // skip closing '"' as well +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.byteValueCloned = false + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. 
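+// Note that it also reports true once a fatal error has occurred (!r.Ok()), so loops driven by it terminate when scanning fails.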
+func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. +func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + startPos := r.start + + switch r.token.delimValue { + case '{': + start, end = '{', '}' + case '[': + start, end = '[', ']' + default: + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. +func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + if !skipUnescape { + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "", nil + } + } + + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. 
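+// For example: switch l.UnsafeString() { case "id": ...; case "name": ... }.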
+func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString(false) + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString(false) + return ret +} + +// UnsafeFieldName returns current member name string token +func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { + ret, _ := r.unsafeString(skipUnescape) + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + var ret string + if r.token.byteValueCloned { + ret = bytesToStr(r.token.byteValue) + } else { + ret = string(r.token.byteValue) + } + r.consume() + return ret +} + +// StringIntern reads a string literal, and performs string interning on it. +func (r *Lexer) StringIntern() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + ret := intern.Bytes(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. +func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:n] +} + +// Bool reads a true or false boolean keyword. 
+func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return 
uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) UintptrStr() uintptr { + return uintptr(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// JsonNumber fetches and json.Number from 'encoding/json' package. 
+// Both int, float or string, contains them are valid values +func (r *Lexer) JsonNumber() json.Number { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() { + r.errInvalidToken("json.Number") + return json.Number("") + } + + switch r.token.kind { + case tokenString: + return json.Number(r.String()) + case tokenNumber: + return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") + default: + r.errSyntax() + return json.Number("") + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + ret := []interface{}{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 00000000..2c5b2010 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,405 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. 
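+// Illustrative manual use of the writer:
+//
+//	var w jwriter.Writer
+//	w.RawString(`{"ok":`)
+//	w.Bool(true)
+//	w.RawByte('}')
+//	buf, _ := w.BuildBytes()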
+func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + w.base64(data) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') 
+} + +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + + for i := 0; i < 128; i++ { + table[i] = true + } + + for _, v := range falseValues { + table[v] = false + } + + return table +} + +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
+
+ p := 0 // last non-escape symbol
+
+ escapeTable := &htmlEscapeTable
+ if w.NoEscapeHTML {
+ escapeTable = &htmlNoEscapeTable
+ }
+
+ for i := 0; i < len(s); {
+ c := s[i]
+
+ if c < utf8.RuneSelf {
+ if escapeTable[c] {
+ // single-width character, no escaping is required
+ i++
+ continue
+ }
+
+ w.Buffer.AppendString(s[p:i])
+ switch c {
+ case '\t':
+ w.Buffer.AppendString(`\t`)
+ case '\r':
+ w.Buffer.AppendString(`\r`)
+ case '\n':
+ w.Buffer.AppendString(`\n`)
+ case '\\':
+ w.Buffer.AppendString(`\\`)
+ case '"':
+ w.Buffer.AppendString(`\"`)
+ default:
+ w.Buffer.AppendString(`\u00`)
+ w.Buffer.AppendByte(chars[c>>4])
+ w.Buffer.AppendByte(chars[c&0xf])
+ }
+
+ i++
+ p = i
+ continue
+ }
+
+ // invalid UTF-8 byte: emit the Unicode replacement character
+ runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+ if runeValue == utf8.RuneError && runeWidth == 1 {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\ufffd`)
+ i++
+ p = i
+ continue
+ }
+
+ // JSONP safety: U+2028 (line separator) and U+2029 (paragraph separator)
+ // are valid in JSON but not in JavaScript string literals, so escape them
+ if runeValue == '\u2028' || runeValue == '\u2029' {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\u202`)
+ w.Buffer.AppendByte(chars[runeValue&0xf])
+ i += runeWidth
+ p = i
+ continue
+ }
+ i += runeWidth
+ }
+ w.Buffer.AppendString(s[p:])
+ w.Buffer.AppendByte('"')
+}
+
+const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+const padChar = '='
+
+func (w *Writer) base64(in []byte) {
+
+ if len(in) == 0 {
+ return
+ }
+
+ w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
+
+ si := 0
+ n := (len(in) / 3) * 3
+
+ for si < n {
+ // Convert 3x 8bit source bytes into 4 bytes
+ val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
+
+ si += 3
+ }
+
+ remain := len(in) - si
+ if remain == 0 {
+ return
+ }
+
+ // Add the remaining small block
+ val := uint(in[si+0]) << 16
+ if remain == 2 {
+ val |= uint(in[si+1]) << 8
+ }
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
+
+ switch remain {
+ case 2:
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+ case 1:
+ w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+ }
+}
diff --git a/vendor/github.com/swaggo/echo-swagger/.gitignore b/vendor/github.com/swaggo/echo-swagger/.gitignore
new file mode 100644
index 00000000..73006fa1
--- /dev/null
+++ b/vendor/github.com/swaggo/echo-swagger/.gitignore
@@ -0,0 +1,17 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+dist
+
+.idea
+vendor
+.envrc
+
diff --git a/vendor/github.com/swaggo/echo-swagger/.goreleaser.yml b/vendor/github.com/swaggo/echo-swagger/.goreleaser.yml
new file mode 100644
index 00000000..3a538866
--- /dev/null
+++ b/vendor/github.com/swaggo/echo-swagger/.goreleaser.yml
@@ -0,0 +1,10 @@
+builds:
+  - skip: true
+snapshot:
+  name_template: "{{ .Tag }}-next"
+changelog:
+  sort: asc
+  filters:
+    exclude:
+      - '^docs:'
+      - '^test:'
diff --git a/vendor/github.com/swaggo/echo-swagger/LICENSE b/vendor/github.com/swaggo/echo-swagger/LICENSE
new file mode 100644
index 00000000..9751ab50
--- /dev/null
+++ b/vendor/github.com/swaggo/echo-swagger/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Swaggo
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the
"Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/swaggo/echo-swagger/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/swaggo/echo-swagger/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..fd84126d --- /dev/null +++ b/vendor/github.com/swaggo/echo-swagger/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +**Describe the PR** +e.g. add cool parser. + +**Relation issue** +e.g. https://github.com/swaggo/gin-swagger/pull/123/files + +**Additional context** +Add any other context about the problem here. diff --git a/vendor/github.com/swaggo/echo-swagger/README.md b/vendor/github.com/swaggo/echo-swagger/README.md new file mode 100644 index 00000000..d645ba6c --- /dev/null +++ b/vendor/github.com/swaggo/echo-swagger/README.md @@ -0,0 +1,89 @@ +# echo-swagger + +echo middleware to automatically generate RESTful API documentation with Swagger 2.0. + +[![Build Status](https://github.com/swaggo/echo-swagger/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) +[![Codecov branch](https://img.shields.io/codecov/c/github/swaggo/echo-swagger/master.svg)](https://codecov.io/gh/swaggo/echo-swagger) +[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/echo-swagger)](https://goreportcard.com/report/github.com/swaggo/echo-swagger) +[![Release](https://img.shields.io/github/release/swaggo/echo-swagger.svg?style=flat-square)](https://github.com/swaggo/echo-swagger/releases) + + +## Usage + +### Start using it +1. Add comments to your API source code, [See Declarative Comments Format](https://github.com/swaggo/swag#declarative-comments-format). +2. Download [Swag](https://github.com/swaggo/swag) for Go by using: +```sh +$ go get -d github.com/swaggo/swag/cmd/swag + +# 1.16 or newer +$ go install github.com/swaggo/swag/cmd/swag@latest +``` +3. Run the [Swag](https://github.com/swaggo/swag) in your Go project root folder which contains `main.go` file, [Swag](https://github.com/swaggo/swag) will parse comments and generate required files(`docs` folder and `docs/doc.go`). +```sh_ "github.com/swaggo/echo-swagger/v2/example/docs" +$ swag init +``` +4. Download [echo-swagger](https://github.com/swaggo/echo-swagger) by using: +```sh +$ go get -u github.com/swaggo/echo-swagger +``` + +And import following in your code: +```go +import "github.com/swaggo/echo-swagger" // echo-swagger middleware +``` + +### Canonical example: + +```go +package main + +import ( + "github.com/labstack/echo/v4" + "github.com/swaggo/echo-swagger" + + _ "github.com/swaggo/echo-swagger/example/docs" // docs is generated by Swag CLI, you have to import it. 
+)
+
+// @title Swagger Example API
+// @version 1.0
+// @description This is a sample server Petstore server.
+// @termsOfService http://swagger.io/terms/
+
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+
+// @host petstore.swagger.io
+// @BasePath /v2
+func main() {
+ e := echo.New()
+
+ e.GET("/swagger/*", echoSwagger.WrapHandler)
+
+ e.Logger.Fatal(e.Start(":1323"))
+}
+
+```
+
+5. Run it, then browse to http://localhost:1323/swagger/index.html to see the Swagger 2.0 API documentation.
+
+![swagger_index.html](https://user-images.githubusercontent.com/8943871/36250587-40834072-1279-11e8-8bb7-02a2e2fdd7a7.png)
+
+Note: if you are using the Gzip middleware, you should exclude the swagger endpoints via its Skipper, as in the example below.
+
+### Example
+
+```go
+e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
+ // Skip compression for the swagger endpoints.
+ Skipper: func(c echo.Context) bool {
+ return strings.Contains(c.Request().URL.Path, "swagger")
+ },
+}))
+```
diff --git a/vendor/github.com/swaggo/echo-swagger/swagger.go b/vendor/github.com/swaggo/echo-swagger/swagger.go
new file mode 100644
index 00000000..b8dc937d
--- /dev/null
+++ b/vendor/github.com/swaggo/echo-swagger/swagger.go
@@ -0,0 +1,311 @@
+package echoSwagger
+
+import (
+ "html/template"
+ "net/http"
+ "path/filepath"
+ "regexp"
+
+ "github.com/ghodss/yaml"
+ "github.com/labstack/echo/v4"
+ swaggerFiles "github.com/swaggo/files/v2"
+ "github.com/swaggo/swag"
+)
+
+// Config stores echoSwagger configuration variables.
+type Config struct {
+ // The urls pointing to the API definition (normally swagger.json or swagger.yaml). Defaults to `doc.json` and `doc.yaml`.
+ URLs []string
+ DocExpansion string
+ DomID string
+ InstanceName string
+ DeepLinking bool
+ PersistAuthorization bool
+ SyntaxHighlight bool
+
+ // The information for OAuth2 integration, if any.
+ OAuth *OAuthConfig
+}
+
+// OAuthConfig stores configuration for Swagger UI OAuth2 integration. See
+// https://swagger.io/docs/open-source-tools/swagger-ui/usage/oauth2/ for further details.
+type OAuthConfig struct {
+ // The ID of the client sent to the OAuth2 IAM provider.
+ ClientId string
+
+ // The OAuth2 realm that the client should operate in. If not applicable, use empty string.
+ Realm string
+
+ // The name to display for the application in the authentication popup.
+ AppName string
+}
+
+// URL adds a url pointing to the API definition (normally swagger.json or swagger.yaml).
+func URL(url string) func(*Config) {
+ return func(c *Config) {
+ c.URLs = append(c.URLs, url)
+ }
+}
+
+// DeepLinking true, false.
+func DeepLinking(deepLinking bool) func(*Config) {
+ return func(c *Config) {
+ c.DeepLinking = deepLinking
+ }
+}
+
+// SyntaxHighlight true, false.
+func SyntaxHighlight(syntaxHighlight bool) func(*Config) {
+ return func(c *Config) {
+ c.SyntaxHighlight = syntaxHighlight
+ }
+}
+
+// DocExpansion list, full, none.
+func DocExpansion(docExpansion string) func(*Config) {
+ return func(c *Config) {
+ c.DocExpansion = docExpansion
+ }
+}
+
+// DomID #swagger-ui.
+func DomID(domID string) func(*Config) {
+ return func(c *Config) {
+ c.DomID = domID
+ }
+}
+
+// InstanceName specifies the swag instance name.
+func InstanceName(instanceName string) func(*Config) {
+ return func(c *Config) {
+ c.InstanceName = instanceName
+ }
+}
+
+// PersistAuthorization persists authorization information across browser close/refresh.
+// Defaults to false.
+func PersistAuthorization(persistAuthorization bool) func(*Config) {
+ return func(c *Config) {
+ c.PersistAuthorization = persistAuthorization
+ }
+}
+
+func OAuth(config *OAuthConfig) func(*Config) {
+ return func(c *Config) {
+ c.OAuth = config
+ }
+}
+
+func newConfig(configFns ...func(*Config)) *Config {
+ config := Config{
+ URLs: []string{"doc.json", "doc.yaml"},
+ DocExpansion: "list",
+ DomID: "swagger-ui",
+ InstanceName: "swagger",
+ DeepLinking: true,
+ PersistAuthorization: false,
+ SyntaxHighlight: true,
+ }
+
+ for _, fn := range configFns {
+ fn(&config)
+ }
+
+ if config.InstanceName == "" {
+ config.InstanceName = swag.Name
+ }
+
+ return &config
+}
+
+// WrapHandler is EchoWrapHandler() with the default configuration.
+var WrapHandler = EchoWrapHandler()
+
+// EchoWrapHandler wraps `http.Handler` into `echo.HandlerFunc`.
+func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc {
+ config := newConfig(options...)
+
+ // create a named template for the index page
+ index, _ := template.New("swagger_index.html").Parse(indexTemplate)
+
+ var re = regexp.MustCompile(`^(.*/)([^?].*)?[?|.]*$`)
+
+ return func(c echo.Context) error {
+ if c.Request().Method != http.MethodGet {
+ return echo.NewHTTPError(http.StatusMethodNotAllowed, http.StatusText(http.StatusMethodNotAllowed))
+ }
+
+ matches := re.FindStringSubmatch(c.Request().RequestURI)
+ path := matches[2]
+
+ switch filepath.Ext(path) {
+ case ".html":
+ c.Response().Header().Set("Content-Type", "text/html; charset=utf-8")
+ case ".css":
+ c.Response().Header().Set("Content-Type", "text/css; charset=utf-8")
+ case ".js":
+ c.Response().Header().Set("Content-Type", "application/javascript")
+ case ".json":
+ c.Response().Header().Set("Content-Type", "application/json; charset=utf-8")
+ case ".yaml":
+ c.Response().Header().Set("Content-Type", "text/plain; charset=utf-8")
+ case ".png":
+ c.Response().Header().Set("Content-Type", "image/png")
+ }
+
+ response := c.Response()
+ // This check fixes an error introduced here: https://github.com/labstack/echo/blob/8da8e161380fd926d4341721f0328f1e94d6d0a2/response.go#L86-L88
+ if _, ok := response.Writer.(http.Flusher); ok {
+ defer response.Flush()
+ }
+
+ switch path {
+ case "":
+ _ = c.Redirect(http.StatusMovedPermanently, matches[1]+"/"+"index.html")
+ case "index.html":
+ _ = index.Execute(c.Response().Writer, config)
+ case "doc.json":
+ doc, err := swag.ReadDoc(config.InstanceName)
+ if err != nil {
+ c.Error(err)
+
+ return nil
+ }
+
+ _, _ = c.Response().Writer.Write([]byte(doc))
+ case "doc.yaml":
+ jsonString, err := swag.ReadDoc(config.InstanceName)
+ if err != nil {
+ c.Error(err)
+
+ return nil
+ }
+ doc, err := yaml.JSONToYAML([]byte(jsonString))
+ if err != nil {
+ c.Error(err)
+
+ return nil
+ }
+ _, _ = c.Response().Writer.Write(doc)
+ default:
+ c.Request().URL.Path = matches[2]
+ http.FileServer(http.FS(swaggerFiles.FS)).ServeHTTP(c.Response(), c.Request())
+ }
+
+ return nil
+ }
+}
+
+const indexTemplate = `
+<!-- Swagger UI index.html template: the full HTML/CSS/JS body is not preserved in this excerpt. -->
+`
diff --git a/vendor/github.com/swaggo/files/v2/.gitmodules b/vendor/github.com/swaggo/files/v2/.gitmodules
new file mode 100644
index 00000000..dda71757
--- /dev/null
+++ b/vendor/github.com/swaggo/files/v2/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "swagger-ui"]
+ path = swagger-ui
+ url = https://github.com/swagger-api/swagger-ui.git
diff --git a/vendor/github.com/swaggo/files/v2/LICENSE b/vendor/github.com/swaggo/files/v2/LICENSE
new file mode 100644
index 00000000..1667ee95
--- /dev/null
+++ b/vendor/github.com/swaggo/files/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Swaggo
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/swaggo/files/v2/Makefile b/vendor/github.com/swaggo/files/v2/Makefile
new file mode 100644
index 00000000..9ee79423
--- /dev/null
+++ b/vendor/github.com/swaggo/files/v2/Makefile
@@ -0,0 +1,10 @@
+all: build
+
+.PHONY: init
+init:
+ git submodule update --init --recursive
+
+.PHONY: build
+build:
+ rm -rf dist/*
+ cp -r swagger-ui/dist/* dist/
\ No newline at end of file
diff --git a/vendor/github.com/swaggo/files/v2/README.md b/vendor/github.com/swaggo/files/v2/README.md
new file mode 100644
index 00000000..7732bdba
--- /dev/null
+++ b/vendor/github.com/swaggo/files/v2/README.md
@@ -0,0 +1,9 @@
+# swaggerFiles
+
+[![Build Status](https://github.com/swaggo/files/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/files)](https://goreportcard.com/report/github.com/swaggo/files)
+
+Generate swagger ui embedded files by using:
+```
+ make
+```
\ No newline at end of file
diff --git a/vendor/github.com/swaggo/files/v2/files.go b/vendor/github.com/swaggo/files/v2/files.go
new file mode 100644
index 00000000..d7b452e7
--- /dev/null
+++ b/vendor/github.com/swaggo/files/v2/files.go
@@ -0,0 +1,12 @@
+package swaggerFiles
+
+import (
+ "embed"
+ "io/fs"
+)
+
+//go:embed dist/*
+var dist embed.FS
+
+// FS holds embedded swagger ui files
+var FS, _ = fs.Sub(dist, "dist")
diff --git a/vendor/github.com/swaggo/swag/.gitignore b/vendor/github.com/swaggo/swag/.gitignore
new file mode 100644
index 00000000..865a6f35
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/.gitignore
@@ -0,0 +1,24 @@
+dist
+testdata/simple*/docs
+testdata/quotes/docs
+testdata/quotes/quotes.so
+testdata/delims/docs
+testdata/delims/delims.so
+example/basic/docs/*
+example/celler/docs/*
+cover.out
+
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go 
coverage tool, specifically when used with LiteIDE +*.out +.idea +.vscode + +# Etc +.DS_Store + +/swag +/swag.exe diff --git a/vendor/github.com/swaggo/swag/.goreleaser.yml b/vendor/github.com/swaggo/swag/.goreleaser.yml new file mode 100644 index 00000000..a431d5db --- /dev/null +++ b/vendor/github.com/swaggo/swag/.goreleaser.yml @@ -0,0 +1,30 @@ +build: + main: cmd/swag/main.go + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + - 386 + env: + - CGO_ENABLED=0 + +archives: + - id: foo + name_template: >- + {{ .ProjectName }}_ + {{- .Version }}_ + {{- if eq .Os "linux"}}Linux{{ else if eq .Os "darwin"}}Darwin{{ else }}{{ .Os }}{{ end }}_ + {{- if eq .Arch "386" }}i386{{ else if eq .Arch "amd64" }}x86_64{{ else }}{{ .Arch }}{{ end }} + +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/swaggo/swag/CODE_OF_CONDUCT.md b/vendor/github.com/swaggo/swag/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..717d7952 --- /dev/null +++ b/vendor/github.com/swaggo/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [gitter.im/swaggo/swag](https://gitter.im/swaggo/swag). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/swaggo/swag/CONTRIBUTING.md b/vendor/github.com/swaggo/swag/CONTRIBUTING.md
new file mode 100644
index 00000000..f3a3a15b
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/CONTRIBUTING.md
@@ -0,0 +1,16 @@
+# Contributing
+
+When contributing to this repository, please first discuss the change you wish to make via issue,
+email, or any other method with the owners of this repository before making a change.
+
+Please note we have a code of conduct; please follow it in all your interactions with the project.
+
+## Pull Request Process
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create a new Pull Request
+
+Please open an issue first if the change is likely to be substantial.
diff --git a/vendor/github.com/swaggo/swag/Dockerfile b/vendor/github.com/swaggo/swag/Dockerfile
new file mode 100644
index 00000000..5ea91343
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/Dockerfile
@@ -0,0 +1,36 @@
+# Dockerfile References: https://docs.docker.com/engine/reference/builder/
+
+# Start from the latest golang base image
+FROM --platform=$BUILDPLATFORM golang:1.21-alpine as builder
+
+# Set the Current Working Directory inside the container
+WORKDIR /app
+
+# Copy go mod and sum files
+COPY go.mod go.sum ./
+
+# Download all dependencies. Dependencies will be cached if the go.mod and go.sum files are not changed
+RUN go mod download
+
+# Copy the source from the current directory to the Working Directory inside the container
+COPY . .
+ +# Configure go compiler target platform +ARG TARGETOS +ARG TARGETARCH +ENV GOARCH=$TARGETARCH \ + GOOS=$TARGETOS + +# Build the Go app +RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/main.go + + +######## Start a new stage from scratch ####### +FROM --platform=$TARGETPLATFORM scratch + +WORKDIR /code/ + +# Copy the Pre-built binary file from the previous stage +COPY --from=builder /app/swag /bin/swag + +ENTRYPOINT ["/bin/swag"] diff --git a/vendor/github.com/swaggo/swag/Makefile b/vendor/github.com/swaggo/swag/Makefile new file mode 100644 index 00000000..85dc3623 --- /dev/null +++ b/vendor/github.com/swaggo/swag/Makefile @@ -0,0 +1,78 @@ +GOCMD:=$(shell which go) +GOLINT:=$(shell which golint) +GOIMPORT:=$(shell which goimports) +GOFMT:=$(shell which gofmt) +GOBUILD:=$(GOCMD) build +GOINSTALL:=$(GOCMD) install +GOCLEAN:=$(GOCMD) clean +GOTEST:=$(GOCMD) test +GOMODTIDY:=$(GOCMD) mod tidy +GOGET:=$(GOCMD) get +GOLIST:=$(GOCMD) list +GOVET:=$(GOCMD) vet +GOPATH:=$(shell $(GOCMD) env GOPATH) +u := $(if $(update),-u) + +BINARY_NAME:=swag +PACKAGES:=$(shell $(GOLIST) github.com/swaggo/swag github.com/swaggo/swag/cmd/swag github.com/swaggo/swag/gen github.com/swaggo/swag/format) +GOFILES:=$(shell find . -name "*.go" -type f) + +all: test build + +.PHONY: build +build: deps + $(GOBUILD) -o $(BINARY_NAME) ./cmd/swag + +.PHONY: install +install: deps + $(GOINSTALL) ./cmd/swag + +.PHONY: test +test: + echo "mode: count" > coverage.out + for PKG in $(PACKAGES); do \ + $(GOCMD) test -v -covermode=count -coverprofile=profile.out $$PKG > tmp.out; \ + cat tmp.out; \ + if grep -q "^--- FAIL" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "build failed" tmp.out; then \ + rm tmp.out; \ + exit; \ + fi; \ + if [ -f profile.out ]; then \ + cat profile.out | grep -v "mode:" >> coverage.out; \ + rm profile.out; \ + fi; \ + done + +.PHONY: clean +clean: + $(GOCLEAN) + rm -f $(BINARY_NAME) + +.PHONY: deps +deps: + $(GOMODTIDY) + +.PHONY: vet +vet: deps + $(GOVET) $(PACKAGES) + +.PHONY: fmt +fmt: + $(GOFMT) -s -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + @diff=$$($(GOFMT) -s -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +.PHONY: view-covered +view-covered: + $(GOTEST) -coverprofile=cover.out $(TARGET) + $(GOCMD) tool cover -html=cover.out diff --git a/vendor/github.com/swaggo/swag/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/swaggo/swag/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..697de1f4 --- /dev/null +++ b/vendor/github.com/swaggo/swag/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +**Describe the PR** +e.g. add cool parser. + +**Relation issue** +e.g. https://github.com/swaggo/swag/pull/118/files + +**Additional context** +Add any other context about the problem here. 
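The multi-stage Dockerfile above cross-compiles `swag` by pinning the build stage to `$BUILDPLATFORM` and passing the BuildKit-provided `TARGETOS`/`TARGETARCH` arguments to the Go compiler. As a minimal sketch of how that image could be built and used (the `swag:local` tag is a placeholder, not part of this patch):

```sh
# Build with BuildKit; TARGETOS/TARGETARCH are injected automatically.
docker buildx build --load -t swag:local .

# Generate docs by mounting a project at /code, the image's working directory.
docker run --rm -v $(pwd):/code swag:local init -g main.go
```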
diff --git a/vendor/github.com/swaggo/swag/README.md b/vendor/github.com/swaggo/swag/README.md new file mode 100644 index 00000000..7504f6e7 --- /dev/null +++ b/vendor/github.com/swaggo/swag/README.md @@ -0,0 +1,1004 @@ +# swag + +🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md) ∙ [Português](README_pt.md)* + + + +[![Build Status](https://github.com/swaggo/swag/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) +[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) +[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) +[![Go Doc](https://godoc.org/github.com/swaggo/swagg?status.svg)](https://godoc.org/github.com/swaggo/swag) +[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) +[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield) +[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases) + + +Swag converts Go annotations to Swagger Documentation 2.0. We've created a variety of plugins for popular [Go web frameworks](#supported-web-frameworks). This allows you to quickly integrate with an existing Go project (using Swagger UI). + +## Contents + - [Getting started](#getting-started) + - [Supported Web Frameworks](#supported-web-frameworks) + - [How to use it with Gin](#how-to-use-it-with-gin) + - [The swag formatter](#the-swag-formatter) + - [Implementation Status](#implementation-status) + - [Declarative Comments Format](#declarative-comments-format) + - [General API Info](#general-api-info) + - [API Operation](#api-operation) + - [Security](#security) + - [Examples](#examples) + - [Descriptions over multiple lines](#descriptions-over-multiple-lines) + - [User defined structure with an array type](#user-defined-structure-with-an-array-type) + - [Function scoped struct declaration](#function-scoped-struct-declaration) + - [Model composition in response](#model-composition-in-response) + - [Add request headers](#add-request-headers) + - [Add response headers](#add-response-headers) + - [Use multiple path params](#use-multiple-path-params) + - [Example value of struct](#example-value-of-struct) + - [SchemaExample of body](#schemaexample-of-body) + - [Description of struct](#description-of-struct) + - [Use swaggertype tag to supported custom type](#use-swaggertype-tag-to-supported-custom-type) + - [Use global overrides to support a custom type](#use-global-overrides-to-support-a-custom-type) + - [Use swaggerignore tag to exclude a field](#use-swaggerignore-tag-to-exclude-a-field) + - [Add extension info to struct field](#add-extension-info-to-struct-field) + - [Rename model to display](#rename-model-to-display) + - [How to use security annotations](#how-to-use-security-annotations) + - [Add a description for enum items](#add-a-description-for-enum-items) + - [Generate only specific docs file types](#generate-only-specific-docs-file-types) + - [How to use Go generic types](#how-to-use-generics) +- [About the Project](#about-the-project) + +## 
Getting started
+
+1. Add comments to your API source code, see [Declarative Comments Format](#declarative-comments-format).
+
+2. Install swag by using:
+```sh
+go install github.com/swaggo/swag/cmd/swag@latest
+```
+To build from source you need [Go](https://golang.org/dl/) (1.19 or newer).
+
+Alternatively, you can run the Docker image:
+```sh
+docker run --rm -v $(pwd):/code ghcr.io/swaggo/swag:latest
+```
+
+Or download a pre-compiled binary from the [release page](https://github.com/swaggo/swag/releases).
+
+3. Run `swag init` in the project's root folder which contains the `main.go` file. This will parse your comments and generate the required files (`docs` folder and `docs/docs.go`).
+```sh
+swag init
+```
+
+ Make sure to import the generated `docs/docs.go` so that your specific configuration gets `init`'ed. If your General API annotations do not live in `main.go`, you can let swag know with the `-g` flag.
+ ```go
+ import _ "example-module-name/docs"
+ ```
+ ```sh
+ swag init -g http/api.go
+ ```
+
+4. (optional) Use `swag fmt` to format the swag comments. (Please upgrade to the latest version.)
+
+ ```sh
+ swag fmt
+ ```
+
+## swag cli
+
+```sh
+swag init -h
+NAME:
+ swag init - Create docs.go
+
+USAGE:
+ swag init [command options] [arguments...]
+
+OPTIONS:
+ --quiet, -q Make the logger quiet. (default: false)
+ --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go")
+ --dir value, -d value Directories you want to parse,comma separated and general-info file must be in the first one (default: "./")
+ --exclude value Exclude directories and files when searching, comma separated
+ --propertyStrategy value, -p value Property Naming Strategy like snakecase,camelcase,pascalcase (default: "camelcase")
+ --output value, -o value Output directory for all the generated files(swagger.json, swagger.yaml and docs.go) (default: "./docs")
+ --outputTypes value, --ot value Output types of generated files (docs.go, swagger.json, swagger.yaml) like go,json,yaml (default: "go,json,yaml")
+ --parseVendor Parse go files in 'vendor' folder, disabled by default (default: false)
+ --parseDependency, --pd Parse go files inside dependency folder, disabled by default (default: false)
+ --parseDependencyLevel, --pdl Enhancement of '--parseDependency', parse go files inside dependency folder, 0 disabled, 1 only parse models, 2 only parse operations, 3 parse all (default: 0)
+ --markdownFiles value, --md value Parse folder containing markdown files to use as description, disabled by default
+ --codeExampleFiles value, --cef value Parse folder containing code example files to use for the x-codeSamples extension, disabled by default
+ --parseInternal Parse go files in internal packages, disabled by default (default: false)
+ --generatedTime Generate timestamp at the top of docs.go, disabled by default (default: false)
+ --parseDepth value Dependency parse depth (default: 100)
+ --requiredByDefault Set validation required for all fields by default (default: false)
+ --instanceName value This parameter can be used to name different swagger document instances. It is optional.
+ --overridesFile value File to read global type overrides from. (default: ".swaggo")
+ --parseGoList Parse dependency via 'go list' (default: true)
+ --tags value, -t value A comma-separated list of tags to filter the APIs for which the documentation is generated.Special case if the tag is prefixed with the '!' 
character then the APIs with that tag will be excluded + --templateDelims value, --td value Provide custom delimiters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]" + --collectionFormat value, --cf value Set default collection format (default: "csv") + --state value Initial state for the state machine (default: ""), @HostState in root file, @State in other files + --parseFuncBody Parse API info within body of functions in go files, disabled by default (default: false) + --help, -h show help (default: false) +``` + +```bash +swag fmt -h +NAME: + swag fmt - format swag comments + +USAGE: + swag fmt [command options] [arguments...] + +OPTIONS: + --dir value, -d value Directories you want to parse,comma separated and general-info file must be in the first one (default: "./") + --exclude value Exclude directories and files when searching, comma separated + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") + --help, -h show help (default: false) + +``` + +## Supported Web Frameworks + +- [gin](http://github.com/swaggo/gin-swagger) +- [echo](http://github.com/swaggo/echo-swagger) +- [buffalo](https://github.com/swaggo/buffalo-swagger) +- [net/http](https://github.com/swaggo/http-swagger) +- [gorilla/mux](https://github.com/swaggo/http-swagger) +- [go-chi/chi](https://github.com/swaggo/http-swagger) +- [flamingo](https://github.com/i-love-flamingo/swagger) +- [fiber](https://github.com/gofiber/swagger) +- [atreugo](https://github.com/Nerzal/atreugo-swagger) +- [hertz](https://github.com/hertz-contrib/swagger) + +## How to use it with Gin + +Find the example source code [here](https://github.com/swaggo/swag/tree/master/example/celler). + +Finish the steps in [Getting started](#getting-started) +1. After using `swag init` to generate Swagger 2.0 docs, import the following packages: +```go +import "github.com/swaggo/gin-swagger" // gin-swagger middleware +import "github.com/swaggo/files" // swagger embed files +``` + +2. Add [General API](#general-api-info) annotations in `main.go` code: + +```go +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. +// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.basic BasicAuth + +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + r := gin.Default() + + c := controller.NewController() + + v1 := r.Group("/api/v1") + { + accounts := v1.Group("/accounts") + { + accounts.GET(":id", c.ShowAccount) + accounts.GET("", c.ListAccounts) + accounts.POST("", c.AddAccount) + accounts.DELETE(":id", c.DeleteAccount) + accounts.PATCH(":id", c.UpdateAccount) + accounts.POST(":id/images", c.UploadAccountImage) + } + //... + } + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + r.Run(":8080") +} +//... +``` + +Additionally some general API info can be set dynamically. The generated code package `docs` exports `SwaggerInfo` variable which we can use to set the title, description, version, host and base path programmatically. 
Example using Gin: + +```go +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/swaggo/files" + "github.com/swaggo/gin-swagger" + + "./docs" // docs is generated by Swag CLI, you have to import it. +) + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +func main() { + + // programmatically set swagger info + docs.SwaggerInfo.Title = "Swagger Example API" + docs.SwaggerInfo.Description = "This is a sample server Petstore server." + docs.SwaggerInfo.Version = "1.0" + docs.SwaggerInfo.Host = "petstore.swagger.io" + docs.SwaggerInfo.BasePath = "/v2" + docs.SwaggerInfo.Schemes = []string{"http", "https"} + + r := gin.New() + + // use ginSwagger middleware to serve the API docs + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + + r.Run() +} +``` + +3. Add [API Operation](#api-operation) annotations in `controller` code + +``` go +package controller + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/swaggo/swag/example/celler/httputil" + "github.com/swaggo/swag/example/celler/model" +) + +// ShowAccount godoc +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] +func (c *Controller) ShowAccount(ctx *gin.Context) { + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) +} + +// ListAccounts godoc +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, accounts) +} +//... +``` + +```console +swag init +``` + +4. Run your app, and browse to http://localhost:8080/swagger/index.html. You will see Swagger 2.0 Api documents as shown below: + +![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) + +## The swag formatter + +The Swag Comments can be automatically formatted, just like 'go fmt'. +Find the result of formatting [here](https://github.com/swaggo/swag/tree/master/example/celler). + +Usage: +```shell +swag fmt +``` + +Exclude folder: +```shell +swag fmt -d ./ --exclude ./internal +``` + +When using `swag fmt`, you need to ensure that you have a doc comment for the function to ensure correct formatting. +This is due to `swag fmt` indenting swag comments with tabs, which is only allowed *after* a standard doc comment. 
+ +For example, use + +```go +// ListAccounts lists all existing accounts +// +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { +``` + +## Implementation Status + +[Swagger 2.0 document](https://swagger.io/docs/specification/2-0/basic-structure/) + +- [x] Basic Structure +- [x] API Host and Base Path +- [x] Paths and Operations +- [x] Describing Parameters +- [x] Describing Request Body +- [x] Describing Responses +- [x] MIME Types +- [x] Authentication + - [x] Basic Authentication + - [x] API Keys +- [x] Adding Examples +- [x] File Upload +- [x] Enums +- [x] Grouping Operations With Tags +- [ ] Swagger Extensions + +# Declarative Comments Format + +## General API Info + +**Example** +[celler/main.go](https://github.com/swaggo/swag/blob/master/example/celler/main.go) + +| annotation | description | example | +|-------------|--------------------------------------------|---------------------------------| +| title | **Required.** The title of the application.| // @title Swagger Example API | +| version | **Required.** Provides the version of the application API.| // @version 1.0 | +| description | A short description of the application. |// @description This is a sample server celler server. | +| tag.name | Name of a tag.| // @tag.name This is the name of the tag | +| tag.description | Description of the tag | // @tag.description Cool Description | +| tag.docs.url | Url of the external Documentation of the tag | // @tag.docs.url https://example.com| +| tag.docs.description | Description of the external Documentation of the tag| // @tag.docs.description Best example documentation | +| termsOfService | The Terms of Service for the API.| // @termsOfService http://swagger.io/terms/ | +| contact.name | The contact information for the exposed API.| // @contact.name API Support | +| contact.url | The URL pointing to the contact information. MUST be in the format of a URL. | // @contact.url http://www.swagger.io/support| +| contact.email| The email address of the contact person/organization. MUST be in the format of an email address.| // @contact.email support@swagger.io | +| license.name | **Required.** The license name used for the API.|// @license.name Apache 2.0| +| license.url | A URL to the license used for the API. MUST be in the format of a URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | +| host | The host (name or ip) serving the API. | // @host localhost:8080 | +| BasePath | The base path on which the API is served. | // @BasePath /api/v1 | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | // @accept json | +| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | // @produce json | +| query.collection.format | The default collection(array) param format in query,enums:csv,multi,pipes,tsv,ssv. If not set, csv is the default.| // @query.collection.format multi +| schemes | The transfer protocol for the operation that separated by spaces. 
| // @schemes http https | +| externalDocs.description | Description of the external document. | // @externalDocs.description OpenAPI | +| externalDocs.url | URL of the external document. | // @externalDocs.url https://swagger.io/resources/open-api/ | +| x-name | The extension key, must be start by x- and take only json value | // @x-example-key {"key": "value"} | + +### Using markdown descriptions +When a short string in your documentation is insufficient, or you need images, code examples and things like that you may want to use markdown descriptions. In order to use markdown descriptions use the following annotations. + + +| annotation | description | example | +|-------------|--------------------------------------------|---------------------------------| +| title | **Required.** The title of the application.| // @title Swagger Example API | +| version | **Required.** Provides the version of the application API.| // @version 1.0 | +| description.markdown | A short description of the application. Parsed from the api.md file. This is an alternative to @description |// @description.markdown No value needed, this parses the description from api.md | +| tag.name | Name of a tag.| // @tag.name This is the name of the tag | +| tag.description.markdown | Description of the tag this is an alternative to tag.description. The description will be read from a file named like tagname.md | // @tag.description.markdown | + + +## API Operation + +**Example** +[celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) + + +| annotation | description | +|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| description | A verbose explanation of the operation behavior. | +| description.markdown | A short description of the application. The description will be read from a file. E.g. `@description.markdown details` will load `details.md` | // @description.file endpoint.description.markdown | +| id | A unique string used to identify the operation. Must be unique among all API operations. | +| tags | A list of tags to each API operation that separated by commas. | +| summary | A short summary of what the operation does. | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | +| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | +| param | Parameters that separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | +| security | [Security](#security) to each API operation. | +| success | Success response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| failure | Failure response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| response | As same as `success` and `failure` | +| header | Header in response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | +| router | Path definition that separated by spaces. `path`,`[httpMethod]` | +| deprecatedrouter | As same as router, but deprecated. | +| x-name | The extension key, must be start by x- and take only json value. | +| x-codeSample | Optional Markdown usage. take `file` as parameter. 
This will then search for a file named like the summary in the given folder. | +| deprecated | Mark endpoint as deprecated. | + + + +## Mime Types + +`swag` accepts all MIME Types which are in the correct format, that is, match `*/*`. +Besides that, `swag` also accepts aliases for some MIME Types as follows: + +| Alias | MIME Type | +|-----------------------|-----------------------------------| +| json | application/json | +| xml | text/xml | +| plain | text/plain | +| html | text/html | +| mpfd | multipart/form-data | +| x-www-form-urlencoded | application/x-www-form-urlencoded | +| json-api | application/vnd.api+json | +| json-stream | application/x-json-stream | +| octet-stream | application/octet-stream | +| png | image/png | +| jpeg | image/jpeg | +| gif | image/gif | + + + +## Param Type + +- query +- path +- header +- body +- formData + +## Data Type + +- string (string) +- integer (int, uint, uint32, uint64) +- number (float32) +- boolean (bool) +- file (param data type when uploading) +- user defined struct + +## Security +| annotation | description | parameters | example | +|------------|-------------|------------|---------| +| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basic BasicAuth | +| securitydefinitions.apikey | [API key](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth. | in, name, description | // @securityDefinitions.apikey ApiKeyAuth | +| securitydefinitions.oauth2.application | [OAuth2 application](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.application OAuth2Application | +| securitydefinitions.oauth2.implicit | [OAuth2 implicit](https://swagger.io/docs/specification/authentication/oauth2/) auth. | authorizationUrl, scope, description | // @securitydefinitions.oauth2.implicit OAuth2Implicit | +| securitydefinitions.oauth2.password | [OAuth2 password](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.password OAuth2Password | +| securitydefinitions.oauth2.accessCode | [OAuth2 access code](https://swagger.io/docs/specification/authentication/oauth2/) auth. 
| tokenUrl, authorizationUrl, scope, description | // @securitydefinitions.oauth2.accessCode OAuth2AccessCode |
+
+
+| parameters annotation | example |
+|---------------------------------|-------------------------------------------------------------------------|
+| in | // @in header |
+| name | // @name Authorization |
+| tokenUrl | // @tokenUrl https://example.com/oauth/token |
+| authorizationurl | // @authorizationurl https://example.com/oauth/authorize |
+| scope.write | // @scope.write Grants write access |
+| description | // @description OAuth protects our entity endpoints |
+
+## Attribute
+
+```go
+// @Param enumstring query string false "string enums" Enums(A, B, C)
+// @Param enumint query int false "int enums" Enums(1, 2, 3)
+// @Param enumnumber query number false "number enums" Enums(1.1, 1.2, 1.3)
+// @Param string query string false "string valid" minlength(5) maxlength(10)
+// @Param int query int false "int valid" minimum(1) maximum(10)
+// @Param default query string false "string default" default(A)
+// @Param example query string false "string example" example(string)
+// @Param collection query []string false "string collection" collectionFormat(multi)
+// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable)
+```
+
+It also works for the struct fields:
+
+```go
+type Foo struct {
+ Bar string `minLength:"4" maxLength:"16" example:"random string"`
+ Baz int `minimum:"10" maximum:"20" default:"15"`
+ Qux []string `enums:"foo,bar,baz"`
+}
+```
+
+### Available
+
+Field Name | Type | Description
+---|:---:|---
+validate | `string` | Determines the validation for the parameter. Possible values are: `required,optional`.
+default | * | Declares the value of the parameter that the server will use if none is provided, for example a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema this value MUST conform to the defined [`type`](#parameterType) for this parameter.
+maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2.
+minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3.
+multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1.
+maxLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1.
+minLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2.
+enums | [\*] | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1.
+format | `string` | The extending format for the previously mentioned [`type`](#parameterType). See [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) for further details.
+collectionFormat | `string` | Determines the format of the array if type array is used. Possible values are: `csv`, `ssv`, `tsv`, `pipes` and `multi`. Default value is `csv`.
+example | * | Declares the example for the parameter value.
+extensions | `string` | Add extension to parameters.
+
+### Future
+
+Field Name | Type | Description
+---|:---:|---
+pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. 
+minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. +uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4. + +## Examples + +### Descriptions over multiple lines + +You can add descriptions spanning multiple lines in either the general api description or routes definitions like so: + +```go +// @description This is the first line +// @description This is the second line +// @description And so forth. +``` + +### User defined structure with an array type + +```go +// @Success 200 {array} model.Account <-- This is a user defined struct. +``` + +```go +package model + +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` +} +``` + + +### Function scoped struct declaration + +You can declare your request response structs inside a function body. +You must have to follow the naming convention `.. `. + +```go +package main + +// @Param request body main.MyHandler.request true "query params" +// @Success 200 {object} main.MyHandler.response +// @Router /test [post] +func MyHandler() { + type request struct { + RequestField string + } + + type response struct { + ResponseField string + } +} +``` + + +### Model composition in response +```go +// JSONResult's data field will be overridden by the specific type proto.Order +@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc" +``` + +```go +type JSONResult struct { + Code int `json:"code" ` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +type Order struct { //in `proto` package + Id uint `json:"id"` + Data interface{} `json:"data"` +} +``` + +- also support array of objects and primitive types as nested response +```go +@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc" +@success 200 {object} jsonresult.JSONResult{data=string} "desc" +@success 200 {object} jsonresult.JSONResult{data=[]string} "desc" +``` + +- overriding multiple fields. field will be added if not exists +```go +@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc" +``` +- overriding deep-level fields +```go +type DeepObject struct { //in `proto` package + ... +} +@success 200 {object} jsonresult.JSONResult{data1=proto.Order{data=proto.DeepObject},data2=[]proto.Order{data=[]proto.DeepObject}} "desc" +``` +### Add response request + +```go +// @Param X-MyHeader header string true "MyHeader must be set for valid response" +// @Param X-API-VERSION header string true "API version eg.: 1.0" +``` + +### Add response headers + +```go +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" +``` + +### Use multiple path params + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param account_id path int true "Account ID" +// ... +// @Router /examples/groups/{group_id}/accounts/{account_id} [get] +``` + +### Add multiple paths + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param user_id path int true "User ID" +// ... 
+// @Router /examples/groups/{group_id}/user/{user_id}/address [put] +// @Router /examples/user/{user_id}/address [put] +``` + +### Example value of struct + +```go +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` + PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"` +} +``` + +### SchemaExample of body + +```go +// @Param email body string true "message/rfc822" SchemaExample(Subject: Testmail\r\n\r\nBody Message\r\n) +``` + +### Description of struct + +```go +// Account model info +// @Description User account information +// @Description with user id and username +type Account struct { + // ID this is userid + ID int `json:"id"` + Name string `json:"name"` // This is Name +} +``` + +[#708](https://github.com/swaggo/swag/issues/708) The parser handles only struct comments starting with `@Description` attribute. +But it writes all struct field comments as is. + +So, generated swagger doc as follows: +```json +"Account": { + "type":"object", + "description": "User account information with user id and username" + "properties": { + "id": { + "type": "integer", + "description": "ID this is userid" + }, + "name": { + "type":"string", + "description": "This is Name" + } + } +} +``` + +### Use swaggertype tag to supported custom type +[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409) + +```go +type TimestampTime struct { + time.Time +} + +///implement encoding.JSON.Marshaler interface +func (t *TimestampTime) MarshalJSON() ([]byte, error) { + bin := make([]byte, 16) + bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10) + return bin, nil +} + +func (t *TimestampTime) UnmarshalJSON(bin []byte) error { + v, err := strconv.ParseInt(string(bin), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(v, 0) + return nil +} +/// + +type Account struct { + // Override primitive type by simply specifying it via `swaggertype` tag + ID sql.NullInt64 `json:"id" swaggertype:"integer"` + + // Override struct type to a primitive type 'integer' by specifying it via `swaggertype` tag + RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"` + + // Array types can be overridden using "array," format + Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"` +} +``` + +[#379](https://github.com/swaggo/swag/issues/379) +```go +type CerticateKeyPair struct { + Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` + Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` +} +``` +generated swagger doc as follows: +```go +"api.MyBinding": { + "type":"object", + "properties":{ + "crt":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + }, + "key":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + } + } +} + +``` + +### Use global overrides to support a custom type + +If you are using generated files, the [`swaggertype`](#use-swaggertype-tag-to-supported-custom-type) or `swaggerignore` tags may not be possible. + +By passing a mapping to swag with `--overridesFile` you can tell swag to use one type in place of another wherever it appears. By default, if a `.swaggo` file is present in the current directory it will be used. 
+
+Go code:
+```go
+type MyStruct struct {
+    ID   sql.NullInt64  `json:"id"`
+    Name sql.NullString `json:"name"`
+}
+```
+
+`.swaggo`:
+```
+// Replace all NullInt64 with int
+replace database/sql.NullInt64 int
+
+// Don't include any fields of type database/sql.NullString in the swagger docs
+skip database/sql.NullString
+```
+
+Possible directives are comments (beginning with `//`), `replace path/to/a.type path/to/b.type`, and `skip path/to/a.type`.
+
+(Note that the full paths to any named types must be provided to prevent problems when multiple packages define a type with the same name.)
+
+Rendered:
+```json
+"types.MyStruct": {
+    "id": "integer"
+}
+```
+
+
+### Use swaggerignore tag to exclude a field
+
+```go
+type Account struct {
+    ID      string `json:"id"`
+    Name    string `json:"name"`
+    Ignored int    `swaggerignore:"true"`
+}
+```
+
+### Add extension info to struct field
+
+```go
+type Account struct {
+    ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extensions fields must start with "x-"
+}
+```
+
+This generates the following swagger doc:
+
+```json
+"Account": {
+    "type": "object",
+    "properties": {
+        "id": {
+            "type": "string",
+            "x-nullable": true,
+            "x-abc": "def",
+            "x-omitempty": false
+        }
+    }
+}
+```
+
+### Rename model to display
+
+```go
+type Resp struct {
+    Code int
+}//@name Response
+```
+
+### How to use security annotations
+
+General API info.
+
+```go
+// @securityDefinitions.basic BasicAuth
+
+// @securitydefinitions.oauth2.application OAuth2Application
+// @tokenUrl https://example.com/oauth/token
+// @scope.write Grants write access
+// @scope.admin Grants read and write access to administrative information
+```
+
+Each API operation.
+
+```go
+// @Security ApiKeyAuth
+```
+
+Make it an AND condition:
+
+```go
+// @Security ApiKeyAuth
+// @Security OAuth2Application[write, admin]
+```
+
+Make it an OR condition:
+
+```go
+// @Security ApiKeyAuth || firebase
+// @Security OAuth2Application[write, admin] || APIKeyAuth
+```
+
+
+### Add a description for enum items
+
+```go
+type Example struct {
+    // Sort order:
+    // * asc - Ascending, from A to Z.
+    // * desc - Descending, from Z to A.
+    Order string `enums:"asc,desc"`
+}
+```
+
+### Generate only specific docs file types
+
+By default the `swag` command generates the Swagger specification in three different files/file types:
+- docs.go
+- swagger.json
+- swagger.yaml
+
+If you would like to limit the set of file types which should be generated, you can use the `--outputTypes` flag (short `-ot`). The default value is `go,json,yaml` - output types separated by commas. To limit output to only `go` and `yaml` files, you would write `go,yaml`. The complete command would be `swag init --outputTypes go,yaml`.
+
+### How to use Generics
+
+```go
+// @Success 200 {object} web.GenericNestedResponse[types.Post]
+// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne]
+// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]]
+func GetPosts(w http.ResponseWriter, r *http.Request) {
+    _ = web.GenericNestedResponse[types.Post]{}
+}
+```
+See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details
+and other examples.
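+
+For orientation, a wrapper like the one referenced above could be declared roughly as follows (a minimal sketch; the exact fields of `web.GenericNestedResponse` live in the linked testdata, so the field set here is an assumption):
+
+```go
+package web
+
+// GenericNestedResponse wraps a typed list payload so that swag can
+// expand the concrete type argument into the generated schema.
+type GenericNestedResponse[T any] struct {
+    // Items carries the typed payload of the response.
+    Items []T `json:"items"`
+    // Count is an illustrative metadata field.
+    Count int `json:"count"`
+}
+```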
+
+### Change the default Go Template action delimiters
+[#980](https://github.com/swaggo/swag/issues/980)
+[#1177](https://github.com/swaggo/swag/issues/1177)
+
+If your swagger annotations or struct fields contain "{{" or "}}", the template generation will most likely fail, as these are the default delimiters for [go templates](https://pkg.go.dev/text/template#Template.Delims).
+
+To make the generation work properly, you can change the default delimiters with `-td`. For example:
+```console
+swag init -g http/api.go -td "[[,]]"
+```
+The new delimiter is a string with the format `leftDelim,rightDelim`.
+
+### Parse Internal and Dependency Packages
+
+If the struct is defined in a dependency package, use `--parseDependency`.
+
+If the struct is defined in your main project, use `--parseInternal`.
+
+If you want to include both internal packages and dependencies, use both flags:
+```
+swag init --parseDependency --parseInternal
+```
+
+## About the Project
+This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we simplified the usage and added support for a variety of [web frameworks](#supported-web-frameworks). The Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers); it is available under [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en).
+
+## Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+
+
+## Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/swag#backer)]
+
+
+
+
+## Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/swag#sponsor)]
+
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large)
diff --git a/vendor/github.com/swaggo/swag/README_pt.md b/vendor/github.com/swaggo/swag/README_pt.md
new file mode 100644
index 00000000..060b2c97
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/README_pt.md
@@ -0,0 +1,968 @@
+# swag
+
+🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md) ∙ [Português](README_pt.md)*
+
+
+
+[![Build Status](https://github.com/swaggo/swag/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions)
+[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag)
+[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master)
+[![Go Doc](https://godoc.org/github.com/swaggo/swagg?status.svg)](https://godoc.org/github.com/swaggo/swag)
+[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers)
+[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield)
+[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases)
+
+Swag converts Go annotations to Swagger 2.0 documentation.
We've created a variety of plugins for popular [Go web frameworks](#supported-web-frameworks). This allows quick integration with an existing Go project (using the Swagger UI).
+
+## Contents
+- [Getting started](#getting-started)
+  - [Supported Web Frameworks](#supported-web-frameworks)
+  - [How to use it with Gin](#how-to-use-it-with-gin)
+  - [The swag formatter](#the-swag-formatter)
+  - [Implementation Status](#implementation-status)
+  - [Declarative Comments Format](#declarative-comments-format)
+    - [General API Info](#general-api-info)
+    - [API Operation](#api-operation)
+    - [Security](#security)
+  - [Examples](#examples)
+    - [Descriptions over multiple lines](#descriptions-over-multiple-lines)
+    - [User defined structure with an array type](#user-defined-structure-with-an-array-type)
+    - [Function scoped struct declaration](#function-scoped-struct-declaration)
+    - [Model composition in response](#model-composition-in-response)
+    - [Add response headers](#add-response-headers)
+    - [Use multiple path params](#use-multiple-path-params)
+    - [Example value of struct](#example-value-of-struct)
+    - [SchemaExample of body](#schemaexample-of-body)
+    - [Description of struct](#description-of-struct)
+    - [Use swaggertype tag to support a custom type](#use-swaggertype-tag-to-support-a-custom-type)
+    - [Use global overrides to support a custom type](#use-global-overrides-to-support-a-custom-type)
+    - [Use swaggerignore tag to exclude a field](#use-swaggerignore-tag-to-exclude-a-field)
+    - [Add extension info to struct field](#add-extension-info-to-struct-field)
+    - [Rename model to display](#rename-model-to-display)
+    - [How to use security annotations](#how-to-use-security-annotations)
+    - [Add a description for enum items](#add-a-description-for-enum-items)
+    - [Generate only specific docs file types](#generate-only-specific-docs-file-types)
+    - [How to use Generics](#how-to-use-generics)
+- [About the Project](#about-the-project)
+
+## Getting started
+
+1. Add comments to your API source code, see [Declarative Comments Format](#declarative-comments-format).
+
+2. Download swag by using:
+```sh
+go install github.com/swaggo/swag/cmd/swag@latest
+```
+To build from source you need [Go](https://golang.org/dl/) (1.19 or newer).
+
+Or download a pre-compiled binary from the [release page](https://github.com/swaggo/swag/releases).
+
+3. Run `swag init` in the project's root folder which contains the `main.go` file. This will parse your comments and generate the required files (`docs` folder and `docs/docs.go`).
+```sh
+swag init
+```
+
+Make sure to import the generated `docs/docs.go` so that your specific configuration gets initialized. If your general API annotations do not live in `main.go`, you can let swag know with the `-g` flag.
+```sh
+swag init -g http/api.go
+```
+
+4. (optional) Use `swag fmt` to format the SWAG comments. (Please upgrade to the latest version first)
+
+```sh
+swag fmt
+```
+
+## swag cli
+
+```sh
+swag init -h
+NAME:
+   swag init - Create docs.go
+
+USAGE:
+   swag init [command options] [arguments...]
+
+OPTIONS:
+   --quiet, -q                         Make the logger quiet (default: false)
+   --generalInfo value, -g value       Go file path in which 'swagger general API Info' is written (default: "main.go")
+   --dir value, -d value               Directories you want to parse, comma separated; the general info file must be in the first one (default: "./")
+   --exclude value                     Exclude directories and files when searching, comma separated
+   --propertyStrategy value, -p value  Property naming strategy like snakecase, camelcase, pascalcase (default: "camelcase")
+   --output value, -o value            Output directory for all the generated files (swagger.json, swagger.yaml and docs.go) (default: "./docs")
+   --outputTypes value, --ot value     Output types of generated files (docs.go, swagger.json, swagger.yaml) like go,json,yaml (default: "go,json,yaml")
+   --parseVendor                       Parse go files in the 'vendor' folder, disabled by default (default: false)
+   --parseInternal                     Parse go files in internal packages, disabled by default (default: false)
+   --generatedTime                     Generate timestamp at the top of docs.go, disabled by default (default: false)
+   --parseDepth value                  Dependency parse depth (default: 100)
+   --templateDelims value, --td value  Provide custom delimiters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]"
+   ...
+
+   --help, -h                          show help (default: false)
+```
+
+```bash
+swag fmt -h
+NAME:
+   swag fmt - format swag comments
+
+USAGE:
+   swag fmt [command options] [arguments...]
+
+OPTIONS:
+   --dir value, -d value          Directories you want to parse, comma separated; the general info file must be in the first one (default: "./")
+   --exclude value                Exclude directories and files when searching, comma separated
+   --generalInfo value, -g value  Go file path in which 'swagger general API Info' is written (default: "main.go")
+   --help, -h                     show help (default: false)
+
+```
+
+## Supported Web Frameworks
+
+- [gin](http://github.com/swaggo/gin-swagger)
+- [echo](http://github.com/swaggo/echo-swagger)
+- [buffalo](https://github.com/swaggo/buffalo-swagger)
+- [net/http](https://github.com/swaggo/http-swagger)
+- [gorilla/mux](https://github.com/swaggo/http-swagger)
+- [go-chi/chi](https://github.com/swaggo/http-swagger)
+- [flamingo](https://github.com/i-love-flamingo/swagger)
+- [fiber](https://github.com/gofiber/swagger)
+- [atreugo](https://github.com/Nerzal/atreugo-swagger)
+- [hertz](https://github.com/hertz-contrib/swagger)
+
+## How to use it with Gin
+
+Find the example source code [here](https://github.com/swaggo/swag/tree/master/example/celler).
+
+1. After using `swag init` to generate the Swagger 2.0 docs, import the following packages:
+```go
+import "github.com/swaggo/gin-swagger" // gin-swagger middleware
+import "github.com/swaggo/files" // swagger embed files
+```
+
+2. Add [General API Info](#general-api-info) annotations in `main.go` code:
+
+
+```go
+// @title Swagger Example API
+// @version 1.0
+// @description This is a sample server celler server.
+// @termsOfService http://swagger.io/terms/
+
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+
+// @host localhost:8080
+// @BasePath /api/v1
+
+// @securityDefinitions.basic BasicAuth
+
+// @externalDocs.description OpenAPI
+// @externalDocs.url https://swagger.io/resources/open-api/
+func main() {
+    r := gin.Default()
+
+    c := controller.NewController()
+
+    v1 := r.Group("/api/v1")
+    {
+        accounts := v1.Group("/accounts")
+        {
+            accounts.GET(":id", c.ShowAccount)
+            accounts.GET("", c.ListAccounts)
+            accounts.POST("", c.AddAccount)
+            accounts.DELETE(":id", c.DeleteAccount)
+            accounts.PATCH(":id", c.UpdateAccount)
+            accounts.POST(":id/images", c.UploadAccountImage)
+        }
+        //...
+    }
+    r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
+    r.Run(":8080")
+}
+//...
+```
+
+In addition, some general API info can be set dynamically. The generated code package `docs` exports the `SwaggerInfo` variable, which we can use to set the title, description, version, host and base path programmatically. Example using Gin:
+
+```go
+package main
+
+import (
+    "github.com/gin-gonic/gin"
+    "github.com/swaggo/files"
+    "github.com/swaggo/gin-swagger"
+
+    "./docs" // docs is generated by Swag CLI, you have to import it.
+)
+
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+func main() {
+
+    // programmatically set swagger info
+    docs.SwaggerInfo.Title = "Swagger Example API"
+    docs.SwaggerInfo.Description = "This is a sample server Petstore server."
+    docs.SwaggerInfo.Version = "1.0"
+    docs.SwaggerInfo.Host = "petstore.swagger.io"
+    docs.SwaggerInfo.BasePath = "/v2"
+    docs.SwaggerInfo.Schemes = []string{"http", "https"}
+
+    r := gin.New()
+
+    // use ginSwagger middleware to serve the API docs
+    r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
+
+    r.Run()
+}
+```
+
+3.
 Add [API Operation](#api-operation) annotations in `controller` code
+
+```go
+package controller
+
+import (
+    "fmt"
+    "net/http"
+    "strconv"
+
+    "github.com/gin-gonic/gin"
+    "github.com/swaggo/swag/example/celler/httputil"
+    "github.com/swaggo/swag/example/celler/model"
+)
+
+// ShowAccount godoc
+// @Summary Show an account
+// @Description get string by ID
+// @Tags accounts
+// @Accept json
+// @Produce json
+// @Param id path int true "Account ID"
+// @Success 200 {object} model.Account
+// @Failure 400 {object} httputil.HTTPError
+// @Failure 404 {object} httputil.HTTPError
+// @Failure 500 {object} httputil.HTTPError
+// @Router /accounts/{id} [get]
+func (c *Controller) ShowAccount(ctx *gin.Context) {
+    id := ctx.Param("id")
+    aid, err := strconv.Atoi(id)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusBadRequest, err)
+        return
+    }
+    account, err := model.AccountOne(aid)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusNotFound, err)
+        return
+    }
+    ctx.JSON(http.StatusOK, account)
+}
+
+// ListAccounts godoc
+// @Summary List accounts
+// @Description get accounts
+// @Tags accounts
+// @Accept json
+// @Produce json
+// @Param q query string false "name search by q" Format(email)
+// @Success 200 {array} model.Account
+// @Failure 400 {object} httputil.HTTPError
+// @Failure 404 {object} httputil.HTTPError
+// @Failure 500 {object} httputil.HTTPError
+// @Router /accounts [get]
+func (c *Controller) ListAccounts(ctx *gin.Context) {
+    q := ctx.Request.URL.Query().Get("q")
+    accounts, err := model.AccountsAll(q)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusNotFound, err)
+        return
+    }
+    ctx.JSON(http.StatusOK, accounts)
+}
+//...
+```
+
+```console
+swag init
+```
+
+4. Run your app, and browse to http://localhost:8080/swagger/index.html. You will see the Swagger 2.0 Api documents, as shown below:
+
+![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png)
+
+## The swag formatter
+
+Swag comments can be formatted automatically, just like 'go fmt'.
+Find the result of formatting [here](https://github.com/swaggo/swag/tree/master/example/celler).
+
+Usage:
+```shell
+swag fmt
+```
+
+Exclude folder:
+```shell
+swag fmt -d ./ --exclude ./internal
+```
+
+When using `swag fmt`, you need to make sure you have a doc comment for the function to ensure correct formatting.
+This is because `swag fmt` indents swag comments with tabs, which is only allowed *after* a standard doc comment.
+
+For example, use
+
+```go
+// ListAccounts lists all existing accounts
+//
+// @Summary List accounts
+// @Description get accounts
+// @Tags accounts
+// @Accept json
+// @Produce json
+// @Param q query string false "name search by q" Format(email)
+// @Success 200 {array} model.Account
+// @Failure 400 {object} httputil.HTTPError
+// @Failure 404 {object} httputil.HTTPError
+// @Failure 500 {object} httputil.HTTPError
+// @Router /accounts [get]
+func (c *Controller) ListAccounts(ctx *gin.Context) {
+```
+
+## Implementation Status
+
+[Swagger 2.0 document](https://swagger.io/docs/specification/2-0/basic-structure/)
+
+- [x] Basic Structure
+- [x] API Host and Base Path
+- [x] Paths and Operations
+- [x] Describing Parameters
+- [x] Describing Request Body
+- [x] Describing Responses
+- [x] MIME Types
+- [x] Authentication
+  - [x] Basic Authentication
+  - [x] API Keys
+- [x] Adding Examples
+- [x] File Upload
+- [x] Enums
+- [x] Grouping Operations With Tags
+- [ ] Swagger Extensions
+
+## Declarative Comments Format
+
+## General API Info
+
+**Example**
+[celler/main.go](https://github.com/swaggo/swag/blob/master/example/celler/main.go)
+
+| annotation  | description | example |
+|-------------|--------------------------------------------|---------------------------------|
+| title | **Required.** The title of the application.| // @title Swagger Example API |
+| version | **Required.** Provides the version of the application API.| // @version 1.0 |
+| description | A short description of the application. |// @description This is a sample server celler server. |
+| tag.name | Name of a tag.| // @tag.name This is the name of the tag |
+| tag.description | Description of the tag | // @tag.description Cool Description |
+| tag.docs.url | Url of the external documentation of the tag | // @tag.docs.url https://example.com|
+| tag.docs.description | Description of the external documentation of the tag| // @tag.docs.description Best example documentation |
+| termsOfService | The Terms of Service for the API.| // @termsOfService http://swagger.io/terms/ |
+| contact.name | The contact information for the exposed API.| // @contact.name API Support |
+| contact.url | The URL pointing to the contact information. MUST be in the format of a URL. | // @contact.url http://www.swagger.io/support|
+| contact.email| The email address of the contact person/organization. MUST be in the format of an email address.| // @contact.email support@swagger.io |
+| license.name | **Required.** The license name used for the API.|// @license.name Apache 2.0|
+| license.url | A URL to the license used for the API. MUST be in the format of a URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html |
+| host | The host (name or ip) serving the API. | // @host localhost:8080 |
+| BasePath | The base path on which the API is served. | // @BasePath /api/v1 |
+| accept | A list of MIME types the APIs can consume. Note that accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | // @accept json |
+| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | // @produce json |
+| query.collection.format | The default collection(array) param format in query, enums:csv,multi,pipes,tsv,ssv.
 If not set, csv is the default.| // @query.collection.format multi
+| schemes | The transfer protocol for the operation, separated by spaces. | // @schemes http https |
+| externalDocs.description | Description of the external document. | // @externalDocs.description OpenAPI |
+| externalDocs.url | URL of the external document. | // @externalDocs.url https://swagger.io/resources/open-api/ |
+| x-name | The extension key, must start with x- and take only a json value | // @x-example-key {"key": "value"} |
+
+### Using markdown descriptions
+When a short string in your documentation is insufficient, or you need images, code examples and things like that, you may want to use markdown descriptions. To use markdown descriptions, use the following annotations.
+
+| annotation  | description | example |
+|-------------|--------------------------------------------|---------------------------------|
+| title | **Required.** The title of the application.| // @title Swagger Example API |
+| version | **Required.** Provides the version of the application API.| // @version 1.0 |
+| description.markdown | A short description of the application. Parsed from the api.md file. This is an alternative to @description. |// @description.markdown No value needed, this parses the description from api.md |
+| tag.name | Name of a tag.| // @tag.name This is the name of the tag |
+| tag.description.markdown | Description of the tag; this is an alternative to tag.description. The description will be read from a file named like tagname.md | // @tag.description.markdown |
+
+## API Operation
+
+**Example**
+[celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller)
+
+| annotation  | description |
+|-------------|----------------------------------------------------------------------------------------------------------------------------|
+| description | A verbose explanation of the operation behavior. |
+| description.markdown | A short description of the application. The description will be read from a file. E.g. `@description.markdown details` will load `details.md` |
+| id | A unique string used to identify the operation. Must be unique among all API operations. |
+| tags | A list of tags to each API operation, separated by commas. |
+| summary | A short summary of what the operation does. |
+| accept | A list of MIME types the APIs can consume. Note that accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). |
+| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). |
+| param | Parameters separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` |
+| security | [Security](#security) for each API operation. |
+| success | Success response separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` |
+| failure | Failure response separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` |
+| response | Same as `success` and `failure` |
+| header | Header in response separated by spaces. `return code`,`{param type}`,`data type`,`comment` |
+| router | Path definition separated by spaces.
 `path`,`[httpMethod]` |
+| x-name | The extension key, must start with x- and take only a json value. |
+| x-codeSample | Optional Markdown usage. Takes `file` as parameter. This will then search for a file named like the summary in the given folder. |
+| deprecated | Mark the endpoint as deprecated. |
+
+## Mime Types
+
+`swag` accepts all MIME types which are in the correct format, that is, matching `*/*`.
+Besides that, `swag` also accepts aliases for some MIME types, as follows:
+
+
+| Alias | MIME Type |
+|-----------------------|-----------------------------------|
+| json | application/json |
+| xml | text/xml |
+| plain | text/plain |
+| html | text/html |
+| mpfd | multipart/form-data |
+| x-www-form-urlencoded | application/x-www-form-urlencoded |
+| json-api | application/vnd.api+json |
+| json-stream | application/x-json-stream |
+| octet-stream | application/octet-stream |
+| png | image/png |
+| jpeg | image/jpeg |
+| gif | image/gif |
+
+
+
+## Parameter Type
+
+- query
+- path
+- header
+- body
+- formData
+
+## Data Type
+
+- string (string)
+- integer (int, uint, uint32, uint64)
+- number (float32)
+- boolean (bool)
+- file (param data type when uploading)
+- user defined struct
+
+## Security
+| annotation | description | parameters | example |
+|------------|-------------|------------|---------|
+| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basic BasicAuth |
+| securitydefinitions.apikey | [API key](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth. | in, name, description | // @securityDefinitions.apikey ApiKeyAuth |
+| securitydefinitions.oauth2.application | [OAuth2 application](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.application OAuth2Application |
+| securitydefinitions.oauth2.implicit | [OAuth2 implicit](https://swagger.io/docs/specification/authentication/oauth2/) auth. | authorizationUrl, scope, description | // @securitydefinitions.oauth2.implicit OAuth2Implicit |
+| securitydefinitions.oauth2.password | [OAuth2 password](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.password OAuth2Password |
+| securitydefinitions.oauth2.accessCode | [OAuth2 access code](https://swagger.io/docs/specification/authentication/oauth2/) auth.
 | tokenUrl, authorizationUrl, scope, description | // @securitydefinitions.oauth2.accessCode OAuth2AccessCode |
+
+
+| parameters annotation | example |
+|---------------------------------|-------------------------------------------------------------------------|
+| in | // @in header |
+| name | // @name Authorization |
+| tokenUrl | // @tokenUrl https://example.com/oauth/token |
+| authorizationurl | // @authorizationurl https://example.com/oauth/authorize |
+| scope.hoge | // @scope.write Grants write access |
+| description | // @description OAuth protects our entity endpoints |
+
+## Attribute
+
+```go
+// @Param enumstring query string false "string enums" Enums(A, B, C)
+// @Param enumint query int false "int enums" Enums(1, 2, 3)
+// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3)
+// @Param string query string false "string valid" minlength(5) maxlength(10)
+// @Param int query int false "int valid" minimum(1) maximum(10)
+// @Param default query string false "string default" default(A)
+// @Param example query string false "string example" example(string)
+// @Param collection query []string false "string collection" collectionFormat(multi)
+// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable)
+```
+
+It also works for the struct fields:
+
+```go
+type Foo struct {
+    Bar string `minLength:"4" maxLength:"16" example:"random string"`
+    Baz int `minimum:"10" maximum:"20" default:"15"`
+    Qux []string `enums:"foo,bar,baz"`
+}
+```
+
+### Available
+
+Field Name | Type | Description
+---|:---:|---
+validate | `string` | Determines the validation for the parameter. Possible values are: `required,optional`.
+default | * | Declares the value of the parameter that the server will use if none is provided, for example a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema, this value MUST conform to the defined [`type`](#parameterType) for this parameter.
+maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2.
+minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3.
+multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1.
+maxLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1.
+minLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2.
+enums | [\*] | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1.
+format | `string` | The extending format for the previously mentioned [`type`](#parameterType). See [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) for further details.
+collectionFormat | `string` | Determines the format of the array if type array is used. Possible values are: <br/> • `csv` - comma separated values `foo,bar`. <br/> • `ssv` - space separated values `foo bar`. <br/> • `tsv` - tab separated values `foo\tbar`. <br/> • `pipes` - pipe separated values `foo|bar`. <br/> • `multi` - corresponds to multiple parameter instances instead of multiple values for a single instance `foo=bar&foo=baz`. This is valid only for parameters [`in`](#parameterIn) "query" or "formData". Default value is `csv`.
+example | * | Declares the example for the parameter value
+extensions | `string` | Add extension to parameters.
+
+### Future
+
+Field Name | Type | Description
+---|:---:|---
+pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2.
+minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3.
+uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4.
+
+## Examples
+
+
+### Descriptions over multiple lines
+
+You can add descriptions spanning multiple lines in either the general API description or route definitions like so:
+
+```go
+// @description This is the first line
+// @description This is the second line
+// @description And so forth.
+```
+
+### User defined structure with an array type
+
+```go
+// @Success 200 {array} model.Account <-- This is a user defined struct.
+```
+
+```go
+package model
+
+type Account struct {
+    ID   int    `json:"id" example:"1"`
+    Name string `json:"name" example:"account name"`
+}
+```
+
+
+### Function scoped struct declaration
+
+You can declare your request and response structs inside a function body.
+You must follow the naming convention
+`<package-name>.<function-name>.<struct-name>`.
+
+```go
+package main
+
+// @Param request body main.MyHandler.request true "query params"
+// @Success 200 {object} main.MyHandler.response
+// @Router /test [post]
+func MyHandler() {
+    type request struct {
+        RequestField string
+    }
+
+    type response struct {
+        ResponseField string
+    }
+}
+```
+
+
+### Model composition in response
+```go
+// JSONResult's data field will be overridden by the specific type proto.Order
+@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc"
+```
+
+```go
+type JSONResult struct {
+    Code    int         `json:"code" `
+    Message string      `json:"message"`
+    Data    interface{} `json:"data"`
+}
+
+type Order struct { //in `proto` package
+    Id   uint        `json:"id"`
+    Data interface{} `json:"data"`
+}
+```
+
+- Arrays of objects and primitive types are also supported as nested responses:
+```go
+@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc"
+@success 200 {object} jsonresult.JSONResult{data=string} "desc"
+@success 200 {object} jsonresult.JSONResult{data=[]string} "desc"
+```
+
+- Overriding multiple fields. A field will be added if it does not exist:
+```go
+@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc"
+```
+- Overriding deep-level fields:
+```go
+type DeepObject struct { //in `proto` package
+    ...
+}
+@success 200 {object} jsonresult.JSONResult{data1=proto.Order{data=proto.DeepObject},data2=[]proto.Order{data=[]proto.DeepObject}} "desc"
+```
+
+### Add response headers
+
+```go
+// @Success 200 {string} string "ok"
+// @failure 400 {string} string "error"
+// @response default {string} string "other error"
+// @Header 200 {string} Location "/entity/1"
+// @Header 200,400,default {string} Token "token"
+// @Header all {string} Token2 "token2"
+```
+
+
+### Use multiple path params
+
+```go
+/// ...
+// @Param group_id path int true "Group ID"
+// @Param account_id path int true "Account ID"
+// ...
+// @Router /examples/groups/{group_id}/accounts/{account_id} [get]
+```
+
+### Add multiple paths
+
+```go
+/// ...
+// @Param group_id path int true "Group ID"
+// @Param user_id path int true "User ID"
+// ...
+// @Router /examples/groups/{group_id}/user/{user_id}/address [put]
+// @Router /examples/user/{user_id}/address [put]
+```
+
+### Example value of struct
+
+```go
+type Account struct {
+    ID        int      `json:"id" example:"1"`
+    Name      string   `json:"name" example:"account name"`
+    PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"`
+}
+```
+
+### SchemaExample of body
+
+```go
+// @Param email body string true "message/rfc822" SchemaExample(Subject: Testmail\r\n\r\nBody Message\r\n)
+```
+
+### Description of struct
+
+```go
+// Account model info
+// @Description User account information
+// @Description with user id and username
+type Account struct {
+    // ID this is userid
+    ID   int    `json:"id"`
+    Name string `json:"name"` // This is Name
+}
+```
+
+[#708](https://github.com/swaggo/swag/issues/708) The parser handles only struct comments starting with the `@Description` attribute,
+but it writes all struct field comments as is.
+
+So the generated swagger doc is as follows:
+```json
+"Account": {
+    "type": "object",
+    "description": "User account information with user id and username",
+    "properties": {
+        "id": {
+            "type": "integer",
+            "description": "ID this is userid"
+        },
+        "name": {
+            "type": "string",
+            "description": "This is Name"
+        }
+    }
+}
+```
+
+### Use swaggertype tag to support a custom type
+[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409)
+
+```go
+type TimestampTime struct {
+    time.Time
+}
+
+// implement the encoding/json Marshaler interface
+func (t *TimestampTime) MarshalJSON() ([]byte, error) {
+    bin := make([]byte, 16)
+    bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10)
+    return bin, nil
+}
+
+// implement the encoding/json Unmarshaler interface
+func (t *TimestampTime) UnmarshalJSON(bin []byte) error {
+    v, err := strconv.ParseInt(string(bin), 10, 64)
+    if err != nil {
+        return err
+    }
+    t.Time = time.Unix(v, 0)
+    return nil
+}
+
+type Account struct {
+    // Override primitive type by simply specifying it via `swaggertype` tag
+    ID sql.NullInt64 `json:"id" swaggertype:"integer"`
+
+    // Override struct type to a primitive type 'integer' by specifying it via `swaggertype` tag
+    RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"`
+
+    // Array types can be overridden using the "array,<prim_type>" format
+    Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"`
+}
+```
+
+[#379](https://github.com/swaggo/swag/issues/379)
+```go
+type CerticateKeyPair struct {
+    Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="`
+    Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="`
+}
+```
+The generated swagger doc is as follows:
+```json
+"api.MyBinding": {
+    "type": "object",
+    "properties": {
+        "crt": {
+            "type": "string",
+            "format": "base64",
+            "example": "U3dhZ2dlciByb2Nrcw=="
+        },
+        "key": {
+            "type": "string",
+            "format": "base64",
+            "example": "U3dhZ2dlciByb2Nrcw=="
+        }
+    }
+}
+```
+
+### Use global overrides to support a custom type
+
+If you are using generated files, it may not be possible to add the [`swaggertype`](#use-swaggertype-tag-to-support-a-custom-type) or `swaggerignore` tags.
+
+By passing a mapping to swag with `--overridesFile` you can tell swag to use one type in place of another wherever it appears. By default, if a `.swaggo` file is present in the current directory, it will be used.
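+
+An invocation could look like this (an illustrative command line; `--overridesFile` simply points swag at whatever mapping file you created):
+
+```console
+swag init --overridesFile .swaggo
+```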
+
+Go code:
+```go
+type MyStruct struct {
+    ID   sql.NullInt64  `json:"id"`
+    Name sql.NullString `json:"name"`
+}
+```
+
+`.swaggo`:
+```
+// Replace all NullInt64 with int
+replace database/sql.NullInt64 int
+
+// Don't include any fields of type database/sql.NullString in the swagger docs
+skip database/sql.NullString
+```
+
+Possible directives are comments (beginning with `//`), `replace path/to/a.type path/to/b.type`, and `skip path/to/a.type`.
+
+(Note that the full paths to any named types must be provided to prevent problems when multiple packages define a type with the same name.)
+
+Rendered:
+```json
+"types.MyStruct": {
+    "id": "integer"
+}
+```
+
+### Use swaggerignore tag to exclude a field
+
+```go
+type Account struct {
+    ID      string `json:"id"`
+    Name    string `json:"name"`
+    Ignored int    `swaggerignore:"true"`
+}
+```
+
+
+### Add extension info to struct field
+
+```go
+type Account struct {
+    ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extensions fields must start with "x-"
+}
+```
+
+This generates the following swagger doc:
+
+```json
+"Account": {
+    "type": "object",
+    "properties": {
+        "id": {
+            "type": "string",
+            "x-nullable": true,
+            "x-abc": "def",
+            "x-omitempty": false
+        }
+    }
+}
+```
+
+
+### Rename model to display
+
+```go
+type Resp struct {
+    Code int
+}//@name Response
+```
+
+### How to use security annotations
+
+General API info.
+
+```go
+// @securityDefinitions.basic BasicAuth
+
+// @securitydefinitions.oauth2.application OAuth2Application
+// @tokenUrl https://example.com/oauth/token
+// @scope.write Grants write access
+// @scope.admin Grants read and write access to administrative information
+```
+
+Each API operation.
+
+```go
+// @Security ApiKeyAuth
+```
+
+Make it an AND condition:
+
+```go
+// @Security ApiKeyAuth
+// @Security OAuth2Application[write, admin]
+```
+
+Make it an OR condition:
+
+```go
+// @Security ApiKeyAuth || firebase
+// @Security OAuth2Application[write, admin] || APIKeyAuth
+```
+
+
+
+### Add a description for enum items
+
+```go
+type Example struct {
+    // Sort order:
+    // * asc - Ascending, from A to Z.
+    // * desc - Descending, from Z to A.
+    Order string `enums:"asc,desc"`
+}
+```
+
+### Generate only specific docs file types
+
+By default the `swag` command generates the Swagger specification in three different files/file types:
+- docs.go
+- swagger.json
+- swagger.yaml
+
+If you would like to limit the set of file types which should be generated, you can use the `--outputTypes` flag (short `-ot`). The default value is `go,json,yaml` - output types separated by commas. To limit output to only `go` and `yaml` files, you would write `go,yaml`. The complete command would be `swag init --outputTypes go,yaml`.
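+
+For instance, to skip the JSON and YAML artifacts entirely and keep only the Go file (an illustrative invocation):
+
+```console
+swag init --outputTypes go
+```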
+
+### How to use Generics
+
+```go
+// @Success 200 {object} web.GenericNestedResponse[types.Post]
+// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne]
+// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]]
+func GetPosts(w http.ResponseWriter, r *http.Request) {
+    _ = web.GenericNestedResponse[types.Post]{}
+}
+```
+See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details and other examples.
+
+### Change the default Go Template action delimiters
+[#980](https://github.com/swaggo/swag/issues/980)
+[#1177](https://github.com/swaggo/swag/issues/1177)
+
+If your swagger annotations or struct fields contain "{{" or "}}", the template generation will most likely fail, as these are the default delimiters for [go templates](https://pkg.go.dev/text/template#Template.Delims).
+
+To make the generation work properly, you can change the default delimiters with `-td`. For example:
+```console
+swag init -g http/api.go -td "[[,]]"
+```
+
+The new delimiter is a string with the format `leftDelim,rightDelim`.
+
+## About the Project
+This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we simplified the usage and added support for a variety of [web frameworks](#supported-web-frameworks). The Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers); it is available under [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en).
+
+## Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+
+
+## Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/swag#backer)]
+
+
+
+
+## Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
 [[Become a sponsor](https://opencollective.com/swag#sponsor)]
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large)
diff --git a/vendor/github.com/swaggo/swag/README_zh-CN.md b/vendor/github.com/swaggo/swag/README_zh-CN.md
new file mode 100644
index 00000000..24a219c9
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/README_zh-CN.md
@@ -0,0 +1,771 @@
+# swag
+
+🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md)*
+
+
+
+[![Travis Status](https://img.shields.io/travis/swaggo/swag/master.svg)](https://travis-ci.org/swaggo/swag)
+[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag)
+[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master)
+[![Go Doc](https://godoc.org/github.com/swaggo/swagg?status.svg)](https://godoc.org/github.com/swaggo/swag)
+[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers)
+[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield)
+[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases)
+
+Swag converts Go annotations to Swagger 2.0 documentation. We've created a variety of plugins for popular [Go web frameworks](#supported-web-frameworks), which allows quick integration with existing Go projects (using Swagger UI).
+
+## Contents
+
+- [Getting started](#getting-started)
+- [Supported Web Frameworks](#supported-web-frameworks)
+- [How to integrate with Gin](#how-to-integrate-with-gin)
+- [Formatting](#formatting)
+- [Implementation Status](#implementation-status)
+- [Declarative Comments Format](#declarative-comments-format)
+  - [General API Info](#general-api-info)
+  - [API Operation](#api-operation)
+  - [Security](#security)
+- [Examples](#examples)
+  - [Descriptions over multiple lines](#descriptions-over-multiple-lines)
+  - [User defined structure with an array type](#user-defined-structure-with-an-array-type)
+  - [Model composition in response](#model-composition-in-response)
+  - [Add header fields in the response](#add-header-fields-in-the-response)
+  - [Use multiple path params](#use-multiple-path-params)
+  - [Example value of struct](#example-value-of-struct)
+  - [Description of struct](#description-of-struct)
+  - [Use the `swaggertype` tag to change the field type](#use-the-swaggertype-tag-to-change-the-field-type)
+  - [Use the `swaggerignore` tag to exclude a field](#use-the-swaggerignore-tag-to-exclude-a-field)
+  - [Add extension info to struct field](#add-extension-info-to-struct-field)
+  - [Rename the displayed model](#rename-the-displayed-model)
+  - [How to use security annotations](#how-to-use-security-annotations)
+- [About the Project](#about-the-project)
+
+## Getting started
+
+1. Add comments to your API source code; see the Declarative Comments Format.
+2. Download swag using:
+
+```bash
+go install github.com/swaggo/swag/cmd/swag@latest
+```
+
+To build from source you need a Go environment (version 1.19 or newer).
+
+Alternatively, download a pre-compiled binary from the GitHub release page.
+
+3. Run `swag init` in the project root directory which contains the `main.go` file. This will parse the comments and generate the required files (the `docs` folder and `docs/docs.go`).
+
+```bash
+swag init
+```
+
+Make sure to import the generated `docs/docs.go` so that the specific configuration gets initialized. If your general API annotations are not in `main.go`, you can tell swag with the `-g` flag.
+
+```bash
+swag init -g http/api.go
+```
+
+4. (optional) Use `fmt` to format the SWAG comments. (Please upgrade to the latest version first)
+
+```bash
+swag fmt
+```
+
+## swag cli
+
+```bash
+swag init -h
+NAME:
+   swag init - Create docs.go
+
+USAGE:
+   swag init [command options] [arguments...]
+
+OPTIONS:
+   --generalInfo value, -g value          Go file path in which 'swagger general API Info' is written; if relative, it is based on the API parse directory (default: "main.go")
+   --dir value, -d value                  API parse directory (default: "./")
+   --exclude value                        Directories excluded when parsing, comma separated (default: empty)
+   --propertyStrategy value, -p value     Naming strategy for struct fields: snakecase, camelcase or pascalcase (default: "camelcase")
+   --output value, -o value               Output directory for the files (swagger.json, swagger.yaml and doc.go) (default: "./docs")
+   --parseVendor                          Whether to parse go source files in the vendor directory, disabled by default
+   --parseDependency                      Whether to parse go source files in dependency directories, disabled by default
+   --parseDependencyLevel, --pdl          Enhances '--parseDependency': whether to parse go source files in dependency directories; 0 parse nothing, 1 parse object models only, 2 parse APIs only, 3 parse both object models and APIs (default: 0)
+   --markdownFiles value, --md value      Directory containing the markdown files used for the API descriptions
+   --generatedTime                        Whether to output a timestamp at the top of docs.go, enabled by default
+   --codeExampleFiles value, --cef value  Parse the folder containing code example files used for the x-codeSamples extension, disabled by default
+   --parseInternal                        Parse go files in internal packages, disabled by default
+   --parseDepth value                     Dependency parse depth (default: 100)
+   --instanceName value                   Set the document instance name (default: "swagger")
+```
+
+```bash
+swag fmt -h
+NAME:
+   swag fmt - format swag comments
+
+USAGE:
+   swag fmt [command options] [arguments...]
+
+OPTIONS:
+   --dir value, -d value           API parse directory (default: "./")
+   --exclude value                 Directories excluded when parsing, comma separated (default: empty)
+   --generalInfo value, -g value   Go file path in which 'swagger general API Info' is written; if relative, it is based on the API parse directory (default: "main.go")
+   --help, -h                      show help (default: false)
+
+```
+
+## Supported Web Frameworks
+
+- [gin](http://github.com/swaggo/gin-swagger)
+- [echo](http://github.com/swaggo/echo-swagger)
+- [buffalo](https://github.com/swaggo/buffalo-swagger)
+- [net/http](https://github.com/swaggo/http-swagger)
+- [gorilla/mux](https://github.com/swaggo/http-swagger)
+- [go-chi/chi](https://github.com/swaggo/http-swagger)
+- [flamingo](https://github.com/i-love-flamingo/swagger)
+- [fiber](https://github.com/gofiber/swagger)
+- [atreugo](https://github.com/Nerzal/atreugo-swagger)
+- [hertz](https://github.com/hertz-contrib/swagger)
+
+## How to integrate with Gin
+
+See the example source code [here](https://github.com/swaggo/swag/tree/master/example/celler).
+
+1. After using `swag init` to generate the Swagger 2.0 docs, import the following packages:
+
+```go
+import "github.com/swaggo/gin-swagger" // gin-swagger middleware
+import "github.com/swaggo/files" // swagger embed files
+```
+
+2. Add general API annotations to the `main.go` source code:
+
+```go
+// @title Swagger Example API
+// @version 1.0
+// @description This is a sample server celler server.
+// @termsOfService http://swagger.io/terms/
+
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+
+// @host localhost:8080
+// @BasePath /api/v1
+
+// @securityDefinitions.basic BasicAuth
+
+// @externalDocs.description OpenAPI
+// @externalDocs.url https://swagger.io/resources/open-api/
+func main() {
+    r := gin.Default()
+
+    c := controller.NewController()
+
+    v1 := r.Group("/api/v1")
+    {
+        accounts := v1.Group("/accounts")
+        {
+            accounts.GET(":id", c.ShowAccount)
+            accounts.GET("", c.ListAccounts)
+            accounts.POST("", c.AddAccount)
+            accounts.DELETE(":id", c.DeleteAccount)
+            accounts.PATCH(":id", c.UpdateAccount)
+            accounts.POST(":id/images", c.UploadAccountImage)
+        }
+        //...
+    }
+    r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
+    r.Run(":8080")
+}
+//...
+```
+
+In addition, some general API info can be set dynamically. The generated code package `docs` exports the `SwaggerInfo` variable, which can be used to set the title, description, version, host and base path programmatically. Example using Gin:
+
+```go
+package main
+
+import (
+    "github.com/gin-gonic/gin"
+    "github.com/swaggo/files"
+    "github.com/swaggo/gin-swagger"
+
+    "./docs" // docs is generated by Swag CLI, you have to import it.
+)
+
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+func main() {
+
+    // programmatically set swagger info
+    docs.SwaggerInfo.Title = "Swagger Example API"
+    docs.SwaggerInfo.Description = "This is a sample server Petstore server."
+    docs.SwaggerInfo.Version = "1.0"
+    docs.SwaggerInfo.Host = "petstore.swagger.io"
+    docs.SwaggerInfo.BasePath = "/v2"
+    docs.SwaggerInfo.Schemes = []string{"http", "https"}
+
+    r := gin.New()
+
+    // use ginSwagger middleware to serve the API docs
+    r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
+
+    r.Run()
+}
+```
+
+3. Add API operation annotations to the `controller` code:
+
+```go
+package controller
+
+import (
+    "fmt"
+    "net/http"
+    "strconv"
+
+    "github.com/gin-gonic/gin"
+    "github.com/swaggo/swag/example/celler/httputil"
+    "github.com/swaggo/swag/example/celler/model"
+)
+
+// ShowAccount godoc
+// @Summary Show an account
+// @Description get string by ID
+// @Tags accounts
+// @Accept json
+// @Produce json
+// @Param id path int true "Account ID"
+// @Success 200 {object} model.Account
+// @Failure 400 {object} httputil.HTTPError
+// @Failure 404 {object} httputil.HTTPError
+// @Failure 500 {object} httputil.HTTPError
+// @Router /accounts/{id} [get]
+func (c *Controller) ShowAccount(ctx *gin.Context) {
+    id := ctx.Param("id")
+    aid, err := strconv.Atoi(id)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusBadRequest, err)
+        return
+    }
+    account, err := model.AccountOne(aid)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusNotFound, err)
+        return
+    }
+    ctx.JSON(http.StatusOK, account)
+}
+
+// ListAccounts godoc
+// @Summary List accounts
+// @Description get accounts
+// @Tags accounts
+// @Accept json
+// @Produce json
+// @Param q query string false "name search by q" Format(email)
+// @Success 200 {array} model.Account
+// @Failure 400 {object} httputil.HTTPError
+// @Failure 404 {object} httputil.HTTPError
+// @Failure 500 {object} httputil.HTTPError
+// @Router /accounts [get]
+func (c *Controller) ListAccounts(ctx *gin.Context) {
+    q := ctx.Request.URL.Query().Get("q")
+    accounts, err := model.AccountsAll(q)
+    if err != nil {
+        httputil.NewError(ctx, http.StatusNotFound, err)
+        return
+    }
+    ctx.JSON(http.StatusOK, accounts)
+}
+//...
+```
+
+```bash
+swag init
+```
+
+4. Run the app, then browse to http://localhost:8080/swagger/index.html . You will see the Swagger 2.0 API documents, as shown below:
+
+![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png)
+
+## Formatting
+
+Swag comments can be formatted automatically, just like `go fmt`.
+Find the result of formatting [here](https://github.com/swaggo/swag/tree/master/example/celler).
+
+Usage:
+```shell
+swag fmt
+```
+
+Exclude directories (not scanned):
+```shell
+swag fmt -d ./ --exclude ./internal
+```
+
+## Implementation Status
+
+[Swagger 2.0 document](https://swagger.io/docs/specification/2-0/basic-structure/)
+
+- [x] Basic Structure
+- [x] API Host and Base Path
+- [x] Paths and Operations
+- [x] Describing Parameters
+- [x] Describing Request Body
+- [x] Describing Responses
+- [x] MIME Types
+- [x] Authentication
+  - [x] Basic Authentication
+  - [x] API Keys
+- [x] Adding Examples
+- [x] File Upload
+- [x] Enums
+- [x] Grouping Operations With Tags
+- [ ] Swagger Extensions
+
+## Declarative Comments Format
+
+## General API Info
+
+**Example** [`celler/main.go`](https://github.com/swaggo/swag/blob/master/example/celler/main.go)
+
+| annotation | description | example |
+| ----------------------- | ----------------------------------------------------------------------------------------------- | --------------------------------------------------------------- |
+| title | **Required.** The title of the application. | // @title Swagger Example API |
+| version | **Required.** Provides the version of the application API. | // @version 1.0 |
+| description | A short description of the application. | // @description This is a sample server celler server. |
+| tag.name | Name of a tag. | // @tag.name This is the name of the tag |
+| tag.description | Description of the tag. | // @tag.description Cool Description |
+| tag.docs.url | URL of the external documentation of the tag. | // @tag.docs.url https://example.com |
+| tag.docs.description | Description of the external documentation of the tag. | // @tag.docs.description Best example documentation |
+| termsOfService | The Terms of Service for the API. | // @termsOfService http://swagger.io/terms/ |
+| contact.name | The contact information for the exposed API. | // @contact.name API Support |
+| contact.url | The URL pointing to the contact information. MUST be in the format of a URL. | // @contact.url http://www.swagger.io/support |
+| contact.email | The email address of the contact person/organization. MUST be in the format of an email address. | // @contact.email support@swagger.io |
+| license.name | **Required.** The license name used for the API. | // @license.name Apache 2.0 |
+| license.url | A URL to the license used for the API. MUST be in the format of a URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html |
+| host | The host (hostname or IP address) running the API. | // @host localhost:8080 |
+| BasePath | The base path on which the API runs. | // @BasePath /api/v1 |
+| accept | A list of MIME types the APIs can consume. Note that accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | // @accept json |
+| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | // @produce json |
+| query.collection.format | The default collection(array) param format in query URIs: csv, multi, pipes, tsv, ssv. If not set, csv is the default. | // @query.collection.format multi |
+| schemes | The transfer protocols for the operation, separated by spaces. | // @schemes http https |
+| externalDocs.description | Description of the external document. | // @externalDocs.description OpenAPI |
+| externalDocs.url | URL of the external document.
 | // @externalDocs.url https://swagger.io/resources/open-api/ |
+| x-name | The extension key must start with x- and take only a json value | // @x-example-key {"key": "value"} |
+
+### Using Markdown descriptions
+
+When a short string in the documentation is not enough to express things fully, or when you need to show images, code examples and similar content, you may want to use Markdown descriptions. To use Markdown descriptions, use the following annotations.
+
+| annotation | description | example |
+| ------------------------ | ------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------- |
+| title | **Required.** The title of the application. | // @title Swagger Example API |
+| version | **Required.** Provides the version of the application API. | // @version 1.0 |
+| description.markdown | A short description of the application. Parsed from the `api.md` file. This is an alternative to `@description`. | // @description.markdown No value needed, this parses the description from api.md |
+| tag.name | Name of a tag. | // @tag.name This is the name of the tag |
+| tag.description.markdown | Description of the tag; this is an alternative to `tag.description`. The description will be read from a file named like `tagname.md`. | // @tag.description.markdown |
+
+## API Operation
+
+Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller)
+
+| annotation | description |
+|----------------------|------------------------------------------------------------------------------------------------|
+| description | A verbose explanation of the operation behavior. |
+| description.markdown | A short description of the application. The description will be read from a file named like `endpointname.md`. |
+| id | A unique string used to identify the operation. Must be unique among all API operations. |
+| tags | A list of tags for each API operation, separated by commas. |
+| summary | A short summary of what the operation does. |
+| accept | A list of MIME types the APIs can consume. Note that accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). |
+| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). |
+| param | Parameters separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` |
+| security | [Security](#security) for each API operation. |
+| success | Success response separated by spaces. `return code`,`{param type}`,`data type`,`comment` |
+| failure | Failure response separated by spaces. `return code`,`{param type}`,`data type`,`comment` |
+| response | Same as `success` and `failure`. |
+| header | Header fields separated by spaces. `return code`,`{param type}`,`data type`,`comment` |
+| router | Path definition separated by spaces. `path`,`[httpMethod]` |
+| deprecatedrouter | Same as router, but deprecated. |
+| x-name | The extension field must start with `x-` and take only a json value. |
+| deprecated | Mark all paths of the current API operation as deprecated. |
+
+## Mime Types
+
+`swag` accepts all MIME types which are in the correct format, that is, matching `*/*`. Besides that, `swag` also accepts aliases for some MIME types, as follows:
+
+| Alias | MIME Type |
+| --------------------- | --------------------------------- |
+| json | application/json |
+| xml | text/xml |
+| plain | text/plain |
+| html | text/html |
+| mpfd | multipart/form-data |
+| x-www-form-urlencoded | application/x-www-form-urlencoded |
+| json-api | application/vnd.api+json |
+| json-stream | application/x-json-stream |
+| octet-stream | application/octet-stream |
+| png | image/png |
+| jpeg | image/jpeg |
+| gif | image/gif |
+
+## Parameter Type
+
+- query
+- path
+- header
+- body
+- formData
+
+## Data Type
+
+- string (string)
+- integer (int, uint, uint32, uint64)
+- number (float32)
+- boolean (bool)
+- user defined struct
+
+## Security
+
+| annotation | description | parameters | example |
+| -------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------- | ------------------------------------------------------------ |
+| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basic BasicAuth |
+| securitydefinitions.apikey | [API key](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth.
+## Attributes
+
+```go
+// @Param enumstring query string false "string enums" Enums(A, B, C)
+// @Param enumint query int false "int enums" Enums(1, 2, 3)
+// @Param enumnumber query number false "number enums" Enums(1.1, 1.2, 1.3)
+// @Param string query string false "string valid" minlength(5) maxlength(10)
+// @Param int query int false "int valid" minimum(1) maximum(10)
+// @Param default query string false "string default" default(A)
+// @Param collection query []string false "string collection" collectionFormat(multi)
+// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable)
+```
+
+It also works for struct fields:
+
+```go
+type Foo struct {
+    Bar string `minLength:"4" maxLength:"16"`
+    Baz int `minimum:"10" maximum:"20" default:"15"`
+    Qux []string `enums:"foo,bar,baz"`
+}
+```
+
+### Available
+
+| Field Name | Type | Description |
+| ---------------- | --------- | ------------------------------------------------------------ |
+| default | * | Declares the value of the parameter that the server will use if none is provided; for example, a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema, this value MUST conform to the defined [type](#parameter-types) for this parameter. |
+| maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. |
+| minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. |
+| maxLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1. |
+| minLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2. |
+| enums | [\*] | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1. |
+| format | `string` | The extending format for the previously mentioned [type](#parameter-types). See [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) for further details. |
+| collectionFormat | `string` | Determines the format of array parameters in the query. Possible values are:<br>• `csv` - comma separated values `foo,bar`.<br>• `ssv` - space separated values `foo bar`.<br>• `tsv` - tab separated values `foo\tbar`.<br>• `pipes` - pipe separated values `foo\|bar`.<br>• `multi` - corresponds to multiple parameter instances instead of multiple values for a single instance, e.g. `foo=bar&foo=baz`. This is valid only for parameters in "query" or "formData".<br>The default value is `csv`. |
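+
+The difference between the `collectionFormat` values is easiest to see in the resulting query strings. A short sketch, with an illustrative parameter name:
+
+```go
+// collectionFormat(multi): each value travels as its own query parameter,
+//   GET /search?tag=go&tag=swagger
+// @Param tag query []string false "tags to filter by" collectionFormat(multi)
+
+// collectionFormat(csv), the default: one comma-separated parameter,
+//   GET /search?tag=go,swagger
+// @Param tag query []string false "tags to filter by" collectionFormat(csv)
+```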
+### Further
+
+| Field Name | Type | Description |
+| ----------- | :-------: | ---------------------------------------------------------------------------------- |
+| multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. |
+| pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. |
+| maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. |
+| minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. |
+| uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4. |
+
+## Examples
+
+### Descriptions over multiple lines
+
+You can add descriptions spanning multiple lines in either the general API description or in route definitions like so:
+
+```go
+// @description This is the first line
+// @description This is the second line
+// @description And so forth.
+```
+
+### User defined structure with an array type
+
+```go
+// @Success 200 {array} model.Account <-- This is a user defined struct.
+```
+
+```go
+package model
+
+type Account struct {
+    ID   int    `json:"id" example:"1"`
+    Name string `json:"name" example:"account name"`
+}
+```
+
+### Model composition in response
+
+```go
+// The type of the "data" field of JSONResult will be replaced by the proto.Order type
+@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc"
+```
+
+```go
+type JSONResult struct {
+    Code    int         `json:"code"`
+    Message string      `json:"message"`
+    Data    interface{} `json:"data"`
+}
+
+type Order struct { // in `proto` package
+    ...
+}
+```
+
+- also supports arrays of objects and primitive types as nested responses
+
+```go
+@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc"
+@success 200 {object} jsonresult.JSONResult{data=string} "desc"
+@success 200 {object} jsonresult.JSONResult{data=[]string} "desc"
+```
+
+- overriding the types of multiple fields. If a field does not exist, it will be added.
+
+```go
+@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc"
+```
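+
+Put together, composition lets one response envelope serve many endpoints without declaring a wrapper struct per payload. A sketch of how this looks on a handler; the package layout and names are assumptions for illustration:
+
+```go
+package controller
+
+import (
+	"net/http"
+
+	"github.com/labstack/echo/v4"
+)
+
+// JSONResult is the common envelope from the section above.
+type JSONResult struct {
+	Code    int         `json:"code"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// Order stands in for proto.Order.
+type Order struct {
+	ID int `json:"id"`
+}
+
+// ListOrders godoc
+// @Summary  List orders
+// @Produce  json
+// @Success  200  {object}  controller.JSONResult{data=[]controller.Order}  "orders wrapped in the common envelope"
+// @Router   /orders [get]
+func ListOrders(c echo.Context) error {
+	orders := []Order{{ID: 1}}
+	return c.JSON(http.StatusOK, JSONResult{Code: 200, Message: "ok", Data: orders})
+}
+```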
+### Adding header fields to a response
+
+```go
+// @Success 200 {string} string "ok"
+// @failure 400 {string} string "error"
+// @response default {string} string "other error"
+// @Header 200 {string} Location "/entity/1"
+// @Header 200,400,default {string} Token "token"
+// @Header all {string} Token2 "token2"
+```
+
+### Using multiple path params
+
+```go
+// ...
+// @Param group_id path int true "Group ID"
+// @Param account_id path int true "Account ID"
+// ...
+// @Router /examples/groups/{group_id}/accounts/{account_id} [get]
+```
+
+### Example value of a struct
+
+```go
+type Account struct {
+    ID        int      `json:"id" example:"1"`
+    Name      string   `json:"name" example:"account name"`
+    PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"`
+}
+```
+
+### Description of a struct
+
+```go
+type Account struct {
+    // ID this is userid
+    ID   int    `json:"id"`
+    Name string `json:"name"` // This is Name
+}
+```
+
+### Changing a field's type with the `swaggertype` tag
+
+[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409)
+
+```go
+type TimestampTime struct {
+    time.Time
+}
+
+// implements encoding/json.Marshaler
+func (t *TimestampTime) MarshalJSON() ([]byte, error) {
+    bin := make([]byte, 16)
+    bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10)
+    return bin, nil
+}
+
+// implements encoding/json.Unmarshaler
+func (t *TimestampTime) UnmarshalJSON(bin []byte) error {
+    v, err := strconv.ParseInt(string(bin), 10, 64)
+    if err != nil {
+        return err
+    }
+    t.Time = time.Unix(v, 0)
+    return nil
+}
+
+type Account struct {
+    // Use the `swaggertype` tag to change an alias type to the built-in type integer
+    ID sql.NullInt64 `json:"id" swaggertype:"integer"`
+
+    // Use the `swaggertype` tag to change a struct type to the built-in type integer
+    RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"`
+
+    // Array types can be overridden using the "array,<type>" format
+    Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"`
+}
+```
+
+[#379](https://github.com/swaggo/swag/issues/379)
+
+```go
+type CertificateKeyPair struct {
+    Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="`
+    Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="`
+}
+```
+
+The generated swagger doc looks as follows:
+
+```json
+"api.CertificateKeyPair": {
+    "type":"object",
+    "properties":{
+        "crt":{
+            "type":"string",
+            "format":"base64",
+            "example":"U3dhZ2dlciByb2Nrcw=="
+        },
+        "key":{
+            "type":"string",
+            "format":"base64",
+            "example":"U3dhZ2dlciByb2Nrcw=="
+        }
+    }
+}
+```
+
+### Excluding fields with the `swaggerignore` tag
+
+```go
+type Account struct {
+    ID      string `json:"id"`
+    Name    string `json:"name"`
+    Ignored int    `swaggerignore:"true"`
+}
+```
+
+### Adding extension info to struct fields
+
+```go
+type Account struct {
+    ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extension fields must start with "x-"
+}
+```
+
+generates the following swagger doc:
+
+```json
+"Account": {
+    "type": "object",
+    "properties": {
+        "id": {
+            "type": "string",
+            "x-nullable": true,
+            "x-abc": "def",
+            "x-omitempty": false
+        }
+    }
+}
+```
+
+### Renaming a model for display
+
+```go
+type Resp struct {
+    Code int
+} //@name Response
+```
+
+### How to use security annotations
+
+General API info:
+
+```go
+// @securityDefinitions.basic BasicAuth
+
+// @securitydefinitions.oauth2.application OAuth2Application
+// @tokenUrl https://example.com/oauth/token
+// @scope.write Grants write access
+// @scope.admin Grants read and write access to administrative information
+```
+
+Each API operation:
+
+```go
+// @Security ApiKeyAuth
+```
+
+Make it an AND condition:
+
+```go
+// @Security ApiKeyAuth
+// @Security OAuth2Application[write, admin]
+```
+
+## About the Project
+
+This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger), but we simplified the usage and added support for a variety of [web frameworks](#supported-web-frameworks). The Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers); it is licensed under a [Creative Commons license](http://creativecommons.org/licenses/by/3.0/deed.en).
+
+## Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+ + +## 支持者 + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/swag#backer)] + + + +## 赞助商 + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/swag#sponsor)] + + + + + + + + + + + + +## License + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large) diff --git a/vendor/github.com/swaggo/swag/const.go b/vendor/github.com/swaggo/swag/const.go new file mode 100644 index 00000000..83755103 --- /dev/null +++ b/vendor/github.com/swaggo/swag/const.go @@ -0,0 +1,567 @@ +package swag + +import ( + "go/ast" + "go/token" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// ConstVariable a model to record a const variable +type ConstVariable struct { + Name *ast.Ident + Type ast.Expr + Value interface{} + Comment *ast.CommentGroup + File *ast.File + Pkg *PackageDefinitions +} + +var escapedChars = map[uint8]uint8{ + 'n': '\n', + 'r': '\r', + 't': '\t', + 'v': '\v', + '\\': '\\', + '"': '"', +} + +// EvaluateEscapedChar parse escaped character +func EvaluateEscapedChar(text string) rune { + if len(text) == 1 { + return rune(text[0]) + } + + if len(text) == 2 && text[0] == '\\' { + return rune(escapedChars[text[1]]) + } + + if len(text) == 6 && text[0:2] == "\\u" { + n, err := strconv.ParseInt(text[2:], 16, 32) + if err == nil { + return rune(n) + } + } + + return 0 +} + +// EvaluateEscapedString parse escaped characters in string +func EvaluateEscapedString(text string) string { + if !strings.ContainsRune(text, '\\') { + return text + } + result := make([]byte, 0, len(text)) + for i := 0; i < len(text); i++ { + if text[i] == '\\' { + i++ + if text[i] == 'u' { + i++ + char, err := strconv.ParseInt(text[i:i+4], 16, 32) + if err == nil { + result = utf8.AppendRune(result, rune(char)) + } + i += 3 + } else if c, ok := escapedChars[text[i]]; ok { + result = append(result, c) + } + } else { + result = append(result, text[i]) + } + } + return string(result) +} + +// EvaluateDataConversion evaluate the type a explicit type conversion +func EvaluateDataConversion(x interface{}, typeName string) interface{} { + switch value := x.(type) { + case int: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int8: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case 
"uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint8: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int16: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint16: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int32: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + case "string": + return string(value) + } + case uint32: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int64: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint64: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return 
uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case string: + switch typeName { + case "string": + return value + } + } + return nil +} + +// EvaluateUnary evaluate the type and value of a unary expression +func EvaluateUnary(x interface{}, operator token.Token, xtype ast.Expr) (interface{}, ast.Expr) { + switch operator { + case token.SUB: + switch value := x.(type) { + case int: + return -value, xtype + case int8: + return -value, xtype + case int16: + return -value, xtype + case int32: + return -value, xtype + case int64: + return -value, xtype + } + case token.XOR: + switch value := x.(type) { + case int: + return ^value, xtype + case int8: + return ^value, xtype + case int16: + return ^value, xtype + case int32: + return ^value, xtype + case int64: + return ^value, xtype + case uint: + return ^value, xtype + case uint8: + return ^value, xtype + case uint16: + return ^value, xtype + case uint32: + return ^value, xtype + case uint64: + return ^value, xtype + } + } + return nil, nil +} + +// EvaluateBinary evaluate the type and value of a binary expression +func EvaluateBinary(x, y interface{}, operator token.Token, xtype, ytype ast.Expr) (interface{}, ast.Expr) { + if operator == token.SHR || operator == token.SHL { + var rightOperand uint64 + yValue := reflect.ValueOf(y) + if yValue.CanUint() { + rightOperand = yValue.Uint() + } else if yValue.CanInt() { + rightOperand = uint64(yValue.Int()) + } + + switch operator { + case token.SHL: + switch xValue := x.(type) { + case int: + return xValue << rightOperand, xtype + case int8: + return xValue << rightOperand, xtype + case int16: + return xValue << rightOperand, xtype + case int32: + return xValue << rightOperand, xtype + case int64: + return xValue << rightOperand, xtype + case uint: + return xValue << rightOperand, xtype + case uint8: + return xValue << rightOperand, xtype + case uint16: + return xValue << rightOperand, xtype + case uint32: + return xValue << rightOperand, xtype + case uint64: + return xValue << rightOperand, xtype + } + case token.SHR: + switch xValue := x.(type) { + case int: + return xValue >> rightOperand, xtype + case int8: + return xValue >> rightOperand, xtype + case int16: + return xValue >> rightOperand, xtype + case int32: + return xValue >> rightOperand, xtype + case int64: + return xValue >> rightOperand, xtype + case uint: + return xValue >> rightOperand, xtype + case uint8: + return xValue >> rightOperand, xtype + case uint16: + return xValue >> rightOperand, xtype + case uint32: + return xValue >> rightOperand, xtype + case uint64: + return xValue >> rightOperand, xtype + } + } + return nil, nil + } + + evalType := xtype + if evalType == nil { + evalType = ytype + } + + xValue := reflect.ValueOf(x) + yValue := reflect.ValueOf(y) + if xValue.Kind() == reflect.String && yValue.Kind() == reflect.String { + return xValue.String() + yValue.String(), evalType + } + + var targetValue reflect.Value + if xValue.Kind() != reflect.Int { + targetValue = reflect.New(xValue.Type()).Elem() + } else { + targetValue = reflect.New(yValue.Type()).Elem() + } + + switch operator { + case token.ADD: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() + yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() + yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) + yValue.Uint()) + } else if xValue.CanUint() && 
yValue.CanInt() { + targetValue.SetUint(xValue.Uint() + uint64(yValue.Int())) + } + case token.SUB: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() - yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() - yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) - yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() - uint64(yValue.Int())) + } + case token.MUL: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() * yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() * yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) * yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() * uint64(yValue.Int())) + } + case token.QUO: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() / yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() / yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) / yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() / uint64(yValue.Int())) + } + case token.REM: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() % yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() % yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) % yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() % uint64(yValue.Int())) + } + case token.AND: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() & yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() & yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) & yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() & uint64(yValue.Int())) + } + case token.OR: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() | yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() | yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) | yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() | uint64(yValue.Int())) + } + case token.XOR: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() ^ yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() ^ yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) ^ yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() ^ uint64(yValue.Int())) + } + } + return targetValue.Interface(), evalType +} diff --git a/vendor/github.com/swaggo/swag/doc.go b/vendor/github.com/swaggo/swag/doc.go new file mode 100644 index 00000000..564b5c6f --- /dev/null +++ b/vendor/github.com/swaggo/swag/doc.go @@ -0,0 +1,5 @@ +/* +Package swag converts Go annotations to Swagger Documentation 2.0. +See https://github.com/swaggo/swag for more information about swag. 
+*/ +package swag // import "github.com/swaggo/swag" diff --git a/vendor/github.com/swaggo/swag/enums.go b/vendor/github.com/swaggo/swag/enums.go new file mode 100644 index 00000000..38658f20 --- /dev/null +++ b/vendor/github.com/swaggo/swag/enums.go @@ -0,0 +1,13 @@ +package swag + +const ( + enumVarNamesExtension = "x-enum-varnames" + enumCommentsExtension = "x-enum-comments" +) + +// EnumValue a model to record an enum consts variable +type EnumValue struct { + key string + Value interface{} + Comment string +} diff --git a/vendor/github.com/swaggo/swag/field_parser.go b/vendor/github.com/swaggo/swag/field_parser.go new file mode 100644 index 00000000..de8945de --- /dev/null +++ b/vendor/github.com/swaggo/swag/field_parser.go @@ -0,0 +1,686 @@ +package swag + +import ( + "fmt" + "go/ast" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/go-openapi/spec" +) + +var _ FieldParser = &tagBaseFieldParser{p: nil, field: nil, tag: ""} + +const ( + requiredLabel = "required" + optionalLabel = "optional" + swaggerTypeTag = "swaggertype" + swaggerIgnoreTag = "swaggerignore" +) + +type tagBaseFieldParser struct { + p *Parser + field *ast.Field + tag reflect.StructTag +} + +func newTagBaseFieldParser(p *Parser, field *ast.Field) FieldParser { + fieldParser := tagBaseFieldParser{ + p: p, + field: field, + tag: "", + } + if fieldParser.field.Tag != nil { + fieldParser.tag = reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")) + } + + return &fieldParser +} + +func (ps *tagBaseFieldParser) ShouldSkip() bool { + // Skip non-exported fields. + if ps.field.Names != nil && !ast.IsExported(ps.field.Names[0].Name) { + return true + } + + if ps.field.Tag == nil { + return false + } + + ignoreTag := ps.tag.Get(swaggerIgnoreTag) + if strings.EqualFold(ignoreTag, "true") { + return true + } + + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name == "-" { + return true + } + + return false +} + +func (ps *tagBaseFieldParser) FieldNames() ([]string, error) { + if len(ps.field.Names) <= 1 { + // if embedded but with a json/form name ?? 
+ if ps.field.Tag != nil { + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name != "" { + return []string{name}, nil + } + + // use "form" tag over json tag + name = ps.FormName() + if name != "" { + return []string{name}, nil + } + } + if len(ps.field.Names) == 0 { + return nil, nil + } + } + var names = make([]string, 0, len(ps.field.Names)) + for _, name := range ps.field.Names { + switch ps.p.PropNamingStrategy { + case SnakeCase: + names = append(names, toSnakeCase(name.Name)) + case PascalCase: + names = append(names, name.Name) + default: + names = append(names, toLowerCamelCase(name.Name)) + } + } + return names, nil +} + +func (ps *tagBaseFieldParser) firstTagValue(tag string) string { + if ps.field.Tag != nil { + return strings.TrimRight(strings.TrimSpace(strings.Split(ps.tag.Get(tag), ",")[0]), "[]") + } + return "" +} + +func (ps *tagBaseFieldParser) FormName() string { + return ps.firstTagValue(formTag) +} + +func (ps *tagBaseFieldParser) HeaderName() string { + return ps.firstTagValue(headerTag) +} + +func (ps *tagBaseFieldParser) PathName() string { + return ps.firstTagValue(uriTag) +} + +func toSnakeCase(in string) string { + var ( + runes = []rune(in) + length = len(runes) + out []rune + ) + + for idx := 0; idx < length; idx++ { + if idx > 0 && unicode.IsUpper(runes[idx]) && + ((idx+1 < length && unicode.IsLower(runes[idx+1])) || unicode.IsLower(runes[idx-1])) { + out = append(out, '_') + } + + out = append(out, unicode.ToLower(runes[idx])) + } + + return string(out) +} + +func toLowerCamelCase(in string) string { + var flag bool + + out := make([]rune, len(in)) + + runes := []rune(in) + for i, curr := range runes { + if (i == 0 && unicode.IsUpper(curr)) || (flag && unicode.IsUpper(curr)) { + out[i] = unicode.ToLower(curr) + flag = true + + continue + } + + out[i] = curr + flag = false + } + + return string(out) +} + +func (ps *tagBaseFieldParser) CustomSchema() (*spec.Schema, error) { + if ps.field.Tag == nil { + return nil, nil + } + + typeTag := ps.tag.Get(swaggerTypeTag) + if typeTag != "" { + return BuildCustomSchema(strings.Split(typeTag, ",")) + } + + return nil, nil +} + +type structField struct { + title string + schemaType string + arrayType string + formatType string + maximum *float64 + minimum *float64 + multipleOf *float64 + maxLength *int64 + minLength *int64 + maxItems *int64 + minItems *int64 + exampleValue interface{} + enums []interface{} + enumVarNames []interface{} + unique bool +} + +// splitNotWrapped slices s into all substrings separated by sep if sep is not +// wrapped by brackets and returns a slice of the substrings between those separators. 
+func splitNotWrapped(s string, sep rune) []string { + openCloseMap := map[rune]rune{ + '(': ')', + '[': ']', + '{': '}', + } + + var ( + result = make([]string, 0) + current = strings.Builder{} + openCount = 0 + openChar rune + ) + + for _, char := range s { + switch { + case openChar == 0 && openCloseMap[char] != 0: + openChar = char + + openCount++ + + current.WriteRune(char) + case char == openChar: + openCount++ + + current.WriteRune(char) + case openCount > 0 && char == openCloseMap[openChar]: + openCount-- + + current.WriteRune(char) + case openCount == 0 && char == sep: + result = append(result, current.String()) + + openChar = 0 + + current = strings.Builder{} + default: + current.WriteRune(char) + } + } + + if current.String() != "" { + result = append(result, current.String()) + } + + return result +} + +// ComplementSchema complement schema with field properties +func (ps *tagBaseFieldParser) ComplementSchema(schema *spec.Schema) error { + types := ps.p.GetSchemaTypePath(schema, 2) + if len(types) == 0 { + return fmt.Errorf("invalid type for field: %s", ps.field.Names[0]) + } + + if IsRefSchema(schema) { + var newSchema = spec.Schema{} + err := ps.complementSchema(&newSchema, types) + if err != nil { + return err + } + if !reflect.ValueOf(newSchema).IsZero() { + *schema = *(newSchema.WithAllOf(*schema)) + } + return nil + } + + return ps.complementSchema(schema, types) +} + +// complementSchema complement schema with field properties +func (ps *tagBaseFieldParser) complementSchema(schema *spec.Schema, types []string) error { + if ps.field.Tag == nil { + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + return nil + } + + field := &structField{ + schemaType: types[0], + formatType: ps.tag.Get(formatTag), + title: ps.tag.Get(titleTag), + } + + if len(types) > 1 && (types[0] == ARRAY || types[0] == OBJECT) { + field.arrayType = types[1] + } + + jsonTagValue := ps.tag.Get(jsonTag) + + bindingTagValue := ps.tag.Get(bindingTag) + if bindingTagValue != "" { + parseValidTags(bindingTagValue, field) + } + + validateTagValue := ps.tag.Get(validateTag) + if validateTagValue != "" { + parseValidTags(validateTagValue, field) + } + + enumsTagValue := ps.tag.Get(enumsTag) + if enumsTagValue != "" { + err := parseEnumTags(enumsTagValue, field) + if err != nil { + return err + } + } + + if IsNumericType(field.schemaType) || IsNumericType(field.arrayType) { + maximum, err := getFloatTag(ps.tag, maximumTag) + if err != nil { + return err + } + + if maximum != nil { + field.maximum = maximum + } + + minimum, err := getFloatTag(ps.tag, minimumTag) + if err != nil { + return err + } + + if minimum != nil { + field.minimum = minimum + } + + multipleOf, err := getFloatTag(ps.tag, multipleOfTag) + if err != nil { + return err + } + + if multipleOf != nil { + field.multipleOf = multipleOf + } + } + + if field.schemaType == STRING || field.arrayType == STRING { + maxLength, err := getIntTag(ps.tag, maxLengthTag) + if err != nil { + return err + } + + if maxLength != nil { + field.maxLength = maxLength + } + + minLength, err := getIntTag(ps.tag, minLengthTag) + if err != nil { + return err + } + + if minLength != nil { + field.minLength = minLength + } + } + + // json:"name,string" or json:",string" + exampleTagValue, ok := ps.tag.Lookup(exampleTag) + if ok { + field.exampleValue = exampleTagValue + + if 
!strings.Contains(jsonTagValue, ",string") { + example, err := defineTypeOfExample(field.schemaType, field.arrayType, exampleTagValue) + if err != nil { + return err + } + + field.exampleValue = example + } + } + + // perform this after setting everything else (min, max, etc...) + if strings.Contains(jsonTagValue, ",string") { + // @encoding/json: "It applies only to fields of string, floating point, integer, or boolean types." + defaultValues := map[string]string{ + // Zero Values as string + STRING: "", + INTEGER: "0", + BOOLEAN: "false", + NUMBER: "0", + } + + defaultValue, ok := defaultValues[field.schemaType] + if ok { + field.schemaType = STRING + *schema = *PrimitiveSchema(field.schemaType) + + if field.exampleValue == nil { + // if exampleValue is not defined by the user, + // we will force an example with a correct value + // (eg: int->"0", bool:"false") + field.exampleValue = defaultValue + } + } + } + + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + schema.ReadOnly = ps.tag.Get(readOnlyTag) == "true" + + defaultTagValue := ps.tag.Get(defaultTag) + if defaultTagValue != "" { + value, err := defineType(field.schemaType, defaultTagValue) + if err != nil { + return err + } + + schema.Default = value + } + + schema.Example = field.exampleValue + + if field.schemaType != ARRAY { + schema.Format = field.formatType + } + schema.Title = field.title + + extensionsTagValue := ps.tag.Get(extensionsTag) + if extensionsTagValue != "" { + schema.Extensions = setExtensionParam(extensionsTagValue) + } + + varNamesTag := ps.tag.Get("x-enum-varnames") + if varNamesTag != "" { + varNames := strings.Split(varNamesTag, ",") + if len(varNames) != len(field.enums) { + return fmt.Errorf("invalid count of x-enum-varnames. 
expected %d, got %d", len(field.enums), len(varNames)) + } + + field.enumVarNames = nil + + for _, v := range varNames { + field.enumVarNames = append(field.enumVarNames, v) + } + + if field.schemaType == ARRAY { + // Add the var names in the items schema + if schema.Items.Schema.Extensions == nil { + schema.Items.Schema.Extensions = map[string]interface{}{} + } + schema.Items.Schema.Extensions[enumVarNamesExtension] = field.enumVarNames + } else { + // Add to top level schema + if schema.Extensions == nil { + schema.Extensions = map[string]interface{}{} + } + schema.Extensions[enumVarNamesExtension] = field.enumVarNames + } + } + + eleSchema := schema + + if field.schemaType == ARRAY { + // For Array only + schema.MaxItems = field.maxItems + schema.MinItems = field.minItems + schema.UniqueItems = field.unique + + eleSchema = schema.Items.Schema + eleSchema.Format = field.formatType + } + + eleSchema.Maximum = field.maximum + eleSchema.Minimum = field.minimum + eleSchema.MultipleOf = field.multipleOf + eleSchema.MaxLength = field.maxLength + eleSchema.MinLength = field.minLength + eleSchema.Enum = field.enums + + return nil +} + +func getFloatTag(structTag reflect.StructTag, tagName string) (*float64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.ParseFloat(strValue, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func getIntTag(structTag reflect.StructTag, tagName string) (*int64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.ParseInt(strValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func (ps *tagBaseFieldParser) IsRequired() (bool, error) { + if ps.field.Tag == nil { + return false, nil + } + + bindingTag := ps.tag.Get(bindingTag) + if bindingTag != "" { + for _, val := range strings.Split(bindingTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + validateTag := ps.tag.Get(validateTag) + if validateTag != "" { + for _, val := range strings.Split(validateTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + return ps.p.RequiredByDefault, nil +} + +func parseValidTags(validTag string, sf *structField) { + // `validate:"required,max=10,min=1"` + // ps. required checked by IsRequired(). 
+ for _, val := range strings.Split(validTag, ",") { + var ( + valValue string + keyVal = strings.Split(val, "=") + ) + + switch len(keyVal) { + case 1: + case 2: + valValue = strings.ReplaceAll(strings.ReplaceAll(keyVal[1], utf8HexComma, ","), utf8Pipe, "|") + default: + continue + } + + switch keyVal[0] { + case "max", "lte": + sf.setMax(valValue) + case "min", "gte": + sf.setMin(valValue) + case "oneof": + sf.setOneOf(valValue) + case "unique": + if sf.schemaType == ARRAY { + sf.unique = true + } + case "dive": + // ignore dive + return + default: + continue + } + } +} + +func parseEnumTags(enumTag string, field *structField) error { + enumType := field.schemaType + if field.schemaType == ARRAY { + enumType = field.arrayType + } + + field.enums = nil + + for _, e := range strings.Split(enumTag, ",") { + value, err := defineType(enumType, e) + if err != nil { + return err + } + + field.enums = append(field.enums, value) + } + + return nil +} + +func (sf *structField) setOneOf(valValue string) { + if len(sf.enums) != 0 { + return + } + + enumType := sf.schemaType + if sf.schemaType == ARRAY { + enumType = sf.arrayType + } + + valValues := parseOneOfParam2(valValue) + for i := range valValues { + value, err := defineType(enumType, valValues[i]) + if err != nil { + continue + } + + sf.enums = append(sf.enums, value) + } +} + +func (sf *structField) setMin(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.minimum = &value + case STRING: + intValue := int64(value) + sf.minLength = &intValue + case ARRAY: + intValue := int64(value) + sf.minItems = &intValue + } +} + +func (sf *structField) setMax(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.maximum = &value + case STRING: + intValue := int64(value) + sf.maxLength = &intValue + case ARRAY: + intValue := int64(value) + sf.maxItems = &intValue + } +} + +const ( + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" +) + +// These code copy from +// https://github.com/go-playground/validator/blob/d4271985b44b735c6f76abc7a06532ee997f9476/baked_in.go#L207 +// ---. +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} +var splitParamsRegex = regexp.MustCompile(`'[^']*'|\S+`) + +func parseOneOfParam2(param string) []string { + oneofValsCacheRWLock.RLock() + values, ok := oneofValsCache[param] + oneofValsCacheRWLock.RUnlock() + + if !ok { + oneofValsCacheRWLock.Lock() + values = splitParamsRegex.FindAllString(param, -1) + + for i := 0; i < len(values); i++ { + values[i] = strings.ReplaceAll(values[i], "'", "") + } + + oneofValsCache[param] = values + + oneofValsCacheRWLock.Unlock() + } + + return values +} + +// ---. 
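The tag parser above is what turns the struct-tag attributes documented in the README into `spec.Schema` properties: `complementSchema` reads `json`, `binding`/`validate`, `enums`, `example`, `extensions`, `swaggertype` and related keys off the field's `reflect.StructTag` and folds them into the schema. A minimal sketch of a model exercising those paths; the type and field names are illustrative, not from the vendored code:

```go
package model

import "database/sql"

// Product demonstrates the tags handled by tagBaseFieldParser.
type Product struct {
	// The json tag decides the property name; for a string field the
	// validate min/max keys become minLength/maxLength, and "required"
	// marks the property required via IsRequired.
	Name string `json:"name" validate:"required,min=3,max=40" example:"gopher mug"`

	// On numeric fields the same keys map to minimum/maximum; gte/lte
	// are accepted as aliases of min/max in parseValidTags.
	Price float64 `json:"price" validate:"gte=0,lte=10000"`

	// enums yields an enum list; default is parsed with the field's type.
	Status string `json:"status" enums:"draft,active,retired" default:"draft"`

	// swaggertype overrides the schema type derived from the Go type.
	CreatedAt sql.NullTime `json:"created_at" swaggertype:"string" format:"date-time"`

	// swaggerignore drops the field entirely; json:"-" would do the same
	// via ShouldSkip.
	Secret string `json:"secret" swaggerignore:"true"`
}
```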
diff --git a/vendor/github.com/swaggo/swag/formatter.go b/vendor/github.com/swaggo/swag/formatter.go new file mode 100644 index 00000000..511e3a82 --- /dev/null +++ b/vendor/github.com/swaggo/swag/formatter.go @@ -0,0 +1,182 @@ +package swag + +import ( + "bytes" + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "log" + "os" + "regexp" + "sort" + "strings" + "text/tabwriter" +) + +// Check of @Param @Success @Failure @Response @Header +var specialTagForSplit = map[string]bool{ + paramAttr: true, + successAttr: true, + failureAttr: true, + responseAttr: true, + headerAttr: true, +} + +var skipChar = map[byte]byte{ + '"': '"', + '(': ')', + '{': '}', + '[': ']', +} + +// Formatter implements a formatter for Go source files. +type Formatter struct { + // debugging output goes here + debug Debugger +} + +// NewFormatter create a new formatter instance. +func NewFormatter() *Formatter { + formatter := &Formatter{ + debug: log.New(os.Stdout, "", log.LstdFlags), + } + return formatter +} + +// Format formats swag comments in contents. It uses fileName to report errors +// that happen during parsing of contents. +func (f *Formatter) Format(fileName string, contents []byte) ([]byte, error) { + fileSet := token.NewFileSet() + ast, err := goparser.ParseFile(fileSet, fileName, contents, goparser.ParseComments) + if err != nil { + return nil, err + } + + // Formatting changes are described as an edit list of byte range + // replacements. We make these content-level edits directly rather than + // changing the AST nodes and writing those out (via [go/printer] or + // [go/format]) so that we only change the formatting of Swag attribute + // comments. This won't touch the formatting of any other comments, or of + // functions, etc. + maxEdits := 0 + for _, comment := range ast.Comments { + maxEdits += len(comment.List) + } + edits := make(edits, 0, maxEdits) + + for _, comment := range ast.Comments { + formatFuncDoc(fileSet, comment.List, &edits) + } + + return edits.apply(contents), nil +} + +type edit struct { + begin int + end int + replacement []byte +} + +type edits []edit + +func (edits edits) apply(contents []byte) []byte { + // Apply the edits with the highest offset first, so that earlier edits + // don't affect the offsets of later edits. + sort.Slice(edits, func(i, j int) bool { + return edits[i].begin > edits[j].begin + }) + + for _, edit := range edits { + prefix := contents[:edit.begin] + suffix := contents[edit.end:] + contents = append(prefix, append(edit.replacement, suffix...)...) + } + + return contents +} + +// formatFuncDoc reformats the comment lines in commentList, and appends any +// changes to the edit list. +func formatFuncDoc(fileSet *token.FileSet, commentList []*ast.Comment, edits *edits) { + // Building the edit list to format a comment block is a two-step process. + // First, we iterate over each comment line looking for Swag attributes. In + // each one we find, we replace alignment whitespace with a tab character, + // then write the result into a tab writer. 
+ + linesToComments := make(map[int]int, len(commentList)) + + buffer := &bytes.Buffer{} + w := tabwriter.NewWriter(buffer, 1, 4, 1, '\t', 0) + + for commentIndex, comment := range commentList { + text := comment.Text + if attr, body, found := swagComment(text); found { + formatted := "//\t" + attr + if body != "" { + formatted += "\t" + splitComment2(attr, body) + } + _, _ = fmt.Fprintln(w, formatted) + linesToComments[len(linesToComments)] = commentIndex + } + } + + // Once we've loaded all of the comment lines to be aligned into the tab + // writer, flushing it causes the aligned text to be written out to the + // backing buffer. + _ = w.Flush() + + // Now the second step: we iterate over the aligned comment lines that were + // written into the backing buffer, pair each one up to its original + // comment line, and use the combination to describe the edit that needs to + // be made to the original input. + formattedComments := bytes.Split(buffer.Bytes(), []byte("\n")) + for lineIndex, commentIndex := range linesToComments { + comment := commentList[commentIndex] + *edits = append(*edits, edit{ + begin: fileSet.Position(comment.Pos()).Offset, + end: fileSet.Position(comment.End()).Offset, + replacement: formattedComments[lineIndex], + }) + } +} + +func splitComment2(attr, body string) string { + if specialTagForSplit[strings.ToLower(attr)] { + for i := 0; i < len(body); i++ { + if skipEnd, ok := skipChar[body[i]]; ok { + skipStart, n := body[i], 1 + for i++; i < len(body); i++ { + if skipStart != skipEnd && body[i] == skipStart { + n++ + } else if body[i] == skipEnd { + n-- + if n == 0 { + break + } + } + } + } else if body[i] == ' ' || body[i] == '\t' { + j := i + for ; j < len(body) && (body[j] == ' ' || body[j] == '\t'); j++ { + } + body = replaceRange(body, i, j, "\t") + } + } + } + return body +} + +func replaceRange(s string, start, end int, new string) string { + return s[:start] + new + s[end:] +} + +var swagCommentLineExpression = regexp.MustCompile(`^\/\/\s+(@[\S.]+)\s*(.*)`) + +func swagComment(comment string) (string, string, bool) { + matches := swagCommentLineExpression.FindStringSubmatch(comment) + if matches == nil { + return "", "", false + } + return matches[1], matches[2], true +} diff --git a/vendor/github.com/swaggo/swag/generics.go b/vendor/github.com/swaggo/swag/generics.go new file mode 100644 index 00000000..80e93a90 --- /dev/null +++ b/vendor/github.com/swaggo/swag/generics.go @@ -0,0 +1,448 @@ +//go:build go1.18 +// +build go1.18 + +package swag + +import ( + "errors" + "fmt" + "go/ast" + "strings" + "unicode" + + "github.com/go-openapi/spec" +) + +type genericTypeSpec struct { + TypeSpec *TypeSpecDef + Name string +} + +type formalParamType struct { + Name string + Type string +} + +func (t *genericTypeSpec) TypeName() string { + if t.TypeSpec != nil { + return t.TypeSpec.TypeName() + } + return t.Name +} + +func normalizeGenericTypeName(name string) string { + return strings.Replace(name, ".", "_", -1) +} + +func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, file *ast.File) (typeSpecDef *TypeSpecDef) { + if strings.HasPrefix(genericParam, "[]") { + typeSpecDef = pkgDefs.getTypeFromGenericParam(genericParam[2:], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + 
pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "array_" + typeSpecDef.TypeName()), + Type: &ast.ArrayType{ + Elt: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "array_" + typeSpecDef.SchemaName, + NotUnique: false, + } + } + + if strings.HasPrefix(genericParam, "map[") { + parts := strings.SplitN(genericParam[4:], "]", 2) + if len(parts) != 2 { + return nil + } + typeSpecDef = pkgDefs.getTypeFromGenericParam(parts[1], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "map_" + parts[0] + "_" + typeSpecDef.TypeName()), + Type: &ast.MapType{ + Key: ast.NewIdent(parts[0]), //assume key is string or integer + Value: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "map_" + parts[0] + "_" + typeSpecDef.SchemaName, + NotUnique: false, + } + } + if IsGolangPrimitiveType(genericParam) { + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(genericParam), + Type: ast.NewIdent(genericParam), + }, + SchemaName: genericParam, + } + } + return pkgDefs.FindTypeSpec(genericParam, file) +} + +func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, original *TypeSpecDef, fullGenericForm string) *TypeSpecDef { + if original == nil || original.TypeSpec.TypeParams == nil || len(original.TypeSpec.TypeParams.List) == 0 { + return original + } + + name, genericParams := splitGenericsTypeName(fullGenericForm) + if genericParams == nil { + return nil + } + + //generic[x,y any,z any] considered, TODO what if the type is not `any`, but a concrete one, such as `int32|int64` or an certain interface{} + var formals []formalParamType + for _, field := range original.TypeSpec.TypeParams.List { + for _, ident := range field.Names { + formal := formalParamType{Name: ident.Name} + if ident, ok := field.Type.(*ast.Ident); ok { + formal.Type = ident.Name + } + formals = append(formals, formal) + } + } + if len(genericParams) != len(formals) { + return nil + } + genericParamTypeDefs := map[string]*genericTypeSpec{} + + for i, genericParam := range genericParams { + var typeDef *TypeSpecDef + if !IsGolangPrimitiveType(genericParam) { + typeDef = pkgDefs.getTypeFromGenericParam(genericParam, file) + if typeDef != nil { + genericParam = typeDef.TypeName() + if _, ok := pkgDefs.uniqueDefinitions[genericParam]; !ok { + pkgDefs.uniqueDefinitions[genericParam] = typeDef + } + } + } + genericParamTypeDefs[formals[i].Name] = &genericTypeSpec{ + TypeSpec: typeDef, + Name: genericParam, + } + } + + name = fmt.Sprintf("%s%s-", string(IgnoreNameOverridePrefix), original.TypeName()) + schemaName := fmt.Sprintf("%s-", original.SchemaName) + + var nameParts []string + var schemaNameParts []string + + for _, def := range formals { + if specDef, ok := genericParamTypeDefs[def.Name]; ok { + nameParts = append(nameParts, specDef.Name) + + schemaNamePart := specDef.Name + + if specDef.TypeSpec != nil { + schemaNamePart = 
specDef.TypeSpec.SchemaName + } + + schemaNameParts = append(schemaNameParts, schemaNamePart) + } + } + + name += normalizeGenericTypeName(strings.Join(nameParts, "-")) + schemaName += normalizeGenericTypeName(strings.Join(schemaNameParts, "-")) + + if typeSpec, ok := pkgDefs.uniqueDefinitions[name]; ok { + return typeSpec + } + + parametrizedTypeSpec := &TypeSpecDef{ + File: original.File, + PkgPath: original.PkgPath, + TypeSpec: &ast.TypeSpec{ + Name: &ast.Ident{ + Name: name, + NamePos: original.TypeSpec.Name.NamePos, + Obj: original.TypeSpec.Name.Obj, + }, + Doc: original.TypeSpec.Doc, + Assign: original.TypeSpec.Assign, + }, + SchemaName: schemaName, + } + pkgDefs.uniqueDefinitions[name] = parametrizedTypeSpec + + parametrizedTypeSpec.TypeSpec.Type = pkgDefs.resolveGenericType(original.File, original.TypeSpec.Type, genericParamTypeDefs) + + return parametrizedTypeSpec +} + +// splitGenericsTypeName splits a generic struct name in his parts +func splitGenericsTypeName(fullGenericForm string) (string, []string) { + //remove all spaces character + fullGenericForm = strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 + } + return r + }, fullGenericForm) + + // split only at the first '[' and remove the last ']' + if fullGenericForm[len(fullGenericForm)-1] != ']' { + return "", nil + } + + genericParams := strings.SplitN(fullGenericForm[:len(fullGenericForm)-1], "[", 2) + if len(genericParams) == 1 { + return "", nil + } + + // generic type name + genericTypeName := genericParams[0] + + depth := 0 + genericParams = strings.FieldsFunc(genericParams[1], func(r rune) bool { + if r == '[' { + depth++ + } else if r == ']' { + depth-- + } else if r == ',' && depth == 0 { + return true + } + return false + }) + if depth != 0 { + return "", nil + } + + return genericTypeName, genericParams +} + +func (pkgDefs *PackagesDefinitions) getParametrizedType(genTypeSpec *genericTypeSpec) ast.Expr { + if genTypeSpec.TypeSpec != nil && strings.Contains(genTypeSpec.Name, ".") { + parts := strings.SplitN(genTypeSpec.Name, ".", 2) + return &ast.SelectorExpr{ + X: &ast.Ident{Name: parts[0]}, + Sel: &ast.Ident{Name: parts[1]}, + } + } + + //a primitive type name or a type name in current package + return &ast.Ident{Name: genTypeSpec.Name} +} + +func (pkgDefs *PackagesDefinitions) resolveGenericType(file *ast.File, expr ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) ast.Expr { + switch astExpr := expr.(type) { + case *ast.Ident: + if genTypeSpec, ok := genericParamTypeDefs[astExpr.Name]; ok { + return pkgDefs.getParametrizedType(genTypeSpec) + } + case *ast.ArrayType: + return &ast.ArrayType{ + Elt: pkgDefs.resolveGenericType(file, astExpr.Elt, genericParamTypeDefs), + Len: astExpr.Len, + Lbrack: astExpr.Lbrack, + } + case *ast.MapType: + return &ast.MapType{ + Map: astExpr.Map, + Key: pkgDefs.resolveGenericType(file, astExpr.Key, genericParamTypeDefs), + Value: pkgDefs.resolveGenericType(file, astExpr.Value, genericParamTypeDefs), + } + case *ast.StarExpr: + return &ast.StarExpr{ + Star: astExpr.Star, + X: pkgDefs.resolveGenericType(file, astExpr.X, genericParamTypeDefs), + } + case *ast.IndexExpr, *ast.IndexListExpr: + fullGenericName, _ := getGenericFieldType(file, expr, genericParamTypeDefs) + typeDef := pkgDefs.FindTypeSpec(fullGenericName, file) + if typeDef != nil { + return typeDef.TypeSpec.Name + } + case *ast.StructType: + newStructTypeDef := &ast.StructType{ + Struct: astExpr.Struct, + Incomplete: astExpr.Incomplete, + Fields: &ast.FieldList{ + Opening: 
astExpr.Fields.Opening, + Closing: astExpr.Fields.Closing, + }, + } + + for _, field := range astExpr.Fields.List { + newField := &ast.Field{ + Type: field.Type, + Doc: field.Doc, + Names: field.Names, + Tag: field.Tag, + Comment: field.Comment, + } + + newField.Type = pkgDefs.resolveGenericType(file, field.Type, genericParamTypeDefs) + + newStructTypeDef.Fields.List = append(newStructTypeDef.Fields.List, newField) + } + return newStructTypeDef + } + return expr +} + +func getExtendedGenericFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + switch fieldType := field.(type) { + case *ast.ArrayType: + fieldName, err := getExtendedGenericFieldType(file, fieldType.Elt, genericParamTypeDefs) + return "[]" + fieldName, err + case *ast.StarExpr: + return getExtendedGenericFieldType(file, fieldType.X, genericParamTypeDefs) + case *ast.Ident: + if genericParamTypeDefs != nil { + if typeSpec, ok := genericParamTypeDefs[fieldType.Name]; ok { + return typeSpec.Name, nil + } + } + if fieldType.Obj == nil { + return fieldType.Name, nil + } + + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + default: + return getFieldType(file, field, genericParamTypeDefs) + } +} + +func getGenericFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + var fullName string + var baseName string + var err error + switch fieldType := field.(type) { + case *ast.IndexListExpr: + baseName, err = getGenericTypeName(file, fieldType.X) + if err != nil { + return "", err + } + fullName = baseName + "[" + + for _, index := range fieldType.Indices { + fieldName, err := getExtendedGenericFieldType(file, index, genericParamTypeDefs) + if err != nil { + return "", err + } + + fullName += fieldName + "," + } + + fullName = strings.TrimRight(fullName, ",") + "]" + case *ast.IndexExpr: + baseName, err = getGenericTypeName(file, fieldType.X) + if err != nil { + return "", err + } + + indexName, err := getExtendedGenericFieldType(file, fieldType.Index, genericParamTypeDefs) + if err != nil { + return "", err + } + + fullName = fmt.Sprintf("%s[%s]", baseName, indexName) + } + + if fullName == "" { + return "", fmt.Errorf("unknown field type %#v", field) + } + + var packageName string + if !strings.Contains(baseName, ".") { + if file.Name == nil { + return "", errors.New("file name is nil") + } + packageName, _ = getFieldType(file, file.Name, genericParamTypeDefs) + } + + return strings.TrimLeft(fmt.Sprintf("%s.%s", packageName, fullName), "."), nil +} + +func getGenericTypeName(file *ast.File, field ast.Expr) (string, error) { + switch fieldType := field.(type) { + case *ast.Ident: + if fieldType.Obj == nil { + return fieldType.Name, nil + } + + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + case *ast.ArrayType: + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Elt.(*ast.Ident).Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + case *ast.SelectorExpr: + return fmt.Sprintf("%s.%s", fieldType.X.(*ast.Ident).Name, fieldType.Sel.Name), nil + } + return "", fmt.Errorf("unknown type %#v", field) +} + +func (parser *Parser) parseGenericTypeExpr(file *ast.File, typeExpr ast.Expr) (*spec.Schema, error) { + switch expr := typeExpr.(type) { + // suppress debug messages for these types + 
case *ast.InterfaceType: + case *ast.StructType: + case *ast.Ident: + case *ast.StarExpr: + case *ast.SelectorExpr: + case *ast.ArrayType: + case *ast.MapType: + case *ast.FuncType: + case *ast.IndexExpr, *ast.IndexListExpr: + name, err := getExtendedGenericFieldType(file, expr, nil) + if err == nil { + if schema, err := parser.getTypeSchema(name, file, false); err == nil { + return schema, nil + } + } + + parser.debug.Printf("Type definition of type '%T' is not supported yet. Using 'object' instead. (%s)\n", typeExpr, err) + default: + parser.debug.Printf("Type definition of type '%T' is not supported yet. Using 'object' instead.\n", typeExpr) + } + + return PrimitiveSchema(OBJECT), nil +} diff --git a/vendor/github.com/swaggo/swag/golist.go b/vendor/github.com/swaggo/swag/golist.go new file mode 100644 index 00000000..fa0b2cd9 --- /dev/null +++ b/vendor/github.com/swaggo/swag/golist.go @@ -0,0 +1,78 @@ +package swag + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/build" + "os/exec" + "path/filepath" +) + +func listPackages(ctx context.Context, dir string, env []string, args ...string) (pkgs []*build.Package, finalErr error) { + cmd := exec.CommandContext(ctx, "go", append([]string{"list", "-json", "-e"}, args...)...) + cmd.Env = env + cmd.Dir = dir + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + var stderrBuf bytes.Buffer + cmd.Stderr = &stderrBuf + defer func() { + if stderrBuf.Len() > 0 { + finalErr = fmt.Errorf("%v\n%s", finalErr, stderrBuf.Bytes()) + } + }() + + err = cmd.Start() + if err != nil { + return nil, err + } + dec := json.NewDecoder(stdout) + for dec.More() { + var pkg build.Package + err = dec.Decode(&pkg) + if err != nil { + return nil, err + } + pkgs = append(pkgs, &pkg) + } + err = cmd.Wait() + if err != nil { + return nil, err + } + return pkgs, nil +} + +func (parser *Parser) getAllGoFileInfoFromDepsByList(pkg *build.Package, parseFlag ParseFlag) error { + ignoreInternal := pkg.Goroot && !parser.ParseInternal + if ignoreInternal { // ignored internal + return nil + } + + if parser.skipPackageByPrefix(pkg.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + + srcDir := pkg.Dir + var err error + for i := range pkg.GoFiles { + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.GoFiles[i]), nil, parseFlag) + if err != nil { + return err + } + } + + // parse .go source files that import "C" + for i := range pkg.CgoFiles { + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.CgoFiles[i]), nil, parseFlag) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/swaggo/swag/license b/vendor/github.com/swaggo/swag/license new file mode 100644 index 00000000..a97865bf --- /dev/null +++ b/vendor/github.com/swaggo/swag/license @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Eason Lin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/swaggo/swag/operation.go b/vendor/github.com/swaggo/swag/operation.go new file mode 100644 index 00000000..ba3a05b5 --- /dev/null +++ b/vendor/github.com/swaggo/swag/operation.go @@ -0,0 +1,1249 @@ +package swag + +import ( + "encoding/json" + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "net/http" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/go-openapi/spec" + "golang.org/x/tools/go/loader" +) + +// RouteProperties describes HTTP properties of a single router comment. +type RouteProperties struct { + HTTPMethod string + Path string + Deprecated bool +} + +// Operation describes a single API operation on a path. +// For more information: https://github.com/swaggo/swag#api-operation +type Operation struct { + parser *Parser + codeExampleFilesDir string + spec.Operation + RouterProperties []RouteProperties + State string +} + +var mimeTypeAliases = map[string]string{ + "json": "application/json", + "xml": "text/xml", + "plain": "text/plain", + "html": "text/html", + "mpfd": "multipart/form-data", + "x-www-form-urlencoded": "application/x-www-form-urlencoded", + "json-api": "application/vnd.api+json", + "json-stream": "application/x-json-stream", + "octet-stream": "application/octet-stream", + "png": "image/png", + "jpeg": "image/jpeg", + "gif": "image/gif", +} + +var mimeTypePattern = regexp.MustCompile("^[^/]+/[^/]+$") + +// NewOperation creates a new Operation with default properties. +// map[int]Response. +func NewOperation(parser *Parser, options ...func(*Operation)) *Operation { + if parser == nil { + parser = New() + } + + result := &Operation{ + parser: parser, + RouterProperties: []RouteProperties{}, + Operation: spec.Operation{ + OperationProps: spec.OperationProps{ + ID: "", + Description: "", + Summary: "", + Security: nil, + ExternalDocs: nil, + Deprecated: false, + Tags: []string{}, + Consumes: []string{}, + Produces: []string{}, + Schemes: []string{}, + Parameters: []spec.Parameter{}, + Responses: &spec.Responses{ + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + ResponsesProps: spec.ResponsesProps{ + Default: nil, + StatusCodeResponses: make(map[int]spec.Response), + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + }, + codeExampleFilesDir: "", + } + + for _, option := range options { + option(result) + } + + return result +} + +// SetCodeExampleFilesDirectory sets the directory to search for codeExamples. +func SetCodeExampleFilesDirectory(directoryPath string) func(*Operation) { + return func(o *Operation) { + o.codeExampleFilesDir = directoryPath + } +} + +// ParseComment parses comment for given comment string and returns error if error occurs. 
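+// An illustrative comment block whose lines are dispatched here one at a
+// time (a sketch with hypothetical names, not part of the vendored source):
+//
+//	// @Summary     Get account
+//	// @Description get account by ID
+//	// @Tags        accounts
+//	// @Accept      json
+//	// @Produce     json
+//	// @Param       id   path      int  true  "Account ID"
+//	// @Success     200  {object}  model.Account
+//	// @Failure     404  {string}  string  "not found"
+//	// @Router      /accounts/{id} [get]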
+func (operation *Operation) ParseComment(comment string, astFile *ast.File) error { + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return nil + } + + fields := FieldsByAnySpace(commentLine, 2) + attribute := fields[0] + lowerAttribute := strings.ToLower(attribute) + var lineRemainder string + if len(fields) > 1 { + lineRemainder = fields[1] + } + switch lowerAttribute { + case stateAttr: + operation.ParseStateComment(lineRemainder) + case descriptionAttr: + operation.ParseDescriptionComment(lineRemainder) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag(lineRemainder, operation.parser.markdownFileDir) + if err != nil { + return err + } + + operation.ParseDescriptionComment(string(commentInfo)) + case summaryAttr: + operation.Summary = lineRemainder + case idAttr: + operation.ID = lineRemainder + case tagsAttr: + operation.ParseTagsComment(lineRemainder) + case acceptAttr: + return operation.ParseAcceptComment(lineRemainder) + case produceAttr: + return operation.ParseProduceComment(lineRemainder) + case paramAttr: + return operation.ParseParamComment(lineRemainder, astFile) + case successAttr, failureAttr, responseAttr: + return operation.ParseResponseComment(lineRemainder, astFile) + case headerAttr: + return operation.ParseResponseHeaderComment(lineRemainder, astFile) + case routerAttr: + return operation.ParseRouterComment(lineRemainder, false) + case deprecatedRouterAttr: + return operation.ParseRouterComment(lineRemainder, true) + case securityAttr: + return operation.ParseSecurityComment(lineRemainder) + case deprecatedAttr: + operation.Deprecate() + case xCodeSamplesAttr: + return operation.ParseCodeSample(attribute, commentLine, lineRemainder) + default: + return operation.ParseMetadata(attribute, lowerAttribute, lineRemainder) + } + + return nil +} + +// ParseCodeSample parse code sample. +func (operation *Operation) ParseCodeSample(attribute, _, lineRemainder string) error { + if lineRemainder == "file" { + data, err := getCodeExampleForSummary(operation.Summary, operation.codeExampleFilesDir) + if err != nil { + return err + } + + var valueJSON interface{} + + err = json.Unmarshal(data, &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value", attribute) + } + + // don't use the method provided by spec lib, because it will call toLower() on attribute names, which is wrongly + operation.Extensions[attribute[1:]] = valueJSON + + return nil + } + + // Fallback into existing logic + return operation.ParseMetadata(attribute, strings.ToLower(attribute), lineRemainder) +} + +// ParseStateComment parse state comment. +func (operation *Operation) ParseStateComment(lineRemainder string) { + operation.State = lineRemainder +} + +// ParseDescriptionComment parse description comment. +func (operation *Operation) ParseDescriptionComment(lineRemainder string) { + if operation.Description == "" { + operation.Description = lineRemainder + + return + } + + operation.Description += "\n" + lineRemainder +} + +// ParseMetadata parse metadata. 
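+// Only "@x-..." extension attributes are stored; the value must be valid
+// JSON, e.g. a hypothetical: @x-audit {"enabled": true}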
+func (operation *Operation) ParseMetadata(attribute, lowerAttribute, lineRemainder string) error { + // parsing specific meta data extensions + if strings.HasPrefix(lowerAttribute, "@x-") { + if len(lineRemainder) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON interface{} + + err := json.Unmarshal([]byte(lineRemainder), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value", attribute) + } + + // don't use the method provided by spec lib, because it will call toLower() on attribute names, which is wrongly + operation.Extensions[attribute[1:]] = valueJSON + } + + return nil +} + +var paramPattern = regexp.MustCompile(`(\S+)\s+(\w+)\s+([\S. ]+?)\s+(\w+)\s+"([^"]+)"`) + +func findInSlice(arr []string, target string) bool { + for _, str := range arr { + if str == target { + return true + } + } + + return false +} + +// ParseParamComment parses params return []string of param properties +// E.g. @Param queryText formData string true "The email for login" +// +// [param name] [paramType] [data type] [is mandatory?] [Comment] +// +// E.g. @Param some_id path int true "Some ID". +func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.File) error { + matches := paramPattern.FindStringSubmatch(commentLine) + if len(matches) != 6 { + return fmt.Errorf("missing required param comment parameters \"%s\"", commentLine) + } + + name := matches[1] + paramType := matches[2] + refType := TransToValidSchemeType(matches[3]) + + // Detect refType + objectType := OBJECT + + if strings.HasPrefix(refType, "[]") { + objectType = ARRAY + refType = strings.TrimPrefix(refType, "[]") + refType = TransToValidSchemeType(refType) + } else if IsPrimitiveType(refType) || + paramType == "formData" && refType == "file" { + objectType = PRIMITIVE + } + + var enums []interface{} + if !IsPrimitiveType(refType) { + schema, _ := operation.parser.getTypeSchema(refType, astFile, false) + if schema != nil && len(schema.Type) == 1 && schema.Enum != nil { + if objectType == OBJECT { + objectType = PRIMITIVE + } + refType = TransToValidSchemeType(schema.Type[0]) + enums = schema.Enum + } + } + + requiredText := strings.ToLower(matches[4]) + required := requiredText == "true" || requiredText == requiredLabel + description := strings.Join(strings.Split(matches[5], "\\n"), "\n") + + param := createParameter(paramType, description, name, objectType, refType, required, enums, operation.parser.collectionFormatInQuery) + + switch paramType { + case "path", "header", "query", "formData": + switch objectType { + case ARRAY: + if !IsPrimitiveType(refType) && !(refType == "file" && paramType == "formData") { + return fmt.Errorf("%s is not supported array type for %s", refType, paramType) + } + case PRIMITIVE: + break + case OBJECT: + schema, err := operation.parser.getTypeSchema(refType, astFile, false) + if err != nil { + return err + } + + if len(schema.Properties) == 0 { + return nil + } + + items := schema.Properties.ToOrderedSchemaItems() + + for _, item := range items { + name, prop := item.Name, &item.Schema + if len(prop.Type) == 0 { + prop = operation.parser.getUnderlyingSchema(prop) + if len(prop.Type) == 0 { + continue + } + } + + nameOverrideType := paramType + // query also uses formData tags + if paramType == "query" { + nameOverrideType = "formData" + } + // load overridden type specific name from extensions if exists + if nameVal, ok := item.Schema.Extensions[nameOverrideType]; ok { + name = nameVal.(string) + } + + switch { + 
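+			// (sketch) a hypothetical @Param filter query model.Filter false "filter"
+			// reaches this switch once per struct field, emitting each exported
+			// field as its own query/formData parameter below.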
case prop.Type[0] == ARRAY: + if prop.Items.Schema == nil { + continue + } + itemSchema := prop.Items.Schema + if len(itemSchema.Type) == 0 { + itemSchema = operation.parser.getUnderlyingSchema(prop.Items.Schema) + } + if itemSchema == nil { + continue + } + if len(itemSchema.Type) == 0 { + continue + } + if !IsSimplePrimitiveType(itemSchema.Type[0]) { + continue + } + param = createParameter(paramType, prop.Description, name, prop.Type[0], itemSchema.Type[0], findInSlice(schema.Required, item.Name), itemSchema.Enum, operation.parser.collectionFormatInQuery) + + case IsSimplePrimitiveType(prop.Type[0]): + param = createParameter(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], findInSlice(schema.Required, item.Name), nil, operation.parser.collectionFormatInQuery) + default: + operation.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) + continue + } + + param.Nullable = prop.Nullable + param.Format = prop.Format + param.Default = prop.Default + param.Example = prop.Example + param.Extensions = prop.Extensions + param.CommonValidations.Maximum = prop.Maximum + param.CommonValidations.Minimum = prop.Minimum + param.CommonValidations.ExclusiveMaximum = prop.ExclusiveMaximum + param.CommonValidations.ExclusiveMinimum = prop.ExclusiveMinimum + param.CommonValidations.MaxLength = prop.MaxLength + param.CommonValidations.MinLength = prop.MinLength + param.CommonValidations.Pattern = prop.Pattern + param.CommonValidations.MaxItems = prop.MaxItems + param.CommonValidations.MinItems = prop.MinItems + param.CommonValidations.UniqueItems = prop.UniqueItems + param.CommonValidations.MultipleOf = prop.MultipleOf + param.CommonValidations.Enum = prop.Enum + operation.Operation.Parameters = append(operation.Operation.Parameters, param) + } + + return nil + } + case "body": + if objectType == PRIMITIVE { + param.Schema = PrimitiveSchema(refType) + } else { + schema, err := operation.parseAPIObjectSchema(commentLine, objectType, refType, astFile) + if err != nil { + return err + } + + param.Schema = schema + } + default: + return fmt.Errorf("%s is not supported paramType", paramType) + } + + err := operation.parseParamAttribute(commentLine, objectType, refType, paramType, ¶m) + + if err != nil { + return err + } + + operation.Operation.Parameters = append(operation.Operation.Parameters, param) + + return nil +} + +const ( + formTag = "form" + jsonTag = "json" + uriTag = "uri" + headerTag = "header" + bindingTag = "binding" + defaultTag = "default" + enumsTag = "enums" + exampleTag = "example" + schemaExampleTag = "schemaExample" + formatTag = "format" + titleTag = "title" + validateTag = "validate" + minimumTag = "minimum" + maximumTag = "maximum" + minLengthTag = "minLength" + maxLengthTag = "maxLength" + multipleOfTag = "multipleOf" + readOnlyTag = "readonly" + extensionsTag = "extensions" + collectionFormatTag = "collectionFormat" +) + +var regexAttributes = map[string]*regexp.Regexp{ + // for Enums(A, B) + enumsTag: regexp.MustCompile(`(?i)\s+enums\(.*\)`), + // for maximum(0) + maximumTag: regexp.MustCompile(`(?i)\s+maxinum|maximum\(.*\)`), + // for minimum(0) + minimumTag: regexp.MustCompile(`(?i)\s+mininum|minimum\(.*\)`), + // for default(0) + defaultTag: regexp.MustCompile(`(?i)\s+default\(.*\)`), + // for minlength(0) + minLengthTag: regexp.MustCompile(`(?i)\s+minlength\(.*\)`), + // for maxlength(0) + maxLengthTag: regexp.MustCompile(`(?i)\s+maxlength\(.*\)`), + // for format(email) + formatTag: 
regexp.MustCompile(`(?i)\s+format\(.*\)`), + // for extensions(x-example=test) + extensionsTag: regexp.MustCompile(`(?i)\s+extensions\(.*\)`), + // for collectionFormat(csv) + collectionFormatTag: regexp.MustCompile(`(?i)\s+collectionFormat\(.*\)`), + // example(0) + exampleTag: regexp.MustCompile(`(?i)\s+example\(.*\)`), + // schemaExample(0) + schemaExampleTag: regexp.MustCompile(`(?i)\s+schemaExample\(.*\)`), +} + +func (operation *Operation) parseParamAttribute(comment, objectType, schemaType, paramType string, param *spec.Parameter) error { + schemaType = TransToValidSchemeType(schemaType) + + for attrKey, re := range regexAttributes { + attr, err := findAttr(re, comment) + if err != nil { + continue + } + + switch attrKey { + case enumsTag: + err = setEnumParam(param, attr, objectType, schemaType, paramType) + case minimumTag, maximumTag: + err = setNumberParam(param, attrKey, schemaType, attr, comment) + case defaultTag: + err = setDefault(param, schemaType, attr) + case minLengthTag, maxLengthTag: + err = setStringParam(param, attrKey, schemaType, attr, comment) + case formatTag: + param.Format = attr + case exampleTag: + err = setExample(param, schemaType, attr) + case schemaExampleTag: + err = setSchemaExample(param, schemaType, attr) + case extensionsTag: + param.Extensions = setExtensionParam(attr) + case collectionFormatTag: + err = setCollectionFormatParam(param, attrKey, objectType, attr, comment) + } + + if err != nil { + return err + } + } + + return nil +} + +func findAttr(re *regexp.Regexp, commentLine string) (string, error) { + attr := re.FindString(commentLine) + + l, r := strings.Index(attr, "("), strings.Index(attr, ")") + if l == -1 || r == -1 { + return "", fmt.Errorf("can not find regex=%s, comment=%s", re.String(), commentLine) + } + + return strings.TrimSpace(attr[l+1 : r]), nil +} + +func setStringParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType != STRING { + return fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) + } + + n, err := strconv.ParseInt(attr, 10, 64) + if err != nil { + return fmt.Errorf("%s is allow only a number got=%s", name, attr) + } + + switch name { + case minLengthTag: + param.MinLength = &n + case maxLengthTag: + param.MaxLength = &n + } + + return nil +} + +func setNumberParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + switch schemaType { + case INTEGER, NUMBER: + n, err := strconv.ParseFloat(attr, 64) + if err != nil { + return fmt.Errorf("maximum is allow only a number. comment=%s got=%s", commentLine, attr) + } + + switch name { + case minimumTag: + param.Minimum = &n + case maximumTag: + param.Maximum = &n + } + + return nil + default: + return fmt.Errorf("%s is attribute to set to a number. 
comment=%s got=%s", name, commentLine, schemaType) + } +} + +func setEnumParam(param *spec.Parameter, attr, objectType, schemaType, paramType string) error { + for _, e := range strings.Split(attr, ",") { + e = strings.TrimSpace(e) + + value, err := defineType(schemaType, e) + if err != nil { + return err + } + + switch objectType { + case ARRAY: + param.Items.Enum = append(param.Items.Enum, value) + default: + switch paramType { + case "body": + param.Schema.Enum = append(param.Schema.Enum, value) + default: + param.Enum = append(param.Enum, value) + } + } + } + + return nil +} + +func setExtensionParam(attr string) spec.Extensions { + extensions := spec.Extensions{} + + for _, val := range splitNotWrapped(attr, ',') { + parts := strings.SplitN(val, "=", 2) + if len(parts) == 2 { + extensions.Add(parts[0], parts[1]) + + continue + } + + if len(parts[0]) > 0 && string(parts[0][0]) == "!" { + extensions.Add(parts[0][1:], false) + + continue + } + + extensions.Add(parts[0], true) + } + + return extensions +} + +func setCollectionFormatParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType == ARRAY { + param.CollectionFormat = TransToValidCollectionFormat(attr) + + return nil + } + + return fmt.Errorf("%s is attribute to set to an array. comment=%s got=%s", name, commentLine, schemaType) +} + +func setDefault(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a default value if it's not valid + } + + param.Default = val + + return nil +} + +func setSchemaExample(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + // skip schema + if param.Schema == nil { + return nil + } + + switch v := val.(type) { + case string: + // replaces \r \n \t in example string values. + param.Schema.Example = strings.NewReplacer(`\r`, "\r", `\n`, "\n", `\t`, "\t").Replace(v) + default: + param.Schema.Example = val + } + + return nil +} + +func setExample(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + + param.Example = val + + return nil +} + +// defineType enum value define the type (object and array unsupported). +func defineType(schemaType string, value string) (v interface{}, err error) { + schemaType = TransToValidSchemeType(schemaType) + + switch schemaType { + case STRING: + return value, nil + case NUMBER: + v, err = strconv.ParseFloat(value, 64) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + case INTEGER: + v, err = strconv.Atoi(value) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + case BOOLEAN: + v, err = strconv.ParseBool(value) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + default: + return nil, fmt.Errorf("%s is unsupported type in enum value %s", schemaType, value) + } + + return v, nil +} + +// ParseTagsComment parses comment for given `tag` comment string. 
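+// e.g. (sketch) "@Tags accounts,admin" appends the tags "accounts" and
+// "admin" after trimming surrounding whitespace.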
+func (operation *Operation) ParseTagsComment(commentLine string) { + for _, tag := range strings.Split(commentLine, ",") { + operation.Tags = append(operation.Tags, strings.TrimSpace(tag)) + } +} + +// ParseAcceptComment parses comment for given `accept` comment string. +func (operation *Operation) ParseAcceptComment(commentLine string) error { + return parseMimeTypeList(commentLine, &operation.Consumes, "%v accept type can't be accepted") +} + +// ParseProduceComment parses comment for given `produce` comment string. +func (operation *Operation) ParseProduceComment(commentLine string) error { + return parseMimeTypeList(commentLine, &operation.Produces, "%v produce type can't be accepted") +} + +// parseMimeTypeList parses a list of MIME Types for a comment like +// `produce` (`Content-Type:` response header) or +// `accept` (`Accept:` request header). +func parseMimeTypeList(mimeTypeList string, typeList *[]string, format string) error { + for _, typeName := range strings.Split(mimeTypeList, ",") { + if mimeTypePattern.MatchString(typeName) { + *typeList = append(*typeList, typeName) + + continue + } + + aliasMimeType, ok := mimeTypeAliases[typeName] + if !ok { + return fmt.Errorf(format, typeName) + } + + *typeList = append(*typeList, aliasMimeType) + } + + return nil +} + +var routerPattern = regexp.MustCompile(`^(/[\w./\-{}\(\)+:$]*)[[:blank:]]+\[(\w+)]`) + +// ParseRouterComment parses comment for given `router` comment string. +func (operation *Operation) ParseRouterComment(commentLine string, deprecated bool) error { + matches := routerPattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse router comment \"%s\"", commentLine) + } + + signature := RouteProperties{ + Path: matches[1], + HTTPMethod: strings.ToUpper(matches[2]), + Deprecated: deprecated, + } + + if _, ok := allMethod[signature.HTTPMethod]; !ok { + return fmt.Errorf("invalid method: %s", signature.HTTPMethod) + } + + operation.RouterProperties = append(operation.RouterProperties, signature) + + return nil +} + +// ParseSecurityComment parses comment for given `security` comment string. +func (operation *Operation) ParseSecurityComment(commentLine string) error { + if len(commentLine) == 0 { + operation.Security = []map[string][]string{} + return nil + } + + var ( + securityMap = make(map[string][]string) + securitySource = commentLine[strings.Index(commentLine, "@Security")+1:] + ) + + for _, securityOption := range strings.Split(securitySource, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + operation.Security = append(operation.Security, securityMap) + + return nil +} + +// findTypeDef attempts to find the *ast.TypeSpec for a specific type given the +// type's name and the package's import path. +// TODO: improve finding external pkg. 
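+// e.g. (hypothetical) findTypeDef("example.com/project/model", "User") loads
+// the package via go/loader and scans its type declarations for "User".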
+func findTypeDef(importPath, typeName string) (*ast.TypeSpec, error) { + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + + conf := loader.Config{ + ParserMode: goparser.SpuriousErrors, + Cwd: cwd, + } + + conf.Import(importPath) + + lprog, err := conf.Load() + if err != nil { + return nil, err + } + + // If the pkg is vendored, the actual pkg path is going to resemble + // something like "{importPath}/vendor/{importPath}" + for k := range lprog.AllPackages { + realPkgPath := k.Path() + + if strings.Contains(realPkgPath, "vendor/"+importPath) { + importPath = realPkgPath + } + } + + pkgInfo := lprog.Package(importPath) + + if pkgInfo == nil { + return nil, fmt.Errorf("package was nil") + } + + // TODO: possibly cache pkgInfo since it's an expensive operation + for i := range pkgInfo.Files { + for _, astDeclaration := range pkgInfo.Files[i].Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if ok && generalDeclaration.Tok == token.TYPE { + for _, astSpec := range generalDeclaration.Specs { + typeSpec, ok := astSpec.(*ast.TypeSpec) + if ok { + if typeSpec.Name.String() == typeName { + return typeSpec, nil + } + } + } + } + } + } + + return nil, fmt.Errorf("type spec not found") +} + +var responsePattern = regexp.MustCompile(`^([\w,]+)\s+([\w{}]+)\s+([\w\-.\\{}=,\[\s\]]+)\s*(".*)?`) + +// ResponseType{data1=Type1,data2=Type2}. +var combinedPattern = regexp.MustCompile(`^([\w\-./\[\]]+){(.*)}$`) + +func (operation *Operation) parseObjectSchema(refType string, astFile *ast.File) (*spec.Schema, error) { + return parseObjectSchema(operation.parser, refType, astFile) +} + +func parseObjectSchema(parser *Parser, refType string, astFile *ast.File) (*spec.Schema, error) { + switch { + case refType == NIL: + return nil, nil + case refType == INTERFACE: + return &spec.Schema{}, nil + case refType == ANY: + return &spec.Schema{}, nil + case IsGolangPrimitiveType(refType): + refType = TransToValidSchemeType(refType) + + return PrimitiveSchema(refType), nil + case IsPrimitiveType(refType): + return PrimitiveSchema(refType), nil + case strings.HasPrefix(refType, "[]"): + schema, err := parseObjectSchema(parser, refType[2:], astFile) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(schema), nil + case strings.HasPrefix(refType, "map["): + // ignore key type + idx := strings.Index(refType, "]") + if idx < 0 { + return nil, fmt.Errorf("invalid type: %s", refType) + } + + refType = refType[idx+1:] + if refType == INTERFACE || refType == ANY { + return spec.MapProperty(nil), nil + } + + schema, err := parseObjectSchema(parser, refType, astFile) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + case strings.Contains(refType, "{"): + return parseCombinedObjectSchema(parser, refType, astFile) + default: + if parser != nil { // checking refType has existing in 'TypeDefinitions' + schema, err := parser.getTypeSchema(refType, astFile, true) + if err != nil { + return nil, err + } + + return schema, nil + } + + return RefSchema(refType), nil + } +} + +func parseFields(s string) []string { + nestLevel := 0 + + return strings.FieldsFunc(s, func(char rune) bool { + if char == '{' { + nestLevel++ + + return false + } else if char == '}' { + nestLevel-- + + return false + } + + return char == ',' && nestLevel == 0 + }) +} + +func parseCombinedObjectSchema(parser *Parser, refType string, astFile *ast.File) (*spec.Schema, error) { + matches := combinedPattern.FindStringSubmatch(refType) + if len(matches) != 3 { + return nil, 
fmt.Errorf("invalid type: %s", refType) + } + + schema, err := parseObjectSchema(parser, matches[1], astFile) + if err != nil { + return nil, err + } + + fields, props := parseFields(matches[2]), map[string]spec.Schema{} + + for _, field := range fields { + keyVal := strings.SplitN(field, "=", 2) + if len(keyVal) == 2 { + schema, err := parseObjectSchema(parser, keyVal[1], astFile) + if err != nil { + return nil, err + } + + if schema == nil { + schema = PrimitiveSchema(OBJECT) + } + + props[keyVal[0]] = *schema + } + } + + if len(props) == 0 { + return schema, nil + } + + if schema.Ref.GetURL() == nil && len(schema.Type) > 0 && schema.Type[0] == OBJECT && len(schema.Properties) == 0 && schema.AdditionalProperties == nil { + schema.Properties = props + return schema, nil + } + + return spec.ComposedSchema(*schema, spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{OBJECT}, + Properties: props, + }, + }), nil +} + +func (operation *Operation) parseAPIObjectSchema(commentLine, schemaType, refType string, astFile *ast.File) (*spec.Schema, error) { + if strings.HasSuffix(refType, ",") && strings.Contains(refType, "[") { + // regexp may have broken generic syntax. find closing bracket and add it back + allMatchesLenOffset := strings.Index(commentLine, refType) + len(refType) + lostPartEndIdx := strings.Index(commentLine[allMatchesLenOffset:], "]") + if lostPartEndIdx >= 0 { + refType += commentLine[allMatchesLenOffset : allMatchesLenOffset+lostPartEndIdx+1] + } + } + + switch schemaType { + case OBJECT: + if !strings.HasPrefix(refType, "[]") { + return operation.parseObjectSchema(refType, astFile) + } + + refType = refType[2:] + + fallthrough + case ARRAY: + schema, err := operation.parseObjectSchema(refType, astFile) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(schema), nil + default: + return PrimitiveSchema(schemaType), nil + } +} + +// ParseResponseComment parses comment for given `response` comment string. 
+func (operation *Operation) ParseResponseComment(commentLine string, astFile *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + err := operation.ParseEmptyResponseComment(commentLine) + if err != nil { + return operation.ParseEmptyResponseOnly(commentLine) + } + + return err + } + + description := strings.Trim(matches[4], "\"") + + schema, err := operation.parseAPIObjectSchema(commentLine, strings.Trim(matches[2], "{}"), strings.TrimSpace(matches[3]), astFile) + if err != nil { + return err + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().WithSchema(schema).WithDescription(description) + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + resp := spec.NewResponse().WithSchema(schema).WithDescription(description) + if description == "" { + resp.WithDescription(http.StatusText(code)) + } + + operation.AddResponse(code, resp) + } + + return nil +} + +func newHeaderSpec(schemaType, description string) spec.Header { + return spec.Header{ + SimpleSchema: spec.SimpleSchema{ + Type: schemaType, + }, + HeaderProps: spec.HeaderProps{ + Description: description, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: nil, + }, + CommonValidations: spec.CommonValidations{ + Maximum: nil, + ExclusiveMaximum: false, + Minimum: nil, + ExclusiveMinimum: false, + MaxLength: nil, + MinLength: nil, + Pattern: "", + MaxItems: nil, + MinItems: nil, + UniqueItems: false, + MultipleOf: nil, + Enum: nil, + }, + } +} + +// ParseResponseHeaderComment parses comment for given `response header` comment string. +func (operation *Operation) ParseResponseHeaderComment(commentLine string, _ *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + header := newHeaderSpec(strings.Trim(matches[2], "{}"), strings.Trim(matches[4], "\"")) + + headerKey := strings.TrimSpace(matches[3]) + + if strings.EqualFold(matches[1], "all") { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } + + if operation.Responses.StatusCodeResponses != nil { + for code, response := range operation.Responses.StatusCodeResponses { + response.Headers[headerKey] = header + operation.Responses.StatusCodeResponses[code] = response + } + } + + return nil + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + if operation.Responses.StatusCodeResponses != nil { + response, responseExist := operation.Responses.StatusCodeResponses[code] + if responseExist { + response.Headers[headerKey] = header + + operation.Responses.StatusCodeResponses[code] = response + } + } + } + + return nil +} + +var emptyResponsePattern = regexp.MustCompile(`([\w,]+)\s+"(.*)"`) + +// ParseEmptyResponseComment parse only comment out status code and description,eg: @Success 200 "it's ok". 
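+// Comma-separated codes and the literal "default" are also accepted, e.g.
+// (sketch): @Failure 400,404,default "client error"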
+func (operation *Operation) ParseEmptyResponseComment(commentLine string) error { + matches := emptyResponsePattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + description := strings.Trim(matches[2], "\"") + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().WithDescription(description) + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + operation.AddResponse(code, spec.NewResponse().WithDescription(description)) + } + + return nil +} + +// ParseEmptyResponseOnly parse only comment out status code ,eg: @Success 200. +func (operation *Operation) ParseEmptyResponseOnly(commentLine string) error { + for _, codeStr := range strings.Split(commentLine, ",") { + if strings.EqualFold(codeStr, defaultTag) { + _ = operation.DefaultResponse() + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + operation.AddResponse(code, spec.NewResponse().WithDescription(http.StatusText(code))) + } + + return nil +} + +// DefaultResponse return the default response member pointer. +func (operation *Operation) DefaultResponse() *spec.Response { + if operation.Responses.Default == nil { + operation.Responses.Default = &spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "", + Headers: make(map[string]spec.Header), + }, + } + } + + return operation.Responses.Default +} + +// AddResponse add a response for a code. +func (operation *Operation) AddResponse(code int, response *spec.Response) { + if response.Headers == nil { + response.Headers = make(map[string]spec.Header) + } + + operation.Responses.StatusCodeResponses[code] = *response +} + +// createParameter returns swagger spec.Parameter for given paramType, description, paramName, schemaType, required. +func createParameter(paramType, description, paramName, objectType, schemaType string, required bool, enums []interface{}, collectionFormat string) spec.Parameter { + // //five possible parameter types. 
query, path, body, header, form + result := spec.Parameter{ + ParamProps: spec.ParamProps{ + Name: paramName, + Description: description, + Required: required, + In: paramType, + }, + } + + if paramType == "body" { + return result + } + + switch objectType { + case ARRAY: + result.Type = objectType + result.CollectionFormat = collectionFormat + result.Items = &spec.Items{ + CommonValidations: spec.CommonValidations{ + Enum: enums, + }, + SimpleSchema: spec.SimpleSchema{ + Type: schemaType, + }, + } + case PRIMITIVE, OBJECT: + result.Type = schemaType + result.Enum = enums + } + return result +} + +func getCodeExampleForSummary(summaryName string, dirPath string) ([]byte, error) { + dirEntries, err := os.ReadDir(dirPath) + if err != nil { + return nil, err + } + + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + + fileName := entry.Name() + + if !strings.Contains(fileName, ".json") { + continue + } + + if strings.Contains(fileName, summaryName) { + fullPath := filepath.Join(dirPath, fileName) + + commentInfo, err := os.ReadFile(fullPath) + if err != nil { + return nil, fmt.Errorf("Failed to read code example file %s error: %s ", fullPath, err) + } + + return commentInfo, nil + } + } + + return nil, fmt.Errorf("unable to find code example file for tag %s in the given directory", summaryName) +} diff --git a/vendor/github.com/swaggo/swag/package.go b/vendor/github.com/swaggo/swag/package.go new file mode 100644 index 00000000..6c3129e5 --- /dev/null +++ b/vendor/github.com/swaggo/swag/package.go @@ -0,0 +1,187 @@ +package swag + +import ( + "go/ast" + "go/token" + "reflect" + "strconv" + "strings" +) + +// PackageDefinitions files and definition in a package. +type PackageDefinitions struct { + // files in this package, map key is file's relative path starting package path + Files map[string]*ast.File + + // definitions in this package, map key is typeName + TypeDefinitions map[string]*TypeSpecDef + + // const variables in this package, map key is the name + ConstTable map[string]*ConstVariable + + // const variables in order in this package + OrderedConst []*ConstVariable + + // package name + Name string + + // package path + Path string +} + +// ConstVariableGlobalEvaluator an interface used to evaluate enums across packages +type ConstVariableGlobalEvaluator interface { + EvaluateConstValue(pkg *PackageDefinitions, cv *ConstVariable, recursiveStack map[string]struct{}) (interface{}, ast.Expr) + EvaluateConstValueByName(file *ast.File, pkgPath, constVariableName string, recursiveStack map[string]struct{}) (interface{}, ast.Expr) + FindTypeSpec(typeName string, file *ast.File) *TypeSpecDef +} + +// NewPackageDefinitions new a PackageDefinitions object +func NewPackageDefinitions(name, pkgPath string) *PackageDefinitions { + return &PackageDefinitions{ + Name: name, + Path: pkgPath, + Files: make(map[string]*ast.File), + TypeDefinitions: make(map[string]*TypeSpecDef), + ConstTable: make(map[string]*ConstVariable), + } +} + +// AddFile add a file +func (pkg *PackageDefinitions) AddFile(pkgPath string, file *ast.File) *PackageDefinitions { + pkg.Files[pkgPath] = file + return pkg +} + +// AddTypeSpec add a type spec. +func (pkg *PackageDefinitions) AddTypeSpec(name string, typeSpec *TypeSpecDef) *PackageDefinitions { + pkg.TypeDefinitions[name] = typeSpec + return pkg +} + +// AddConst add a const variable. 
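+// e.g. (sketch) each name/value pair of a declaration such as
+//
+//	const (
+//		Pending Status = iota // stored together with its trailing comment
+//		Active
+//	)
+//
+// is recorded in ConstTable and appended to OrderedConst for later evaluation.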
+func (pkg *PackageDefinitions) AddConst(astFile *ast.File, valueSpec *ast.ValueSpec) *PackageDefinitions { + for i := 0; i < len(valueSpec.Names) && i < len(valueSpec.Values); i++ { + variable := &ConstVariable{ + Name: valueSpec.Names[i], + Type: valueSpec.Type, + Value: valueSpec.Values[i], + Comment: valueSpec.Comment, + File: astFile, + } + pkg.ConstTable[valueSpec.Names[i].Name] = variable + pkg.OrderedConst = append(pkg.OrderedConst, variable) + } + return pkg +} + +func (pkg *PackageDefinitions) evaluateConstValue(file *ast.File, iota int, expr ast.Expr, globalEvaluator ConstVariableGlobalEvaluator, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + switch valueExpr := expr.(type) { + case *ast.Ident: + if valueExpr.Name == "iota" { + return iota, nil + } + if pkg.ConstTable != nil { + if cv, ok := pkg.ConstTable[valueExpr.Name]; ok { + return globalEvaluator.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + case *ast.SelectorExpr: + pkgIdent, ok := valueExpr.X.(*ast.Ident) + if !ok { + return nil, nil + } + return globalEvaluator.EvaluateConstValueByName(file, pkgIdent.Name, valueExpr.Sel.Name, recursiveStack) + case *ast.BasicLit: + switch valueExpr.Kind { + case token.INT: + // handle underscored number, such as 1_000_000 + if strings.ContainsRune(valueExpr.Value, '_') { + valueExpr.Value = strings.Replace(valueExpr.Value, "_", "", -1) + } + if len(valueExpr.Value) >= 2 && valueExpr.Value[0] == '0' { + var start, base = 2, 8 + switch valueExpr.Value[1] { + case 'x', 'X': + //hex + base = 16 + case 'b', 'B': + //binary + base = 2 + default: + //octet + start = 1 + } + if x, err := strconv.ParseInt(valueExpr.Value[start:], base, 64); err == nil { + return int(x), nil + } else if x, err := strconv.ParseUint(valueExpr.Value[start:], base, 64); err == nil { + return x, nil + } else { + panic(err) + } + } + + //a basic literal integer is int type in default, or must have an explicit converting type in front + if x, err := strconv.ParseInt(valueExpr.Value, 10, 64); err == nil { + return int(x), nil + } else if x, err := strconv.ParseUint(valueExpr.Value, 10, 64); err == nil { + return x, nil + } else { + panic(err) + } + case token.STRING: + if valueExpr.Value[0] == '`' { + return valueExpr.Value[1 : len(valueExpr.Value)-1], nil + } + return EvaluateEscapedString(valueExpr.Value[1 : len(valueExpr.Value)-1]), nil + case token.CHAR: + return EvaluateEscapedChar(valueExpr.Value[1 : len(valueExpr.Value)-1]), nil + } + case *ast.UnaryExpr: + x, evalType := pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + if x == nil { + return x, evalType + } + return EvaluateUnary(x, valueExpr.Op, evalType) + case *ast.BinaryExpr: + x, evalTypex := pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + y, evalTypey := pkg.evaluateConstValue(file, iota, valueExpr.Y, globalEvaluator, recursiveStack) + if x == nil || y == nil { + return nil, nil + } + return EvaluateBinary(x, y, valueExpr.Op, evalTypex, evalTypey) + case *ast.ParenExpr: + return pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + case *ast.CallExpr: + //data conversion + if len(valueExpr.Args) != 1 { + return nil, nil + } + arg := valueExpr.Args[0] + if ident, ok := valueExpr.Fun.(*ast.Ident); ok { + name := ident.Name + if name == "uintptr" { + name = "uint" + } + value, _ := pkg.evaluateConstValue(file, iota, arg, globalEvaluator, recursiveStack) + if IsGolangPrimitiveType(name) { + value = EvaluateDataConversion(value, name) + 
return value, nil + } else if name == "len" { + return reflect.ValueOf(value).Len(), nil + } + typeDef := globalEvaluator.FindTypeSpec(name, file) + if typeDef == nil { + return nil, nil + } + return value, valueExpr.Fun + } else if selector, ok := valueExpr.Fun.(*ast.SelectorExpr); ok { + typeDef := globalEvaluator.FindTypeSpec(fullTypeName(selector.X.(*ast.Ident).Name, selector.Sel.Name), file) + if typeDef == nil { + return nil, nil + } + return arg, typeDef.TypeSpec.Type + } + } + return nil, nil +} diff --git a/vendor/github.com/swaggo/swag/packages.go b/vendor/github.com/swaggo/swag/packages.go new file mode 100644 index 00000000..20324140 --- /dev/null +++ b/vendor/github.com/swaggo/swag/packages.go @@ -0,0 +1,613 @@ +package swag + +import ( + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "golang.org/x/tools/go/loader" +) + +// PackagesDefinitions map[package import path]*PackageDefinitions. +type PackagesDefinitions struct { + files map[*ast.File]*AstFileInfo + packages map[string]*PackageDefinitions + uniqueDefinitions map[string]*TypeSpecDef + parseDependency ParseFlag + debug Debugger +} + +// NewPackagesDefinitions create object PackagesDefinitions. +func NewPackagesDefinitions() *PackagesDefinitions { + return &PackagesDefinitions{ + files: make(map[*ast.File]*AstFileInfo), + packages: make(map[string]*PackageDefinitions), + uniqueDefinitions: make(map[string]*TypeSpecDef), + } +} + +// ParseFile parse a source file. +func (pkgDefs *PackagesDefinitions) ParseFile(packageDir, path string, src interface{}, flag ParseFlag) error { + // positions are relative to FileSet + fileSet := token.NewFileSet() + astFile, err := goparser.ParseFile(fileSet, path, src, goparser.ParseComments) + if err != nil { + return fmt.Errorf("failed to parse file %s, error:%+v", path, err) + } + return pkgDefs.collectAstFile(fileSet, packageDir, path, astFile, flag) +} + +// collectAstFile collect ast.file. +func (pkgDefs *PackagesDefinitions) collectAstFile(fileSet *token.FileSet, packageDir, path string, astFile *ast.File, flag ParseFlag) error { + if pkgDefs.files == nil { + pkgDefs.files = make(map[*ast.File]*AstFileInfo) + } + + if pkgDefs.packages == nil { + pkgDefs.packages = make(map[string]*PackageDefinitions) + } + + // return without storing the file if we lack a packageDir + if packageDir == "" { + return nil + } + + path, err := filepath.Abs(path) + if err != nil { + return err + } + + dependency, ok := pkgDefs.packages[packageDir] + if ok { + // return without storing the file if it already exists + _, exists := dependency.Files[path] + if exists { + return nil + } + + dependency.Files[path] = astFile + } else { + pkgDefs.packages[packageDir] = NewPackageDefinitions(astFile.Name.Name, packageDir).AddFile(path, astFile) + } + + pkgDefs.files[astFile] = &AstFileInfo{ + FileSet: fileSet, + File: astFile, + Path: path, + PackagePath: packageDir, + ParseFlag: flag, + } + + return nil +} + +// RangeFiles for range the collection of ast.File in alphabetic order. +func (pkgDefs *PackagesDefinitions) RangeFiles(handle func(info *AstFileInfo) error) error { + sortedFiles := make([]*AstFileInfo, 0, len(pkgDefs.files)) + for _, info := range pkgDefs.files { + // ignore package path prefix with 'vendor' or $GOROOT, + // because the router info of api will not be included these files. 
+ if strings.HasPrefix(info.PackagePath, "vendor") || (runtime.GOROOT() != "" && strings.HasPrefix(info.Path, runtime.GOROOT()+string(filepath.Separator))) { + continue + } + sortedFiles = append(sortedFiles, info) + } + + sort.Slice(sortedFiles, func(i, j int) bool { + return strings.Compare(sortedFiles[i].Path, sortedFiles[j].Path) < 0 + }) + + for _, info := range sortedFiles { + err := handle(info) + if err != nil { + return err + } + } + + return nil +} + +// ParseTypes parse types +// @Return parsed definitions. +func (pkgDefs *PackagesDefinitions) ParseTypes() (map[*TypeSpecDef]*Schema, error) { + parsedSchemas := make(map[*TypeSpecDef]*Schema) + for astFile, info := range pkgDefs.files { + pkgDefs.parseTypesFromFile(astFile, info.PackagePath, parsedSchemas) + pkgDefs.parseFunctionScopedTypesFromFile(astFile, info.PackagePath, parsedSchemas) + } + pkgDefs.removeAllNotUniqueTypes() + pkgDefs.evaluateAllConstVariables() + pkgDefs.collectConstEnums(parsedSchemas) + return parsedSchemas, nil +} + +func (pkgDefs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packagePath string, parsedSchemas map[*TypeSpecDef]*Schema) { + for _, astDeclaration := range astFile.Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if !ok { + continue + } + if generalDeclaration.Tok == token.TYPE { + for _, astSpec := range generalDeclaration.Specs { + if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpecDef := &TypeSpecDef{ + PkgPath: packagePath, + File: astFile, + TypeSpec: typeSpec, + } + + if idt, ok := typeSpec.Type.(*ast.Ident); ok && IsGolangPrimitiveType(idt.Name) && parsedSchemas != nil { + parsedSchemas[typeSpecDef] = &Schema{ + PkgPath: typeSpecDef.PkgPath, + Name: astFile.Name.Name, + Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + } + } + + if pkgDefs.uniqueDefinitions == nil { + pkgDefs.uniqueDefinitions = make(map[string]*TypeSpecDef) + } + + fullName := typeSpecDef.TypeName() + + anotherTypeDef, ok := pkgDefs.uniqueDefinitions[fullName] + if ok { + if anotherTypeDef == nil { + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } else if typeSpecDef.PkgPath != anotherTypeDef.PkgPath { + pkgDefs.uniqueDefinitions[fullName] = nil + anotherTypeDef.NotUnique = true + pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } + } else { + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } + + typeSpecDef.SetSchemaName() + + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { + pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(typeSpecDef.Name(), typeSpecDef) + } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[typeSpecDef.Name()]; !ok { + pkgDefs.packages[typeSpecDef.PkgPath].AddTypeSpec(typeSpecDef.Name(), typeSpecDef) + } + } + } + } else if generalDeclaration.Tok == token.CONST { + // collect consts + pkgDefs.collectConstVariables(astFile, packagePath, generalDeclaration) + } + } +} + +func (pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *ast.File, packagePath string, parsedSchemas map[*TypeSpecDef]*Schema) { + for _, astDeclaration := range astFile.Decls { + funcDeclaration, ok := astDeclaration.(*ast.FuncDecl) + if ok && funcDeclaration.Body != nil { + functionScopedTypes := make(map[string]*TypeSpecDef) + 
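+			// (illustrative) the loop below collects declarations such as a
+			// hypothetical:
+			//
+			//	func getAccount() {
+			//		type response struct{ ID int } // becomes a uniquely named schema
+			//	}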
for _, stmt := range funcDeclaration.Body.List { + if declStmt, ok := (stmt).(*ast.DeclStmt); ok { + if genDecl, ok := (declStmt.Decl).(*ast.GenDecl); ok && genDecl.Tok == token.TYPE { + for _, astSpec := range genDecl.Specs { + if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpecDef := &TypeSpecDef{ + PkgPath: packagePath, + File: astFile, + TypeSpec: typeSpec, + ParentSpec: astDeclaration, + } + + if idt, ok := typeSpec.Type.(*ast.Ident); ok && IsGolangPrimitiveType(idt.Name) && parsedSchemas != nil { + parsedSchemas[typeSpecDef] = &Schema{ + PkgPath: typeSpecDef.PkgPath, + Name: astFile.Name.Name, + Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + } + } + + fullName := typeSpecDef.TypeName() + if structType, ok := typeSpecDef.TypeSpec.Type.(*ast.StructType); ok { + for _, field := range structType.Fields.List { + var idt *ast.Ident + var ok bool + switch field.Type.(type) { + case *ast.Ident: + idt, ok = field.Type.(*ast.Ident) + case *ast.StarExpr: + idt, ok = field.Type.(*ast.StarExpr).X.(*ast.Ident) + case *ast.ArrayType: + idt, ok = field.Type.(*ast.ArrayType).Elt.(*ast.Ident) + } + if ok && !IsGolangPrimitiveType(idt.Name) { + if functype, ok := functionScopedTypes[idt.Name]; ok { + idt.Name = functype.TypeName() + } + } + } + } + + if pkgDefs.uniqueDefinitions == nil { + pkgDefs.uniqueDefinitions = make(map[string]*TypeSpecDef) + } + + anotherTypeDef, ok := pkgDefs.uniqueDefinitions[fullName] + if ok { + if anotherTypeDef == nil { + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } else if typeSpecDef.PkgPath != anotherTypeDef.PkgPath { + pkgDefs.uniqueDefinitions[fullName] = nil + anotherTypeDef.NotUnique = true + pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } + } else { + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + functionScopedTypes[typeSpec.Name.Name] = typeSpecDef + } + + typeSpecDef.SetSchemaName() + + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { + pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(fullName, typeSpecDef) + } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[fullName]; !ok { + pkgDefs.packages[typeSpecDef.PkgPath].AddTypeSpec(fullName, typeSpecDef) + } + } + } + + } + } + } + } + } +} + +func (pkgDefs *PackagesDefinitions) collectConstVariables(astFile *ast.File, packagePath string, generalDeclaration *ast.GenDecl) { + pkg, ok := pkgDefs.packages[packagePath] + if !ok { + pkg = NewPackageDefinitions(astFile.Name.Name, packagePath) + pkgDefs.packages[packagePath] = pkg + } + + var lastValueSpec *ast.ValueSpec + for _, astSpec := range generalDeclaration.Specs { + valueSpec, ok := astSpec.(*ast.ValueSpec) + if !ok { + continue + } + if len(valueSpec.Names) == 1 && len(valueSpec.Values) == 1 { + lastValueSpec = valueSpec + } else if len(valueSpec.Names) == 1 && len(valueSpec.Values) == 0 && valueSpec.Type == nil && lastValueSpec != nil { + valueSpec.Type = lastValueSpec.Type + valueSpec.Values = lastValueSpec.Values + } + pkg.AddConst(astFile, valueSpec) + } +} + +func (pkgDefs *PackagesDefinitions) evaluateAllConstVariables() { + for _, pkg := range pkgDefs.packages { + for _, constVar := range pkg.OrderedConst { + pkgDefs.EvaluateConstValue(pkg, constVar, nil) + } + } +} + +// EvaluateConstValue 
evaluate a const variable. +func (pkgDefs *PackagesDefinitions) EvaluateConstValue(pkg *PackageDefinitions, cv *ConstVariable, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + if expr, ok := cv.Value.(ast.Expr); ok { + defer func() { + if err := recover(); err != nil { + if fi, ok := pkgDefs.files[cv.File]; ok { + pos := fi.FileSet.Position(cv.Name.NamePos) + pkgDefs.debug.Printf("warning: failed to evaluate const %s at %s:%d:%d, %v", cv.Name.Name, fi.Path, pos.Line, pos.Column, err) + } + } + }() + if recursiveStack == nil { + recursiveStack = make(map[string]struct{}) + } + fullConstName := fullTypeName(pkg.Path, cv.Name.Name) + if _, ok = recursiveStack[fullConstName]; ok { + return nil, nil + } + recursiveStack[fullConstName] = struct{}{} + + value, evalType := pkg.evaluateConstValue(cv.File, cv.Name.Obj.Data.(int), expr, pkgDefs, recursiveStack) + if cv.Type == nil && evalType != nil { + cv.Type = evalType + } + if value != nil { + cv.Value = value + } + return value, cv.Type + } + return cv.Value, cv.Type +} + +// EvaluateConstValueByName evaluate a const variable by name. +func (pkgDefs *PackagesDefinitions) EvaluateConstValueByName(file *ast.File, pkgName, constVariableName string, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + matchedPkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports(pkgName, file) + for _, pkgPath := range matchedPkgPaths { + if pkg, ok := pkgDefs.packages[pkgPath]; ok { + if cv, ok := pkg.ConstTable[constVariableName]; ok { + return pkgDefs.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + } + if pkgDefs.parseDependency > 0 { + for _, pkgPath := range externalPkgPaths { + if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { + if pkg, ok := pkgDefs.packages[pkgPath]; ok { + if cv, ok := pkg.ConstTable[constVariableName]; ok { + return pkgDefs.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + } + } + } + return nil, nil +} + +func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpecDef]*Schema) { + for _, pkg := range pkgDefs.packages { + for _, constVar := range pkg.OrderedConst { + if constVar.Type == nil { + continue + } + ident, ok := constVar.Type.(*ast.Ident) + if !ok || IsGolangPrimitiveType(ident.Name) { + continue + } + typeDef, ok := pkg.TypeDefinitions[ident.Name] + if !ok { + continue + } + + // delete it from parsed schemas, and will parse it again + if _, ok = parsedSchemas[typeDef]; ok { + delete(parsedSchemas, typeDef) + } + + if typeDef.Enums == nil { + typeDef.Enums = make([]EnumValue, 0) + } + + name := constVar.Name.Name + if _, ok = constVar.Value.(ast.Expr); ok { + continue + } + + enumValue := EnumValue{ + key: name, + Value: constVar.Value, + } + if constVar.Comment != nil && len(constVar.Comment.List) > 0 { + enumValue.Comment = constVar.Comment.List[0].Text + enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "//") + enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "/*") + enumValue.Comment = strings.TrimSuffix(enumValue.Comment, "*/") + enumValue.Comment = strings.TrimSpace(enumValue.Comment) + } + typeDef.Enums = append(typeDef.Enums, enumValue) + } + } +} + +func (pkgDefs *PackagesDefinitions) removeAllNotUniqueTypes() { + for key, ud := range pkgDefs.uniqueDefinitions { + if ud == nil { + delete(pkgDefs.uniqueDefinitions, key) + } + } +} + +func (pkgDefs *PackagesDefinitions) findTypeSpec(pkgPath string, typeName string) *TypeSpecDef { + if pkgDefs.packages == nil { + return nil + } + + pd, found := pkgDefs.packages[pkgPath] + if 
found { + typeSpec, ok := pd.TypeDefinitions[typeName] + if ok { + return typeSpec + } + } + + return nil +} + +func (pkgDefs *PackagesDefinitions) loadExternalPackage(importPath string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + conf := loader.Config{ + ParserMode: goparser.ParseComments, + Cwd: cwd, + } + + conf.Import(importPath) + + loaderProgram, err := conf.Load() + if err != nil { + return err + } + + for _, info := range loaderProgram.AllPackages { + pkgPath := strings.TrimPrefix(info.Pkg.Path(), "vendor/") + for _, astFile := range info.Files { + pkgDefs.parseTypesFromFile(astFile, pkgPath, nil) + } + } + + return nil +} + +// findPackagePathFromImports finds out the package path of a package via ranging imports of an ast.File +// @pkg the name of the target package +// @file current ast.File in which to search imports +// @return the package paths of a package of @pkg. +func (pkgDefs *PackagesDefinitions) findPackagePathFromImports(pkg string, file *ast.File) (matchedPkgPaths, externalPkgPaths []string) { + if file == nil { + return + } + + if strings.ContainsRune(pkg, '.') { + pkg = strings.Split(pkg, ".")[0] + } + + matchLastPathPart := func(pkgPath string) bool { + paths := strings.Split(pkgPath, "/") + return paths[len(paths)-1] == pkg + } + + // prior to match named package + for _, imp := range file.Imports { + path := strings.Trim(imp.Path.Value, `"`) + if imp.Name != nil { + if imp.Name.Name == pkg { + // if name match, break loop and return + _, ok := pkgDefs.packages[path] + if ok { + matchedPkgPaths = []string{path} + externalPkgPaths = nil + } else { + externalPkgPaths = []string{path} + matchedPkgPaths = nil + } + break + } else if imp.Name.Name == "_" && len(pkg) > 0 { + // for unused types + pd, ok := pkgDefs.packages[path] + if ok { + if pd.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, path) + } + } else if matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } else if imp.Name.Name == "." && len(pkg) == 0 { + _, ok := pkgDefs.packages[path] + if ok { + matchedPkgPaths = append(matchedPkgPaths, path) + } else if len(pkg) == 0 || matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } + } else if pkgDefs.packages != nil && len(pkg) > 0 { + pd, ok := pkgDefs.packages[path] + if ok { + if pd.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, path) + } + } else if matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } + } + + if len(pkg) == 0 || file.Name.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, pkgDefs.files[file].PackagePath) + } + + return +} + +func (pkgDefs *PackagesDefinitions) findTypeSpecFromPackagePaths(matchedPkgPaths, externalPkgPaths []string, name string) (typeDef *TypeSpecDef) { + if pkgDefs.parseDependency > 0 { + for _, pkgPath := range externalPkgPaths { + if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { + typeDef = pkgDefs.findTypeSpec(pkgPath, name) + if typeDef != nil { + return typeDef + } + } + } + } + + for _, pkgPath := range matchedPkgPaths { + typeDef = pkgDefs.findTypeSpec(pkgPath, name) + if typeDef != nil { + return typeDef + } + } + + return typeDef +} + +// FindTypeSpec finds out TypeSpecDef of a type by typeName +// @typeName the name of the target type, if it starts with a package name, find its own package path from imports on top of @file +// @file the ast.file in which @typeName is used +// @pkgPath the package path of @file. 
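+// e.g. (sketch) "User" resolves within @file's own package, "model.User"
+// through the file's imports, and generic instantiations such as
+// "Resp[model.User]" are parametrized after the base type is found.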
+func (pkgDefs *PackagesDefinitions) FindTypeSpec(typeName string, file *ast.File) *TypeSpecDef { + if IsGolangPrimitiveType(typeName) { + return nil + } + + if file == nil { // for test + return pkgDefs.uniqueDefinitions[typeName] + } + + parts := strings.Split(strings.Split(typeName, "[")[0], ".") + if len(parts) > 1 { + pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports(parts[0], file) + if len(externalPkgPaths) == 0 || pkgDefs.parseDependency == ParseNone { + typeDef, ok := pkgDefs.uniqueDefinitions[typeName] + if ok { + return typeDef + } + } + typeDef := pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, parts[1]) + return pkgDefs.parametrizeGenericType(file, typeDef, typeName) + } + + typeDef, ok := pkgDefs.uniqueDefinitions[fullTypeName(file.Name.Name, typeName)] + if ok { + return typeDef + } + + name := parts[0] + typeDef, ok = pkgDefs.uniqueDefinitions[fullTypeName(file.Name.Name, name)] + if !ok { + pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports("", file) + typeDef = pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, name) + } + + if typeDef != nil { + return pkgDefs.parametrizeGenericType(file, typeDef, typeName) + } + + // in case that comment //@name renamed the type with a name without a dot + for k, v := range pkgDefs.uniqueDefinitions { + if v == nil { + pkgDefs.debug.Printf("%s TypeSpecDef is nil", k) + continue + } + if v.SchemaName == typeName { + return v + } + } + + return nil +} diff --git a/vendor/github.com/swaggo/swag/parser.go b/vendor/github.com/swaggo/swag/parser.go new file mode 100644 index 00000000..cd2a46c6 --- /dev/null +++ b/vendor/github.com/swaggo/swag/parser.go @@ -0,0 +1,1920 @@ +package swag + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/build" + goparser "go/parser" + "go/token" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/KyleBanks/depth" + "github.com/go-openapi/spec" +) + +const ( + // CamelCase indicates using CamelCase strategy for struct field. + CamelCase = "camelcase" + + // PascalCase indicates using PascalCase strategy for struct field. + PascalCase = "pascalcase" + + // SnakeCase indicates using SnakeCase strategy for struct field. + SnakeCase = "snakecase" + + idAttr = "@id" + acceptAttr = "@accept" + produceAttr = "@produce" + paramAttr = "@param" + successAttr = "@success" + failureAttr = "@failure" + responseAttr = "@response" + headerAttr = "@header" + tagsAttr = "@tags" + routerAttr = "@router" + deprecatedRouterAttr = "@deprecatedrouter" + summaryAttr = "@summary" + deprecatedAttr = "@deprecated" + securityAttr = "@security" + titleAttr = "@title" + conNameAttr = "@contact.name" + conURLAttr = "@contact.url" + conEmailAttr = "@contact.email" + licNameAttr = "@license.name" + licURLAttr = "@license.url" + versionAttr = "@version" + descriptionAttr = "@description" + descriptionMarkdownAttr = "@description.markdown" + secBasicAttr = "@securitydefinitions.basic" + secAPIKeyAttr = "@securitydefinitions.apikey" + secApplicationAttr = "@securitydefinitions.oauth2.application" + secImplicitAttr = "@securitydefinitions.oauth2.implicit" + secPasswordAttr = "@securitydefinitions.oauth2.password" + secAccessCodeAttr = "@securitydefinitions.oauth2.accesscode" + tosAttr = "@termsofservice" + extDocsDescAttr = "@externaldocs.description" + extDocsURLAttr = "@externaldocs.url" + xCodeSamplesAttr = "@x-codesamples" + scopeAttrPrefix = "@scope." 
+	stateAttr               = "@state"
+)
+
+// ParseFlag determines what to parse
+type ParseFlag int
+
+const (
+	// ParseNone parse nothing
+	ParseNone ParseFlag = 0x00
+	// ParseModels parse models
+	ParseModels = 0x01
+	// ParseOperations parse operations
+	ParseOperations = 0x02
+	// ParseAll parse operations and models
+	ParseAll = ParseOperations | ParseModels
+)
+
+var (
+	// ErrRecursiveParseStruct recursively parsing struct.
+	ErrRecursiveParseStruct = errors.New("recursively parsing struct")
+
+	// ErrFuncTypeField field type is func.
+	ErrFuncTypeField = errors.New("field type is func")
+
+	// ErrFailedConvertPrimitiveType failed to convert to a type swag can interpret.
+	ErrFailedConvertPrimitiveType = errors.New("swag property: failed convert primitive type")
+
+	// ErrSkippedField indicates the field is skipped by global overrides.
+	ErrSkippedField = errors.New("field is skipped by global overrides")
+)
+
+var allMethod = map[string]struct{}{
+	http.MethodGet:     {},
+	http.MethodPut:     {},
+	http.MethodPost:    {},
+	http.MethodDelete:  {},
+	http.MethodOptions: {},
+	http.MethodHead:    {},
+	http.MethodPatch:   {},
+}
+
+// Parser implements a parser for Go source files.
+type Parser struct {
+	// swagger represents the root document object for the API specification
+	swagger *spec.Swagger
+
+	// packages store entities of APIs, definitions, file, package path etc. and their relations
+	packages *PackagesDefinitions
+
+	// parsedSchemas store schemas which have been parsed from ast.TypeSpec
+	parsedSchemas map[*TypeSpecDef]*Schema
+
+	// outputSchemas store schemas which will be exported to swagger
+	outputSchemas map[*TypeSpecDef]*Schema
+
+	// PropNamingStrategy naming strategy
+	PropNamingStrategy string
+
+	// ParseVendor whether the vendor folder should be parsed
+	ParseVendor bool
+
+	// ParseDependency whether swag should parse outside the dependency folder: 0 none, 1 models, 2 operations, 3 all
+	ParseDependency ParseFlag
+
+	// ParseInternal whether swag should parse internal packages
+	ParseInternal bool
+
+	// Strict whether swag should error or warn when it detects cases which are most likely user errors
+	Strict bool
+
+	// RequiredByDefault set validation required for all fields by default
+	RequiredByDefault bool
+
+	// structStack stores full names of the structures that were already parsed or are being parsed now
+	structStack []*TypeSpecDef
+
+	// markdownFileDir holds the path to the folder where markdown files are stored
+	markdownFileDir string
+
+	// codeExampleFilesDir holds the path to the folder where code example files are stored
+	codeExampleFilesDir string
+
+	// collectionFormatInQuery sets the default collectionFormat other than 'csv' for arrays in query params
+	collectionFormatInQuery string
+
+	// excludes excludes dirs and files in SearchDir
+	excludes map[string]struct{}
+
+	// packagePrefix is a list of package path prefixes; packages that do not
+	// match any one of them will be excluded when searching.
+	packagePrefix []string
+
+	// parseExtension tells the parser to include only operations that match the specific extension
+	parseExtension string
+
+	// debug is where debugging output goes
+	debug Debugger
+
+	// fieldParserFactory creates a FieldParser
+	fieldParserFactory FieldParserFactory
+
+	// Overrides allows global replacements of types. A blank replacement will be skipped.
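+	// For example (illustrative entries, not defaults shipped with this patch):
+	//	Overrides["sql.NullString"] = "string"
+	//	Overrides["sql.NullInt64"] = "integer"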
+ Overrides map[string]string + + // parseGoList whether swag use go list to parse dependency + parseGoList bool + + // tags to filter the APIs after + tags map[string]struct{} + + // HostState is the state of the host + HostState string + + // ParseFuncBody whether swag should parse api info inside of funcs + ParseFuncBody bool +} + +// FieldParserFactory create FieldParser. +type FieldParserFactory func(ps *Parser, field *ast.Field) FieldParser + +// FieldParser parse struct field. +type FieldParser interface { + ShouldSkip() bool + FieldNames() ([]string, error) + FormName() string + HeaderName() string + PathName() string + CustomSchema() (*spec.Schema, error) + ComplementSchema(schema *spec.Schema) error + IsRequired() (bool, error) +} + +// Debugger is the interface that wraps the basic Printf method. +type Debugger interface { + Printf(format string, v ...interface{}) +} + +// New creates a new Parser with default properties. +func New(options ...func(*Parser)) *Parser { + parser := &Parser{ + swagger: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Contact: &spec.ContactInfo{}, + License: nil, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + }, + Paths: &spec.Paths{ + Paths: make(map[string]spec.PathItem), + VendorExtensible: spec.VendorExtensible{ + Extensions: nil, + }, + }, + Definitions: make(map[string]spec.Schema), + SecurityDefinitions: make(map[string]*spec.SecurityScheme), + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: nil, + }, + }, + packages: NewPackagesDefinitions(), + debug: log.New(os.Stdout, "", log.LstdFlags), + parsedSchemas: make(map[*TypeSpecDef]*Schema), + outputSchemas: make(map[*TypeSpecDef]*Schema), + excludes: make(map[string]struct{}), + tags: make(map[string]struct{}), + fieldParserFactory: newTagBaseFieldParser, + Overrides: make(map[string]string), + } + + for _, option := range options { + option(parser) + } + + parser.packages.debug = parser.debug + + return parser +} + +// SetParseDependency sets whether to parse the dependent packages. +func SetParseDependency(parseDependency int) func(*Parser) { + return func(p *Parser) { + p.ParseDependency = ParseFlag(parseDependency) + if p.packages != nil { + p.packages.parseDependency = p.ParseDependency + } + } +} + +// SetMarkdownFileDirectory sets the directory to search for markdown files. +func SetMarkdownFileDirectory(directoryPath string) func(*Parser) { + return func(p *Parser) { + p.markdownFileDir = directoryPath + } +} + +// SetCodeExamplesDirectory sets the directory to search for code example files. +func SetCodeExamplesDirectory(directoryPath string) func(*Parser) { + return func(p *Parser) { + p.codeExampleFilesDir = directoryPath + } +} + +// SetExcludedDirsAndFiles sets directories and files to be excluded when searching. +func SetExcludedDirsAndFiles(excludes string) func(*Parser) { + return func(p *Parser) { + for _, f := range strings.Split(excludes, ",") { + f = strings.TrimSpace(f) + if f != "" { + f = filepath.Clean(f) + p.excludes[f] = struct{}{} + } + } + } +} + +// SetPackagePrefix sets a list of package path prefixes from a comma-separated +// string, packages that do not match any one of them will be excluded when +// searching. 
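+//
+// A minimal sketch (the prefixes are hypothetical):
+//
+//	p := New(SetPackagePrefix("github.com/example/app,github.com/example/lib"))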
+func SetPackagePrefix(packagePrefix string) func(*Parser) {
+	return func(p *Parser) {
+		for _, f := range strings.Split(packagePrefix, ",") {
+			f = strings.TrimSpace(f)
+			if f != "" {
+				p.packagePrefix = append(p.packagePrefix, f)
+			}
+		}
+	}
+}
+
+// SetTags sets the tags to be included
+func SetTags(include string) func(*Parser) {
+	return func(p *Parser) {
+		for _, f := range strings.Split(include, ",") {
+			f = strings.TrimSpace(f)
+			if f != "" {
+				p.tags[f] = struct{}{}
+			}
+		}
+	}
+}
+
+// SetParseExtension restricts parsing to operations that match the given extension
+func SetParseExtension(parseExtension string) func(*Parser) {
+	return func(p *Parser) {
+		p.parseExtension = parseExtension
+	}
+}
+
+// SetStrict sets whether swag should error or warn when it detects cases which are most likely user errors.
+func SetStrict(strict bool) func(*Parser) {
+	return func(p *Parser) {
+		p.Strict = strict
+	}
+}
+
+// SetDebugger allows the use of user-defined implementations.
+func SetDebugger(logger Debugger) func(parser *Parser) {
+	return func(p *Parser) {
+		if logger != nil {
+			p.debug = logger
+		}
+	}
+}
+
+// SetFieldParserFactory allows the use of user-defined implementations.
+func SetFieldParserFactory(factory FieldParserFactory) func(parser *Parser) {
+	return func(p *Parser) {
+		p.fieldParserFactory = factory
+	}
+}
+
+// SetOverrides allows the use of user-defined global type overrides.
+func SetOverrides(overrides map[string]string) func(parser *Parser) {
+	return func(p *Parser) {
+		for k, v := range overrides {
+			p.Overrides[k] = v
+		}
+	}
+}
+
+// SetCollectionFormat sets the default collection format
+func SetCollectionFormat(collectionFormat string) func(*Parser) {
+	return func(p *Parser) {
+		p.collectionFormatInQuery = collectionFormat
+	}
+}
+
+// ParseUsingGoList sets whether swag uses go list to parse dependencies
+func ParseUsingGoList(enabled bool) func(parser *Parser) {
+	return func(p *Parser) {
+		p.parseGoList = enabled
+	}
+}
+
+// ParseAPI parses general api info for given searchDir and mainAPIFile.
+func (parser *Parser) ParseAPI(searchDir string, mainAPIFile string, parseDepth int) error {
+	return parser.ParseAPIMultiSearchDir([]string{searchDir}, mainAPIFile, parseDepth)
+}
+
+// skipPackageByPrefix returns true if the given pkgpath does not match
+// any user-defined package path prefixes.
+func (parser *Parser) skipPackageByPrefix(pkgpath string) bool {
+	if len(parser.packagePrefix) == 0 {
+		return false
+	}
+	for _, prefix := range parser.packagePrefix {
+		if strings.HasPrefix(pkgpath, prefix) {
+			return false
+		}
+	}
+	return true
+}
+
+// ParseAPIMultiSearchDir is like ParseAPI but for multiple search dirs.
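+//
+// A minimal sketch (the search dirs and depth are assumptions):
+//
+//	p := New()
+//	if err := p.ParseAPIMultiSearchDir([]string{"./internal", "./pkg"}, "main.go", 100); err != nil {
+//		log.Fatal(err)
+//	}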
+func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile string, parseDepth int) error {
+	for _, searchDir := range searchDirs {
+		parser.debug.Printf("Generate general API Info, search dir:%s", searchDir)
+
+		packageDir, err := getPkgName(searchDir)
+		if err != nil {
+			parser.debug.Printf("warning: failed to get package name in dir: %s, error: %s", searchDir, err.Error())
+		}
+
+		err = parser.getAllGoFileInfo(packageDir, searchDir)
+		if err != nil {
+			return err
+		}
+	}
+
+	absMainAPIFilePath, err := filepath.Abs(filepath.Join(searchDirs[0], mainAPIFile))
+	if err != nil {
+		return err
+	}
+
+	// Use the 'go list' command instead of depth.Resolve() when parseGoList is enabled
+	if parser.ParseDependency > 0 {
+		if parser.parseGoList {
+			pkgs, err := listPackages(context.Background(), filepath.Dir(absMainAPIFilePath), nil, "-deps")
+			if err != nil {
+				return fmt.Errorf("pkg %s cannot find all dependencies, %s", filepath.Dir(absMainAPIFilePath), err)
+			}
+
+			length := len(pkgs)
+			for i := 0; i < length; i++ {
+				err := parser.getAllGoFileInfoFromDepsByList(pkgs[i], parser.ParseDependency)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			var t depth.Tree
+			t.ResolveInternal = true
+			t.MaxDepth = parseDepth
+
+			pkgName, err := getPkgName(filepath.Dir(absMainAPIFilePath))
+			if err != nil {
+				return err
+			}
+
+			err = t.Resolve(pkgName)
+			if err != nil {
+				return fmt.Errorf("pkg %s cannot find all dependencies, %s", pkgName, err)
+			}
+			for i := 0; i < len(t.Root.Deps); i++ {
+				err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i], parser.ParseDependency)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	err = parser.ParseGeneralAPIInfo(absMainAPIFilePath)
+	if err != nil {
+		return err
+	}
+
+	parser.parsedSchemas, err = parser.packages.ParseTypes()
+	if err != nil {
+		return err
+	}
+
+	err = parser.packages.RangeFiles(parser.ParseRouterAPIInfo)
+	if err != nil {
+		return err
+	}
+
+	return parser.checkOperationIDUniqueness()
+}
+
+func getPkgName(searchDir string) (string, error) {
+	cmd := exec.Command("go", "list", "-f={{.ImportPath}}")
+	cmd.Dir = searchDir
+
+	var stdout, stderr strings.Builder
+
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	if err := cmd.Run(); err != nil {
+		return "", fmt.Errorf("execute go list command, %s, stdout:%s, stderr:%s", err, stdout.String(), stderr.String())
+	}
+
+	outStr, _ := stdout.String(), stderr.String()
+
+	if outStr[0] == '_' { // will be shown like _/{GOPATH}/src/{YOUR_PACKAGE} when Go modules are NOT enabled
+		outStr = strings.TrimPrefix(outStr, "_"+build.Default.GOPATH+"/src/")
+	}
+
+	f := strings.Split(outStr, "\n")
+
+	outStr = f[0]
+
+	return outStr, nil
+}
+
+// ParseGeneralAPIInfo parses general api info for given mainAPIFile path.
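+//
+// The general info block is usually a comment group on the main API file, e.g. (an illustrative set of annotations):
+//
+//	// @title    Example API
+//	// @version  1.0
+//	// @host     localhost:8080
+//	// @BasePath /api/v1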
+func (parser *Parser) ParseGeneralAPIInfo(mainAPIFile string) error { + fileTree, err := goparser.ParseFile(token.NewFileSet(), mainAPIFile, nil, goparser.ParseComments) + if err != nil { + return fmt.Errorf("cannot parse source files %s: %s", mainAPIFile, err) + } + + parser.swagger.Swagger = "2.0" + + for _, comment := range fileTree.Comments { + comments := strings.Split(comment.Text(), "\n") + if !isGeneralAPIComment(comments) { + continue + } + + err = parseGeneralAPIInfo(parser, comments) + if err != nil { + return err + } + } + + return nil +} + +func parseGeneralAPIInfo(parser *Parser, comments []string) error { + previousAttribute := "" + var tag *spec.Tag + // parsing classic meta data model + for line := 0; line < len(comments); line++ { + commentLine := comments[line] + commentLine = strings.TrimSpace(commentLine) + if len(commentLine) == 0 { + continue + } + fields := FieldsByAnySpace(commentLine, 2) + + attribute := fields[0] + var value string + if len(fields) > 1 { + value = fields[1] + } + + switch attr := strings.ToLower(attribute); attr { + case versionAttr, titleAttr, tosAttr, licNameAttr, licURLAttr, conNameAttr, conURLAttr, conEmailAttr: + setSwaggerInfo(parser.swagger, attr, value) + case descriptionAttr: + if previousAttribute == attribute { + parser.swagger.Info.Description += "\n" + value + + continue + } + + setSwaggerInfo(parser.swagger, attr, value) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag("api", parser.markdownFileDir) + if err != nil { + return err + } + + setSwaggerInfo(parser.swagger, descriptionAttr, string(commentInfo)) + + case "@host": + parser.swagger.Host = value + case "@hoststate": + fields = FieldsByAnySpace(commentLine, 3) + if len(fields) != 3 { + return fmt.Errorf("%s needs 3 arguments", attribute) + } + if parser.HostState == fields[1] { + parser.swagger.Host = fields[2] + } + case "@basepath": + parser.swagger.BasePath = value + + case acceptAttr: + err := parser.ParseAcceptComment(value) + if err != nil { + return err + } + case produceAttr: + err := parser.ParseProduceComment(value) + if err != nil { + return err + } + case "@schemes": + parser.swagger.Schemes = strings.Split(value, " ") + case "@tag.name": + if parser.matchTag(value) { + parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ + TagProps: spec.TagProps{ + Name: value, + }, + }) + tag = &parser.swagger.Tags[len(parser.swagger.Tags)-1] + } else { + tag = nil + } + case "@tag.description": + if tag != nil { + tag.TagProps.Description = value + } + case "@tag.description.markdown": + if tag != nil { + commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) + if err != nil { + return err + } + + tag.TagProps.Description = string(commentInfo) + } + case "@tag.docs.url": + if tag != nil { + tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ + URL: value, + } + } + case "@tag.docs.description": + if tag != nil { + if tag.TagProps.ExternalDocs == nil { + return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + } + + tag.TagProps.ExternalDocs.Description = value + } + case secBasicAttr, secAPIKeyAttr, secApplicationAttr, secImplicitAttr, secPasswordAttr, secAccessCodeAttr: + scheme, err := parseSecAttributes(attribute, comments, &line) + if err != nil { + return err + } + + parser.swagger.SecurityDefinitions[value] = scheme + + case securityAttr: + parser.swagger.Security = append(parser.swagger.Security, parseSecurity(value)) + + case "@query.collection.format": + parser.collectionFormatInQuery = 
TransToValidCollectionFormat(value) + + case extDocsDescAttr, extDocsURLAttr: + if parser.swagger.ExternalDocs == nil { + parser.swagger.ExternalDocs = new(spec.ExternalDocumentation) + } + switch attr { + case extDocsDescAttr: + parser.swagger.ExternalDocs.Description = value + case extDocsURLAttr: + parser.swagger.ExternalDocs.URL = value + } + + default: + if strings.HasPrefix(attribute, "@x-") { + extensionName := attribute[1:] + + extExistsInSecurityDef := false + // for each security definition + for _, v := range parser.swagger.SecurityDefinitions { + // check if extension exists + _, extExistsInSecurityDef = v.VendorExtensible.Extensions.GetString(extensionName) + // if it exists in at least one, then we stop iterating + if extExistsInSecurityDef { + break + } + } + + // if it is present on security def, don't add it again + if extExistsInSecurityDef { + break + } + + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON interface{} + err := json.Unmarshal([]byte(value), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value", attribute) + } + + if strings.Contains(extensionName, "logo") { + parser.swagger.Info.Extensions.Add(extensionName, valueJSON) + } else { + if parser.swagger.Extensions == nil { + parser.swagger.Extensions = make(map[string]interface{}) + } + + parser.swagger.Extensions[attribute[1:]] = valueJSON + } + } + } + + previousAttribute = attribute + } + + return nil +} + +func setSwaggerInfo(swagger *spec.Swagger, attribute, value string) { + switch attribute { + case versionAttr: + swagger.Info.Version = value + case titleAttr: + swagger.Info.Title = value + case tosAttr: + swagger.Info.TermsOfService = value + case descriptionAttr: + swagger.Info.Description = value + case conNameAttr: + swagger.Info.Contact.Name = value + case conEmailAttr: + swagger.Info.Contact.Email = value + case conURLAttr: + swagger.Info.Contact.URL = value + case licNameAttr: + swagger.Info.License = initIfEmpty(swagger.Info.License) + swagger.Info.License.Name = value + case licURLAttr: + swagger.Info.License = initIfEmpty(swagger.Info.License) + swagger.Info.License.URL = value + } +} + +func parseSecAttributes(context string, lines []string, index *int) (*spec.SecurityScheme, error) { + const ( + in = "@in" + name = "@name" + descriptionAttr = "@description" + tokenURL = "@tokenurl" + authorizationURL = "@authorizationurl" + ) + + var search []string + + attribute := strings.ToLower(FieldsByAnySpace(lines[*index], 2)[0]) + switch attribute { + case secBasicAttr: + return spec.BasicAuth(), nil + case secAPIKeyAttr: + search = []string{in, name} + case secApplicationAttr, secPasswordAttr: + search = []string{tokenURL} + case secImplicitAttr: + search = []string{authorizationURL} + case secAccessCodeAttr: + search = []string{tokenURL, authorizationURL} + } + + // For the first line we get the attributes in the context parameter, so we skip to the next one + *index++ + + attrMap, scopes := make(map[string]string), make(map[string]string) + extensions, description := make(map[string]interface{}), "" + +loopline: + for ; *index < len(lines); *index++ { + v := strings.TrimSpace(lines[*index]) + if len(v) == 0 { + continue + } + + fields := FieldsByAnySpace(v, 2) + securityAttr := strings.ToLower(fields[0]) + var value string + if len(fields) > 1 { + value = fields[1] + } + + for _, findterm := range search { + if securityAttr == findterm { + attrMap[securityAttr] = value + continue loopline + } + } + + if 
isExists, err := isExistsScope(securityAttr); err != nil { + return nil, err + } else if isExists { + scopes[securityAttr[len(scopeAttrPrefix):]] = value + continue + } + + if strings.HasPrefix(securityAttr, "@x-") { + // Add the custom attribute without the @ + extensions[securityAttr[1:]] = value + continue + } + + // Not mandatory field + if securityAttr == descriptionAttr { + if description != "" { + description += "\n" + } + description += value + } + + // next securityDefinitions + if strings.Index(securityAttr, "@securitydefinitions.") == 0 { + // Go back to the previous line and break + *index-- + + break + } + } + + if len(attrMap) != len(search) { + return nil, fmt.Errorf("%s is %v required", context, search) + } + + var scheme *spec.SecurityScheme + + switch attribute { + case secAPIKeyAttr: + scheme = spec.APIKeyAuth(attrMap[name], attrMap[in]) + case secApplicationAttr: + scheme = spec.OAuth2Application(attrMap[tokenURL]) + case secImplicitAttr: + scheme = spec.OAuth2Implicit(attrMap[authorizationURL]) + case secPasswordAttr: + scheme = spec.OAuth2Password(attrMap[tokenURL]) + case secAccessCodeAttr: + scheme = spec.OAuth2AccessToken(attrMap[authorizationURL], attrMap[tokenURL]) + } + + scheme.Description = description + + for extKey, extValue := range extensions { + scheme.AddExtension(extKey, extValue) + } + + for scope, scopeDescription := range scopes { + scheme.AddScope(scope, scopeDescription) + } + + return scheme, nil +} + +func parseSecurity(commentLine string) map[string][]string { + securityMap := make(map[string][]string) + + for _, securityOption := range strings.Split(commentLine, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + return securityMap +} + +func initIfEmpty(license *spec.License) *spec.License { + if license == nil { + return new(spec.License) + } + + return license +} + +// ParseAcceptComment parses comment for given `accept` comment string. +func (parser *Parser) ParseAcceptComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Consumes, "%v accept type can't be accepted") +} + +// ParseProduceComment parses comment for given `produce` comment string. 
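+//
+// For example (a sketch; plain mime types are accepted as-is, and short aliases such as "json" are resolved by parseMimeTypeList):
+//
+//	err := parser.ParseProduceComment("application/json")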
+func (parser *Parser) ParseProduceComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Produces, "%v produce type can't be accepted") +} + +func isGeneralAPIComment(comments []string) bool { + for _, commentLine := range comments { + commentLine = strings.TrimSpace(commentLine) + if len(commentLine) == 0 { + continue + } + attribute := strings.ToLower(FieldsByAnySpace(commentLine, 2)[0]) + switch attribute { + // The @summary, @router, @success, @failure annotation belongs to Operation + case summaryAttr, routerAttr, successAttr, failureAttr, responseAttr: + return false + } + } + + return true +} + +func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { + if tagName == "" { + // this happens when parsing the @description.markdown attribute + // it will be called properly another time with tagName="api" + // so we can safely return an empty byte slice here + return make([]byte, 0), nil + } + + dirEntries, err := os.ReadDir(dirPath) + if err != nil { + return nil, err + } + + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + + fileName := entry.Name() + + expectedFileName := tagName + if !strings.HasSuffix(tagName, ".md") { + expectedFileName = tagName + ".md" + } + + if fileName == expectedFileName { + fullPath := filepath.Join(dirPath, fileName) + + commentInfo, err := os.ReadFile(fullPath) + if err != nil { + return nil, fmt.Errorf("Failed to read markdown file %s error: %s ", fullPath, err) + } + + return commentInfo, nil + } + } + + return nil, fmt.Errorf("Unable to find markdown file for tag %s in the given directory", tagName) +} + +func isExistsScope(scope string) (bool, error) { + s := strings.Fields(scope) + for _, v := range s { + if strings.HasPrefix(v, scopeAttrPrefix) { + if strings.Contains(v, ",") { + return false, fmt.Errorf("@scope can't use comma(,) get=" + v) + } + } + } + + return strings.HasPrefix(scope, scopeAttrPrefix), nil +} + +func getTagsFromComment(comment string) (tags []string) { + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return nil + } + + attribute := strings.Fields(commentLine)[0] + lineRemainder, lowerAttribute := strings.TrimSpace(commentLine[len(attribute):]), strings.ToLower(attribute) + + if lowerAttribute == tagsAttr { + for _, tag := range strings.Split(lineRemainder, ",") { + tags = append(tags, strings.TrimSpace(tag)) + } + } + return + +} + +func (parser *Parser) matchTag(tag string) bool { + if len(parser.tags) == 0 { + return true + } + + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + return true + } + + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' { + return false + } + } + return true +} + +func (parser *Parser) matchTags(comments []*ast.Comment) (match bool) { + if len(parser.tags) == 0 { + return true + } + + match = false + for _, comment := range comments { + for _, tag := range getTagsFromComment(comment.Text) { + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + match = true // keep iterating as it may contain a tag that is excluded + } + } + } + + if !match { + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' 
{ + return false + } + } + } + return true +} + +func matchExtension(extensionToMatch string, comments []*ast.Comment) (match bool) { + if len(extensionToMatch) != 0 { + for _, comment := range comments { + commentLine := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + fields := FieldsByAnySpace(commentLine, 2) + if len(fields) > 0 { + lowerAttribute := strings.ToLower(fields[0]) + + if lowerAttribute == fmt.Sprintf("@x-%s", strings.ToLower(extensionToMatch)) { + return true + } + } + } + return false + } + return true +} + +// ParseRouterAPIInfo parses router api info for given astFile. +func (parser *Parser) ParseRouterAPIInfo(fileInfo *AstFileInfo) error { + if (fileInfo.ParseFlag & ParseOperations) == ParseNone { + return nil + } + + // parse File.Comments instead of File.Decls.Doc if ParseFuncBody flag set to "true" + if parser.ParseFuncBody { + for _, astComments := range fileInfo.File.Comments { + if astComments.List != nil { + if err := parser.parseRouterAPIInfoComment(astComments.List, fileInfo); err != nil { + return err + } + } + } + + return nil + } + + for _, astDescription := range fileInfo.File.Decls { + astDeclaration, ok := astDescription.(*ast.FuncDecl) + if ok && astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { + if err := parser.parseRouterAPIInfoComment(astDeclaration.Doc.List, fileInfo); err != nil { + return err + } + } + } + + return nil +} + +func (parser *Parser) parseRouterAPIInfoComment(comments []*ast.Comment, fileInfo *AstFileInfo) error { + if parser.matchTags(comments) && matchExtension(parser.parseExtension, comments) { + // for per 'function' comment, create a new 'Operation' object + operation := NewOperation(parser, SetCodeExampleFilesDirectory(parser.codeExampleFilesDir)) + for _, comment := range comments { + err := operation.ParseComment(comment.Text, fileInfo.File) + if err != nil { + return fmt.Errorf("ParseComment error in file %s :%+v", fileInfo.Path, err) + } + if operation.State != "" && operation.State != parser.HostState { + return nil + } + } + err := processRouterOperation(parser, operation) + if err != nil { + return err + } + } + + return nil +} + +func refRouteMethodOp(item *spec.PathItem, method string) (op **spec.Operation) { + switch method { + case http.MethodGet: + op = &item.Get + case http.MethodPost: + op = &item.Post + case http.MethodDelete: + op = &item.Delete + case http.MethodPut: + op = &item.Put + case http.MethodPatch: + op = &item.Patch + case http.MethodHead: + op = &item.Head + case http.MethodOptions: + op = &item.Options + } + + return +} + +func processRouterOperation(parser *Parser, operation *Operation) error { + for _, routeProperties := range operation.RouterProperties { + var ( + pathItem spec.PathItem + ok bool + ) + + pathItem, ok = parser.swagger.Paths.Paths[routeProperties.Path] + if !ok { + pathItem = spec.PathItem{} + } + + op := refRouteMethodOp(&pathItem, routeProperties.HTTPMethod) + + // check if we already have an operation for this path and method + if *op != nil { + err := fmt.Errorf("route %s %s is declared multiple times", routeProperties.HTTPMethod, routeProperties.Path) + if parser.Strict { + return err + } + + parser.debug.Printf("warning: %s\n", err) + } + + if len(operation.RouterProperties) > 1 { + newOp := *operation + var validParams []spec.Parameter + for _, param := range newOp.Operation.OperationProps.Parameters { + if param.In == "path" && !strings.Contains(routeProperties.Path, param.Name) { + // This path param is not actually contained in the path, skip adding 
it to the final params + continue + } + validParams = append(validParams, param) + } + newOp.Operation.OperationProps.Parameters = validParams + *op = &newOp.Operation + } else { + *op = &operation.Operation + } + + if routeProperties.Deprecated { + (*op).Deprecated = routeProperties.Deprecated + } + + parser.swagger.Paths.Paths[routeProperties.Path] = pathItem + } + + return nil +} + +func convertFromSpecificToPrimitive(typeName string) (string, error) { + name := typeName + if strings.ContainsRune(name, '.') { + name = strings.Split(name, ".")[1] + } + + switch strings.ToUpper(name) { + case "TIME", "OBJECTID", "UUID": + return STRING, nil + case "DECIMAL": + return NUMBER, nil + } + + return typeName, ErrFailedConvertPrimitiveType +} + +func (parser *Parser) getTypeSchema(typeName string, file *ast.File, ref bool) (*spec.Schema, error) { + if override, ok := parser.Overrides[typeName]; ok { + parser.debug.Printf("Override detected for %s: using %s instead", typeName, override) + return parseObjectSchema(parser, override, file) + } + + if IsInterfaceLike(typeName) { + return &spec.Schema{}, nil + } + if IsGolangPrimitiveType(typeName) { + return PrimitiveSchema(TransToValidSchemeType(typeName)), nil + } + + schemaType, err := convertFromSpecificToPrimitive(typeName) + if err == nil { + return PrimitiveSchema(schemaType), nil + } + + typeSpecDef := parser.packages.FindTypeSpec(typeName, file) + if typeSpecDef == nil { + return nil, fmt.Errorf("cannot find type definition: %s", typeName) + } + + if override, ok := parser.Overrides[typeSpecDef.FullPath()]; ok { + if override == "" { + parser.debug.Printf("Override detected for %s: ignoring", typeSpecDef.FullPath()) + + return nil, ErrSkippedField + } + + parser.debug.Printf("Override detected for %s: using %s instead", typeSpecDef.FullPath(), override) + + separator := strings.LastIndex(override, ".") + if separator == -1 { + // treat as a swaggertype tag + parts := strings.Split(override, ",") + + return BuildCustomSchema(parts) + } + + typeSpecDef = parser.packages.findTypeSpec(override[0:separator], override[separator+1:]) + } + + schema, ok := parser.parsedSchemas[typeSpecDef] + if !ok { + var err error + + schema, err = parser.ParseDefinition(typeSpecDef) + if err != nil { + if err == ErrRecursiveParseStruct && ref { + return parser.getRefTypeSchema(typeSpecDef, schema), nil + } + return nil, fmt.Errorf("%s: %w", typeName, err) + } + } + + if ref { + if IsComplexSchema(schema.Schema) { + return parser.getRefTypeSchema(typeSpecDef, schema), nil + } + // if it is a simple schema, just return a copy + newSchema := *schema.Schema + return &newSchema, nil + } + + return schema.Schema, nil +} + +func (parser *Parser) getRefTypeSchema(typeSpecDef *TypeSpecDef, schema *Schema) *spec.Schema { + _, ok := parser.outputSchemas[typeSpecDef] + if !ok { + parser.swagger.Definitions[schema.Name] = spec.Schema{} + + if schema.Schema != nil { + parser.swagger.Definitions[schema.Name] = *schema.Schema + } + + parser.outputSchemas[typeSpecDef] = schema + } + + refSchema := RefSchema(schema.Name) + + return refSchema +} + +func (parser *Parser) isInStructStack(typeSpecDef *TypeSpecDef) bool { + for _, specDef := range parser.structStack { + if typeSpecDef == specDef { + return true + } + } + + return false +} + +// ParseDefinition parses given type spec that corresponds to the type under +// given name and package, and populates swagger schema definitions registry +// with a schema for the given type +func (parser *Parser) ParseDefinition(typeSpecDef 
*TypeSpecDef) (*Schema, error) { + typeName := typeSpecDef.TypeName() + schema, found := parser.parsedSchemas[typeSpecDef] + if found { + parser.debug.Printf("Skipping '%s', already parsed.", typeName) + + return schema, nil + } + + if parser.isInStructStack(typeSpecDef) { + parser.debug.Printf("Skipping '%s', recursion detected.", typeName) + + return &Schema{ + Name: typeName, + PkgPath: typeSpecDef.PkgPath, + Schema: PrimitiveSchema(OBJECT), + }, + ErrRecursiveParseStruct + } + + parser.structStack = append(parser.structStack, typeSpecDef) + + parser.debug.Printf("Generating %s", typeName) + + definition, err := parser.parseTypeExpr(typeSpecDef.File, typeSpecDef.TypeSpec.Type, false) + if err != nil { + parser.debug.Printf("Error parsing type definition '%s': %s", typeName, err) + return nil, err + } + + if definition.Description == "" { + err = parser.fillDefinitionDescription(definition, typeSpecDef.File, typeSpecDef) + if err != nil { + return nil, err + } + } + + if len(typeSpecDef.Enums) > 0 { + var varnames []string + var enumComments = make(map[string]string) + for _, value := range typeSpecDef.Enums { + definition.Enum = append(definition.Enum, value.Value) + varnames = append(varnames, value.key) + if len(value.Comment) > 0 { + enumComments[value.key] = value.Comment + } + } + if definition.Extensions == nil { + definition.Extensions = make(spec.Extensions) + } + definition.Extensions[enumVarNamesExtension] = varnames + if len(enumComments) > 0 { + definition.Extensions[enumCommentsExtension] = enumComments + } + } + + schemaName := typeName + + if typeSpecDef.SchemaName != "" { + schemaName = typeSpecDef.SchemaName + } + + sch := Schema{ + Name: schemaName, + PkgPath: typeSpecDef.PkgPath, + Schema: definition, + } + parser.parsedSchemas[typeSpecDef] = &sch + + // update an empty schema as a result of recursion + s2, found := parser.outputSchemas[typeSpecDef] + if found { + parser.swagger.Definitions[s2.Name] = *definition + } + + return &sch, nil +} + +func fullTypeName(parts ...string) string { + return strings.Join(parts, ".") +} + +// fillDefinitionDescription additionally fills fields in definition (spec.Schema) +// TODO: If .go file contains many types, it may work for a long time +func (parser *Parser) fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) (err error) { + if file == nil { + return + } + for _, astDeclaration := range file.Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if !ok || generalDeclaration.Tok != token.TYPE { + continue + } + + for _, astSpec := range generalDeclaration.Specs { + typeSpec, ok := astSpec.(*ast.TypeSpec) + if !ok || typeSpec != typeSpecDef.TypeSpec { + continue + } + var typeName string + if typeSpec.Name != nil { + typeName = typeSpec.Name.Name + } + definition.Description, err = + parser.extractDeclarationDescription(typeName, typeSpec.Doc, typeSpec.Comment, generalDeclaration.Doc) + if err != nil { + return + } + } + } + return nil +} + +// extractDeclarationDescription gets first description +// from attribute descriptionAttr in commentGroups (ast.CommentGroup) +func (parser *Parser) extractDeclarationDescription(typeName string, commentGroups ...*ast.CommentGroup) (string, error) { + var description string + + for _, commentGroup := range commentGroups { + if commentGroup == nil { + continue + } + + isHandlingDescription := false + + for _, comment := range commentGroup.List { + commentText := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + if len(commentText) 
== 0 { + continue + } + fields := FieldsByAnySpace(commentText, 2) + attribute := fields[0] + + if attr := strings.ToLower(attribute); attr == descriptionMarkdownAttr { + if len(fields) > 1 { + typeName = fields[1] + } + if typeName == "" { + continue + } + desc, err := getMarkdownForTag(typeName, parser.markdownFileDir) + if err != nil { + return "", err + } + // if found markdown description, we will only use the markdown file content + return string(desc), nil + } else if attr != descriptionAttr { + if !isHandlingDescription { + continue + } + + break + } + + isHandlingDescription = true + description += " " + strings.TrimSpace(commentText[len(attribute):]) + } + } + + return strings.TrimLeft(description, " "), nil +} + +// parseTypeExpr parses given type expression that corresponds to the type under +// given name and package, and returns swagger schema for it. +func (parser *Parser) parseTypeExpr(file *ast.File, typeExpr ast.Expr, ref bool) (*spec.Schema, error) { + switch expr := typeExpr.(type) { + // type Foo interface{} + case *ast.InterfaceType: + return &spec.Schema{}, nil + + // type Foo struct {...} + case *ast.StructType: + return parser.parseStruct(file, expr.Fields) + + // type Foo Baz + case *ast.Ident: + return parser.getTypeSchema(expr.Name, file, ref) + + // type Foo *Baz + case *ast.StarExpr: + return parser.parseTypeExpr(file, expr.X, ref) + + // type Foo pkg.Bar + case *ast.SelectorExpr: + if xIdent, ok := expr.X.(*ast.Ident); ok { + return parser.getTypeSchema(fullTypeName(xIdent.Name, expr.Sel.Name), file, ref) + } + // type Foo []Baz + case *ast.ArrayType: + itemSchema, err := parser.parseTypeExpr(file, expr.Elt, true) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(itemSchema), nil + // type Foo map[string]Bar + case *ast.MapType: + if _, ok := expr.Value.(*ast.InterfaceType); ok { + return spec.MapProperty(nil), nil + } + schema, err := parser.parseTypeExpr(file, expr.Value, true) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + + case *ast.FuncType: + return nil, ErrFuncTypeField + // ... + } + + return parser.parseGenericTypeExpr(file, typeExpr) +} + +func (parser *Parser) parseStruct(file *ast.File, fields *ast.FieldList) (*spec.Schema, error) { + required, properties := make([]string, 0), make(map[string]spec.Schema) + + for _, field := range fields.List { + fieldProps, requiredFromAnon, err := parser.parseStructField(file, field) + if err != nil { + if errors.Is(err, ErrFuncTypeField) || errors.Is(err, ErrSkippedField) { + continue + } + + return nil, err + } + + if len(fieldProps) == 0 { + continue + } + + required = append(required, requiredFromAnon...) 
+ + for k, v := range fieldProps { + properties[k] = v + } + } + + sort.Strings(required) + + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{OBJECT}, + Properties: properties, + Required: required, + }, + }, nil +} + +func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[string]spec.Schema, []string, error) { + if field.Tag != nil { + skip, ok := reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")).Lookup("swaggerignore") + if ok && strings.EqualFold(skip, "true") { + return nil, nil, nil + } + } + + ps := parser.fieldParserFactory(parser, field) + + if ps.ShouldSkip() { + return nil, nil, nil + } + + fieldNames, err := ps.FieldNames() + if err != nil { + return nil, nil, err + } + + if len(fieldNames) == 0 { + typeName, err := getFieldType(file, field.Type, nil) + if err != nil { + return nil, nil, err + } + + schema, err := parser.getTypeSchema(typeName, file, false) + if err != nil { + return nil, nil, err + } + + if len(schema.Type) > 0 && schema.Type[0] == OBJECT { + if len(schema.Properties) == 0 { + return nil, nil, nil + } + + properties := map[string]spec.Schema{} + for k, v := range schema.Properties { + properties[k] = v + } + + return properties, schema.SchemaProps.Required, nil + } + // for alias type of non-struct types ,such as array,map, etc. ignore field tag. + return map[string]spec.Schema{typeName: *schema}, nil, nil + + } + + schema, err := ps.CustomSchema() + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + if schema == nil { + typeName, err := getFieldType(file, field.Type, nil) + if err == nil { + // named type + schema, err = parser.getTypeSchema(typeName, file, true) + } else { + // unnamed type + schema, err = parser.parseTypeExpr(file, field.Type, false) + } + + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + } + + err = ps.ComplementSchema(schema) + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + var tagRequired []string + + required, err := ps.IsRequired() + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + if required { + tagRequired = append(tagRequired, fieldNames...) 
+ } + + if schema.Extensions == nil { + schema.Extensions = make(spec.Extensions) + } + if formName := ps.FormName(); len(formName) > 0 { + schema.Extensions["formData"] = formName + } + if headerName := ps.HeaderName(); len(headerName) > 0 { + schema.Extensions["header"] = headerName + } + if pathName := ps.PathName(); len(pathName) > 0 { + schema.Extensions["path"] = pathName + } + fields := make(map[string]spec.Schema) + for _, name := range fieldNames { + fields[name] = *schema + } + return fields, tagRequired, nil +} + +func getFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + switch fieldType := field.(type) { + case *ast.Ident: + return fieldType.Name, nil + case *ast.SelectorExpr: + packageName, err := getFieldType(file, fieldType.X, genericParamTypeDefs) + if err != nil { + return "", err + } + + return fullTypeName(packageName, fieldType.Sel.Name), nil + case *ast.StarExpr: + fullName, err := getFieldType(file, fieldType.X, genericParamTypeDefs) + if err != nil { + return "", err + } + + return fullName, nil + default: + return getGenericFieldType(file, field, genericParamTypeDefs) + } +} + +func (parser *Parser) getUnderlyingSchema(schema *spec.Schema) *spec.Schema { + if schema == nil { + return nil + } + + if url := schema.Ref.GetURL(); url != nil { + if pos := strings.LastIndexByte(url.Fragment, '/'); pos >= 0 { + name := url.Fragment[pos+1:] + if schema, ok := parser.swagger.Definitions[name]; ok { + return &schema + } + } + } + + if len(schema.AllOf) > 0 { + merged := &spec.Schema{} + MergeSchema(merged, schema) + for _, s := range schema.AllOf { + MergeSchema(merged, parser.getUnderlyingSchema(&s)) + } + return merged + } + return nil +} + +// GetSchemaTypePath get path of schema type. +func (parser *Parser) GetSchemaTypePath(schema *spec.Schema, depth int) []string { + if schema == nil || depth == 0 { + return nil + } + + if underlying := parser.getUnderlyingSchema(schema); underlying != nil { + return parser.GetSchemaTypePath(underlying, depth) + } + + if len(schema.Type) > 0 { + switch schema.Type[0] { + case ARRAY: + depth-- + + s := []string{schema.Type[0]} + + return append(s, parser.GetSchemaTypePath(schema.Items.Schema, depth)...) + case OBJECT: + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + // for map + depth-- + + s := []string{schema.Type[0]} + + return append(s, parser.GetSchemaTypePath(schema.AdditionalProperties.Schema, depth)...) + } + } + + return []string{schema.Type[0]} + } + + return []string{ANY} +} + +// defineTypeOfExample example value define the type (object and array unsupported). 
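+// For instance (a sketch of how annotation example values are coerced):
+//
+//	v, _ := defineTypeOfExample("integer", "", "42")   // 42
+//	b, _ := defineTypeOfExample("boolean", "", "true") // true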
+func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{}, error) { + switch schemaType { + case STRING: + return exampleValue, nil + case NUMBER: + v, err := strconv.ParseFloat(exampleValue, 64) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case INTEGER: + v, err := strconv.Atoi(exampleValue) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case BOOLEAN: + v, err := strconv.ParseBool(exampleValue) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case ARRAY: + values := strings.Split(exampleValue, ",") + result := make([]interface{}, 0) + for _, value := range values { + v, err := defineTypeOfExample(arrayType, "", value) + if err != nil { + return nil, err + } + + result = append(result, v) + } + + return result, nil + case OBJECT: + if arrayType == "" { + return nil, fmt.Errorf("%s is unsupported type in example value `%s`", schemaType, exampleValue) + } + + values := strings.Split(exampleValue, ",") + + result := map[string]interface{}{} + + for _, value := range values { + mapData := strings.SplitN(value, ":", 2) + + if len(mapData) == 2 { + v, err := defineTypeOfExample(arrayType, "", mapData[1]) + if err != nil { + return nil, err + } + + result[mapData[0]] = v + + continue + } + + return nil, fmt.Errorf("example value %s should format: key:value", exampleValue) + } + + return result, nil + } + + return nil, fmt.Errorf("%s is unsupported type in example value %s", schemaType, exampleValue) +} + +// GetAllGoFileInfo gets all Go source files information for given searchDir. 
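+//
+// Illustrative call (the package dir and search dir are assumptions):
+//
+//	err := parser.getAllGoFileInfo("github.com/example/app", "./")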
+func (parser *Parser) getAllGoFileInfo(packageDir, searchDir string) error { + if parser.skipPackageByPrefix(packageDir) { + return nil // ignored by user-defined package path prefixes + } + return filepath.Walk(searchDir, func(path string, f os.FileInfo, _ error) error { + err := parser.Skip(path, f) + if err != nil { + return err + } + + if f.IsDir() { + return nil + } + + relPath, err := filepath.Rel(searchDir, path) + if err != nil { + return err + } + + return parser.parseFile(filepath.ToSlash(filepath.Dir(filepath.Clean(filepath.Join(packageDir, relPath)))), path, nil, ParseAll) + }) +} + +func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg, parseFlag ParseFlag) error { + ignoreInternal := pkg.Internal && !parser.ParseInternal + if ignoreInternal || !pkg.Resolved { // ignored internal and not resolved dependencies + return nil + } + + if pkg.Raw != nil && parser.skipPackageByPrefix(pkg.Raw.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + + // Skip cgo + if pkg.Raw == nil && pkg.Name == "C" { + return nil + } + + srcDir := pkg.Raw.Dir + + files, err := os.ReadDir(srcDir) // only parsing files in the dir(don't contain sub dir files) + if err != nil { + return err + } + + for _, f := range files { + if f.IsDir() { + continue + } + + path := filepath.Join(srcDir, f.Name()) + if err := parser.parseFile(pkg.Name, path, nil, parseFlag); err != nil { + return err + } + } + + for i := 0; i < len(pkg.Deps); i++ { + if err := parser.getAllGoFileInfoFromDeps(&pkg.Deps[i], parseFlag); err != nil { + return err + } + } + + return nil +} + +func (parser *Parser) parseFile(packageDir, path string, src interface{}, flag ParseFlag) error { + if strings.HasSuffix(strings.ToLower(path), "_test.go") || filepath.Ext(path) != ".go" { + return nil + } + + return parser.packages.ParseFile(packageDir, path, src, flag) +} + +func (parser *Parser) checkOperationIDUniqueness() error { + // operationsIds contains all operationId annotations to check it's unique + operationsIds := make(map[string]string) + + for path, item := range parser.swagger.Paths.Paths { + var method, id string + + for method = range allMethod { + op := refRouteMethodOp(&item, method) + if *op != nil { + id = (**op).ID + + break + } + } + + if id == "" { + continue + } + + current := fmt.Sprintf("%s %s", method, path) + + previous, ok := operationsIds[id] + if ok { + return fmt.Errorf( + "duplicated @id annotation '%s' found in '%s', previously declared in: '%s'", + id, current, previous) + } + + operationsIds[id] = current + } + + return nil +} + +// Skip returns filepath.SkipDir error if match vendor and hidden folder. +func (parser *Parser) Skip(path string, f os.FileInfo) error { + return walkWith(parser.excludes, parser.ParseVendor)(path, f) +} + +func walkWith(excludes map[string]struct{}, parseVendor bool) func(path string, fileInfo os.FileInfo) error { + return func(path string, f os.FileInfo) error { + if f.IsDir() { + if !parseVendor && f.Name() == "vendor" || // ignore "vendor" + f.Name() == "docs" || // exclude docs + len(f.Name()) > 1 && f.Name()[0] == '.' && f.Name() != ".." { // exclude all hidden folder + return filepath.SkipDir + } + + if excludes != nil { + if _, ok := excludes[path]; ok { + return filepath.SkipDir + } + } + } + + return nil + } +} + +// GetSwagger returns *spec.Swagger which is the root document object for the API specification. +func (parser *Parser) GetSwagger() *spec.Swagger { + return parser.swagger +} + +// addTestType just for tests. 
+func (parser *Parser) addTestType(typename string) {
+	typeDef := &TypeSpecDef{}
+	parser.packages.uniqueDefinitions[typename] = typeDef
+	parser.parsedSchemas[typeDef] = &Schema{
+		PkgPath: "",
+		Name:    typename,
+		Schema:  PrimitiveSchema(OBJECT),
+	}
+}
diff --git a/vendor/github.com/swaggo/swag/schema.go b/vendor/github.com/swaggo/swag/schema.go
new file mode 100644
index 00000000..b3a5b38c
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/schema.go
@@ -0,0 +1,293 @@
+package swag
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/go-openapi/spec"
+)
+
+const (
+	// ARRAY represents an array value.
+	ARRAY = "array"
+	// OBJECT represents an object value.
+	OBJECT = "object"
+	// PRIMITIVE represents a primitive value.
+	PRIMITIVE = "primitive"
+	// BOOLEAN represents a boolean value.
+	BOOLEAN = "boolean"
+	// INTEGER represents an integer value.
+	INTEGER = "integer"
+	// NUMBER represents a number value.
+	NUMBER = "number"
+	// STRING represents a string value.
+	STRING = "string"
+	// FUNC represents a function value.
+	FUNC = "func"
+	// ERROR represents an error value.
+	ERROR = "error"
+	// INTERFACE represents an interface value.
+	INTERFACE = "interface{}"
+	// ANY represents an any value.
+	ANY = "any"
+	// NIL represents an empty value.
+	NIL = "nil"
+
+	// IgnoreNameOverridePrefix is prepended to a model name to avoid renaming based on a comment.
+	IgnoreNameOverridePrefix = '$'
+)
+
+// CheckSchemaType returns an error if typeName is not a primitive type name.
+func CheckSchemaType(typeName string) error {
+	if !IsPrimitiveType(typeName) {
+		return fmt.Errorf("%s is not basic types", typeName)
+	}
+
+	return nil
+}
+
+// IsSimplePrimitiveType determines whether the type name is a simple primitive type.
+func IsSimplePrimitiveType(typeName string) bool {
+	switch typeName {
+	case STRING, NUMBER, INTEGER, BOOLEAN:
+		return true
+	}
+
+	return false
+}
+
+// IsPrimitiveType determines whether the type name is a primitive type.
+func IsPrimitiveType(typeName string) bool {
+	switch typeName {
+	case STRING, NUMBER, INTEGER, BOOLEAN, ARRAY, OBJECT, FUNC:
+		return true
+	}
+
+	return false
+}
+
+// IsInterfaceLike determines whether the swagger type name is a Go named interface type, like the error type.
+func IsInterfaceLike(typeName string) bool {
+	return typeName == ERROR || typeName == ANY
+}
+
+// IsNumericType determines whether the swagger type name is a numeric type.
+func IsNumericType(typeName string) bool {
+	return typeName == INTEGER || typeName == NUMBER
+}
+
+// TransToValidSchemeType converts a Go basic type name to a swagger-supported type name.
+func TransToValidSchemeType(typeName string) string {
+	switch typeName {
+	case "uint", "int", "uint8", "int8", "uint16", "int16", "byte":
+		return INTEGER
+	case "uint32", "int32", "rune":
+		return INTEGER
+	case "uint64", "int64":
+		return INTEGER
+	case "float32", "float64":
+		return NUMBER
+	case "bool":
+		return BOOLEAN
+	case "string":
+		return STRING
+	}
+
+	return typeName
+}
+
+// IsGolangPrimitiveType determines whether the type name is a golang primitive type.
+func IsGolangPrimitiveType(typeName string) bool {
+	switch typeName {
+	case "uint",
+		"int",
+		"uint8",
+		"int8",
+		"uint16",
+		"int16",
+		"byte",
+		"uint32",
+		"int32",
+		"rune",
+		"uint64",
+		"int64",
+		"float32",
+		"float64",
+		"bool",
+		"string":
+		return true
+	}
+
+	return false
+}
+
+// TransToValidCollectionFormat returns the collection format if it is valid, otherwise an empty string.
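+//
+// For example:
+//
+//	TransToValidCollectionFormat("csv") // "csv"
+//	TransToValidCollectionFormat("foo") // ""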
+func TransToValidCollectionFormat(format string) string { + switch format { + case "csv", "multi", "pipes", "tsv", "ssv": + return format + } + + return "" +} + +func ignoreNameOverride(name string) bool { + return len(name) != 0 && name[0] == IgnoreNameOverridePrefix +} + +// IsComplexSchema whether a schema is complex and should be a ref schema +func IsComplexSchema(schema *spec.Schema) bool { + // a enum type should be complex + if len(schema.Enum) > 0 { + return true + } + + // a deep array type is complex, how to determine deep? here more than 2 ,for example: [][]object,[][][]int + if len(schema.Type) > 2 { + return true + } + + //Object included, such as Object or []Object + for _, st := range schema.Type { + if st == OBJECT { + return true + } + } + return false +} + +// IsRefSchema whether a schema is a reference schema. +func IsRefSchema(schema *spec.Schema) bool { + return schema.Ref.Ref.GetURL() != nil +} + +// RefSchema build a reference schema. +func RefSchema(refType string) *spec.Schema { + return spec.RefSchema("#/definitions/" + refType) +} + +// PrimitiveSchema build a primitive schema. +func PrimitiveSchema(refType string) *spec.Schema { + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{refType}}} +} + +// BuildCustomSchema build custom schema specified by tag swaggertype. +func BuildCustomSchema(types []string) (*spec.Schema, error) { + if len(types) == 0 { + return nil, nil + } + + switch types[0] { + case PRIMITIVE: + if len(types) == 1 { + return nil, errors.New("need primitive type after primitive") + } + + return BuildCustomSchema(types[1:]) + case ARRAY: + if len(types) == 1 { + return nil, errors.New("need array item type after array") + } + + schema, err := BuildCustomSchema(types[1:]) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(schema), nil + case OBJECT: + if len(types) == 1 { + return PrimitiveSchema(types[0]), nil + } + + schema, err := BuildCustomSchema(types[1:]) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + default: + err := CheckSchemaType(types[0]) + if err != nil { + return nil, err + } + + return PrimitiveSchema(types[0]), nil + } +} + +// MergeSchema merge schemas +func MergeSchema(dst *spec.Schema, src *spec.Schema) *spec.Schema { + if len(src.Type) > 0 { + dst.Type = src.Type + } + if len(src.Properties) > 0 { + dst.Properties = src.Properties + } + if src.Items != nil { + dst.Items = src.Items + } + if src.AdditionalProperties != nil { + dst.AdditionalProperties = src.AdditionalProperties + } + if len(src.Description) > 0 { + dst.Description = src.Description + } + if src.Nullable { + dst.Nullable = src.Nullable + } + if len(src.Format) > 0 { + dst.Format = src.Format + } + if src.Default != nil { + dst.Default = src.Default + } + if src.Example != nil { + dst.Example = src.Example + } + if len(src.Extensions) > 0 { + dst.Extensions = src.Extensions + } + if src.Maximum != nil { + dst.Maximum = src.Maximum + } + if src.Minimum != nil { + dst.Minimum = src.Minimum + } + if src.ExclusiveMaximum { + dst.ExclusiveMaximum = src.ExclusiveMaximum + } + if src.ExclusiveMinimum { + dst.ExclusiveMinimum = src.ExclusiveMinimum + } + if src.MaxLength != nil { + dst.MaxLength = src.MaxLength + } + if src.MinLength != nil { + dst.MinLength = src.MinLength + } + if len(src.Pattern) > 0 { + dst.Pattern = src.Pattern + } + if src.MaxItems != nil { + dst.MaxItems = src.MaxItems + } + if src.MinItems != nil { + dst.MinItems = src.MinItems + } + if src.UniqueItems { + dst.UniqueItems = 
+// MergeSchema merges the set fields of src into dst and returns dst.
+func MergeSchema(dst *spec.Schema, src *spec.Schema) *spec.Schema {
+	if len(src.Type) > 0 {
+		dst.Type = src.Type
+	}
+	if len(src.Properties) > 0 {
+		dst.Properties = src.Properties
+	}
+	if src.Items != nil {
+		dst.Items = src.Items
+	}
+	if src.AdditionalProperties != nil {
+		dst.AdditionalProperties = src.AdditionalProperties
+	}
+	if len(src.Description) > 0 {
+		dst.Description = src.Description
+	}
+	if src.Nullable {
+		dst.Nullable = src.Nullable
+	}
+	if len(src.Format) > 0 {
+		dst.Format = src.Format
+	}
+	if src.Default != nil {
+		dst.Default = src.Default
+	}
+	if src.Example != nil {
+		dst.Example = src.Example
+	}
+	if len(src.Extensions) > 0 {
+		dst.Extensions = src.Extensions
+	}
+	if src.Maximum != nil {
+		dst.Maximum = src.Maximum
+	}
+	if src.Minimum != nil {
+		dst.Minimum = src.Minimum
+	}
+	if src.ExclusiveMaximum {
+		dst.ExclusiveMaximum = src.ExclusiveMaximum
+	}
+	if src.ExclusiveMinimum {
+		dst.ExclusiveMinimum = src.ExclusiveMinimum
+	}
+	if src.MaxLength != nil {
+		dst.MaxLength = src.MaxLength
+	}
+	if src.MinLength != nil {
+		dst.MinLength = src.MinLength
+	}
+	if len(src.Pattern) > 0 {
+		dst.Pattern = src.Pattern
+	}
+	if src.MaxItems != nil {
+		dst.MaxItems = src.MaxItems
+	}
+	if src.MinItems != nil {
+		dst.MinItems = src.MinItems
+	}
+	if src.UniqueItems {
+		dst.UniqueItems = src.UniqueItems
+	}
+	if src.MultipleOf != nil {
+		dst.MultipleOf = src.MultipleOf
+	}
+	if len(src.Enum) > 0 {
+		dst.Enum = src.Enum
+	}
+	if len(src.ExtraProps) > 0 {
+		dst.ExtraProps = src.ExtraProps
+	}
+	return dst
+}
diff --git a/vendor/github.com/swaggo/swag/spec.go b/vendor/github.com/swaggo/swag/spec.go
new file mode 100644
index 00000000..c18a365b
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/spec.go
@@ -0,0 +1,64 @@
+package swag
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"text/template"
+)
+
+// Spec holds exported Swagger Info so clients can modify it.
+type Spec struct {
+	Version          string
+	Host             string
+	BasePath         string
+	Schemes          []string
+	Title            string
+	Description      string
+	InfoInstanceName string
+	SwaggerTemplate  string
+	LeftDelim        string
+	RightDelim       string
+}
+
+// ReadDoc parses SwaggerTemplate into a swagger document.
+func (i *Spec) ReadDoc() string {
+	i.Description = strings.ReplaceAll(i.Description, "\n", "\\n")
+
+	tpl := template.New("swagger_info").Funcs(template.FuncMap{
+		"marshal": func(v interface{}) string {
+			a, _ := json.Marshal(v)
+
+			return string(a)
+		},
+		"escape": func(v interface{}) string {
+			// escape tabs
+			var str = strings.ReplaceAll(v.(string), "\t", "\\t")
+			// replace " with \", and if that results in \\", replace that with \\\"
+			str = strings.ReplaceAll(str, "\"", "\\\"")
+
+			return strings.ReplaceAll(str, "\\\\\"", "\\\\\\\"")
+		},
+	})
+
+	if i.LeftDelim != "" && i.RightDelim != "" {
+		tpl = tpl.Delims(i.LeftDelim, i.RightDelim)
+	}
+
+	parsed, err := tpl.Parse(i.SwaggerTemplate)
+	if err != nil {
+		return i.SwaggerTemplate
+	}
+
+	var doc bytes.Buffer
+	if err = parsed.Execute(&doc, i); err != nil {
+		return i.SwaggerTemplate
+	}
+
+	return doc.String()
+}
+
+// InstanceName returns the Spec instance name.
+func (i *Spec) InstanceName() string {
+	return i.InfoInstanceName
+}
diff --git a/vendor/github.com/swaggo/swag/swagger.go b/vendor/github.com/swaggo/swag/swagger.go
new file mode 100644
index 00000000..74c162c2
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/swagger.go
@@ -0,0 +1,72 @@
+package swag
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+// Name is the unique name used to register the default swag instance.
+const Name = "swagger"
+
+var (
+	swaggerMu sync.RWMutex
+	swags     map[string]Swagger
+)
+
+// Swagger is an interface to read a swagger document.
+type Swagger interface {
+	ReadDoc() string
+}
+
+// Register registers swagger for the given name.
+func Register(name string, swagger Swagger) {
+	swaggerMu.Lock()
+	defer swaggerMu.Unlock()
+
+	if swagger == nil {
+		panic("swagger is nil")
+	}
+
+	if swags == nil {
+		swags = make(map[string]Swagger)
+	}
+
+	if _, ok := swags[name]; ok {
+		panic("Register called twice for swag: " + name)
+	}
+
+	swags[name] = swagger
+}
+
+// GetSwagger returns the swagger instance for the given name.
+// If not found, returns nil.
+func GetSwagger(name string) Swagger {
+	swaggerMu.RLock()
+	defer swaggerMu.RUnlock()
+
+	return swags[name]
+}
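To see how this registry is meant to be used, here is an editorial sketch (generated docs packages normally call Register from an init function; the staticDoc type is invented for the example, and swag.ReadDoc, shown just below, then serves the registered document):

package main

import (
	"fmt"

	"github.com/swaggo/swag"
)

// staticDoc is a toy Swagger implementation; real projects use the
// package that swag's code generator emits instead.
type staticDoc string

func (d staticDoc) ReadDoc() string { return string(d) }

func main() {
	swag.Register(swag.Name, staticDoc(`{"swagger":"2.0"}`))

	doc, err := swag.ReadDoc() // defaults to the "swagger" instance
	if err != nil {
		panic(err)
	}
	fmt.Println(doc)
}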
+// ReadDoc reads the swagger document. An optional name parameter can be passed to read a specific document.
+// The default name is "swagger".
+func ReadDoc(optionalName ...string) (string, error) {
+	swaggerMu.RLock()
+	defer swaggerMu.RUnlock()
+
+	if swags == nil {
+		return "", errors.New("no swag has yet been registered")
+	}
+
+	name := Name
+	if len(optionalName) != 0 && optionalName[0] != "" {
+		name = optionalName[0]
+	}
+
+	swag, ok := swags[name]
+	if !ok {
+		return "", fmt.Errorf("no swag named \"%s\" was registered", name)
+	}
+
+	return swag.ReadDoc(), nil
+}
diff --git a/vendor/github.com/swaggo/swag/types.go b/vendor/github.com/swaggo/swag/types.go
new file mode 100644
index 00000000..5f3031e0
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/types.go
@@ -0,0 +1,123 @@
+package swag
+
+import (
+	"go/ast"
+	"go/token"
+	"regexp"
+	"strings"
+
+	"github.com/go-openapi/spec"
+)
+
+// Schema is a parsed schema.
+type Schema struct {
+	*spec.Schema
+	PkgPath string // package import path used to rename Name of a definition in case of conflict
+	Name    string // Name in definitions
+}
+
+// TypeSpecDef holds the full information of a typeSpec.
+type TypeSpecDef struct {
+	// the ast file where the TypeSpec is declared
+	File *ast.File
+
+	// the TypeSpec of this type definition
+	TypeSpec *ast.TypeSpec
+
+	Enums []EnumValue
+
+	// path of the package, starting from under ${GOPATH}/src or from the module path in go.mod
+	PkgPath    string
+	ParentSpec ast.Decl
+
+	SchemaName string
+
+	NotUnique bool
+}
+
+// Name returns the name of the typeSpec.
+func (t *TypeSpecDef) Name() string {
+	if t.TypeSpec != nil && t.TypeSpec.Name != nil {
+		return t.TypeSpec.Name.Name
+	}
+
+	return ""
+}
+
+// TypeName returns the type name of the typeSpec.
+func (t *TypeSpecDef) TypeName() string {
+	if ignoreNameOverride(t.TypeSpec.Name.Name) {
+		return t.TypeSpec.Name.Name[1:]
+	}
+
+	var names []string
+	if t.NotUnique {
+		pkgPath := strings.Map(func(r rune) rune {
+			if r == '\\' || r == '/' || r == '.' {
+				return '_'
+			}
+			return r
+		}, t.PkgPath)
+		names = append(names, pkgPath)
+	} else if t.File != nil {
+		names = append(names, t.File.Name.Name)
+	}
+	if parentFun, ok := (t.ParentSpec).(*ast.FuncDecl); ok && parentFun != nil {
+		names = append(names, parentFun.Name.Name)
+	}
+	names = append(names, t.TypeSpec.Name.Name)
+	return fullTypeName(names...)
+}
+
+// FullPath returns the full path of the typeSpec.
+func (t *TypeSpecDef) FullPath() string {
+	return t.PkgPath + "." + t.Name()
+}
+
+const regexCaseInsensitive = "(?i)"
+
+var reTypeName = regexp.MustCompile(regexCaseInsensitive + `^@name\s+(\S+)`)
+
+// Alias returns the alias given by a trailing '// @name' comment, or "" if there is none.
+func (t *TypeSpecDef) Alias() string {
+	if t.TypeSpec.Comment == nil {
+		return ""
+	}
+
+	// get the alias from a comment of the form '// @name '
+	for _, comment := range t.TypeSpec.Comment.List {
+		trimmedComment := strings.TrimSpace(strings.TrimLeft(comment.Text, "/"))
+		texts := reTypeName.FindStringSubmatch(trimmedComment)
+		if len(texts) > 1 {
+			return texts[1]
+		}
+	}
+
+	return ""
+}
+
+// SetSchemaName sets SchemaName to the @name alias if present, and to TypeName otherwise.
+func (t *TypeSpecDef) SetSchemaName() {
+	if alias := t.Alias(); alias != "" {
+		t.SchemaName = alias
+		return
+	}
+
+	t.SchemaName = t.TypeName()
+}
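The Alias machinery above is what powers swag's model-renaming annotation. A hedged illustration (the struct and names are invented for the example): a trailing @name comment on a type overrides the schema name that would otherwise be derived from the package and type name:

package mypkg

// UserResponse is the payload returned by the user endpoints.
// The trailing annotation renames the generated definition from
// "mypkg.UserResponse" to plain "User" in the swagger document.
type UserResponse struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
} // @name User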
+// AstFileInfo holds information about an ast.File.
+type AstFileInfo struct {
+	// FileSet is the FileSet used to parse this Go source file
+	FileSet *token.FileSet
+
+	// File is the ast.File
+	File *ast.File
+
+	// Path is the path of the ast.File
+	Path string
+
+	// PackagePath is the package import path of the ast.File
+	PackagePath string
+
+	// ParseFlag determines what to parse
+	ParseFlag ParseFlag
+}
diff --git a/vendor/github.com/swaggo/swag/utils.go b/vendor/github.com/swaggo/swag/utils.go
new file mode 100644
index 00000000..df31ff2e
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/utils.go
@@ -0,0 +1,55 @@
+package swag
+
+import "unicode"
+
+// FieldsFunc splits the string s at each rune satisfying f, into at most n parts.
+func FieldsFunc(s string, f func(r rune) bool, n int) []string {
+	// A span is used to record a slice of s of the form s[start:end].
+	// The start index is inclusive and the end index is exclusive.
+	type span struct {
+		start int
+		end   int
+	}
+	spans := make([]span, 0, 32)
+
+	// Find the field start and end indices.
+	// Doing this in a separate pass (rather than slicing the string s
+	// and collecting the result substrings right away) is significantly
+	// more efficient, possibly due to cache effects.
+	start := -1 // valid span start if >= 0
+	for end, rune := range s {
+		if f(rune) {
+			if start >= 0 {
+				spans = append(spans, span{start, end})
+				// Set start to a negative value.
+				// Note: using -1 here consistently and reproducibly
+				// slows down this code by several percent on amd64.
+				start = ^start
+			}
+		} else {
+			if start < 0 {
+				start = end
+				if n > 0 && len(spans)+1 >= n {
+					break
+				}
+			}
+		}
+	}
+
+	// Last field might end at EOF.
+	if start >= 0 {
+		spans = append(spans, span{start, len(s)})
+	}
+
+	// Create strings from recorded field indices.
+	a := make([]string, len(spans))
+	for i, span := range spans {
+		a[i] = s[span.start:span.end]
+	}
+	return a
+}
+
+// FieldsByAnySpace splits the string s by any space character into at most n parts.
+func FieldsByAnySpace(s string, n int) []string {
+	return FieldsFunc(s, unicode.IsSpace, n)
+}
diff --git a/vendor/github.com/swaggo/swag/version.go b/vendor/github.com/swaggo/swag/version.go
new file mode 100644
index 00000000..cf52320f
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/version.go
@@ -0,0 +1,4 @@
+package swag
+
+// Version of swag.
+const Version = "v1.16.4"
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 105c3b27..81faec7e 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
 	pf := mh.PseudoFields()
 	for i, hf := range pf {
 		switch hf.Name {
-		case ":method", ":path", ":scheme", ":authority":
+		case ":method", ":path", ":scheme", ":authority", ":protocol":
 			isRequest = true
 		case ":status":
 			isResponse = true
@@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
 			return pseudoHeaderError(hf.Name)
 		}
 		// Check for duplicates.
-		// This would be a bad algorithm, but N is 4.
+		// This would be a bad algorithm, but N is 5.
 		// And this doesn't allocate.
for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356..c7601c90 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b4..b55547ae 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error 
{ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f44..090d0e1b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,26 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + 
flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +397,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). 
if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this
@@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error {
 func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 	cc := rl.cc
-	cs := rl.streamByID(f.StreamID)
+	cs := rl.streamByID(f.StreamID, headerOrDataFrame)
 	data := f.Data()
 	if cs == nil {
 		cc.mu.Lock()
@@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
 	cs.abortStream(err)
 }
 
-func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+// Constants passed to streamByID for documentation purposes.
+const (
+	headerOrDataFrame    = true
+	notHeaderOrDataFrame = false
+)
+
+// streamByID returns the stream with the given id, or nil if no stream has that id.
+// If headerOrData is true, it clears rstStreamPingsBlocked.
+func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream {
 	rl.cc.mu.Lock()
 	defer rl.cc.mu.Unlock()
+	if headerOrData {
+		// Work around an unfortunate gRPC behavior.
+		// See comment on ClientConn.rstStreamPingsBlocked for details.
+		rl.cc.rstStreamPingsBlocked = false
+	}
 	cs := rl.cc.streams[id]
 	if cs != nil && !cs.readAborted {
 		return cs
@@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
 		case SettingHeaderTableSize:
 			cc.henc.SetMaxDynamicTableSize(s.Val)
 			cc.peerMaxHeaderTableSize = s.Val
+		case SettingEnableConnectProtocol:
+			if err := s.Valid(); err != nil {
+				return err
+			}
+			// If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL,
+			// we require that it do so in the first SETTINGS frame.
+			//
+			// When we attempt to use extended CONNECT, we wait for the first
+			// SETTINGS frame to see if the server supports it. If we let the
+			// server enable the feature with a later SETTINGS frame, then
+			// users will see inconsistent results depending on whether we've
+			// seen that frame or not.
+			if !cc.seenSettings {
+				cc.extendedConnectAllowed = s.Val == 1
+			}
 		default:
 			cc.vlogf("Unhandled Setting: %v", s)
 		}
@@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
 			// connection can establish to our default.
 			cc.maxConcurrentStreams = defaultMaxConcurrentStreams
 		}
+		close(cc.seenSettingsChan)
 		cc.seenSettings = true
 	}
@@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
 func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
 	cc := rl.cc
-	cs := rl.streamByID(f.StreamID)
+	cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
 	if f.StreamID != 0 && cs == nil {
 		return nil
 	}
@@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
 }
 func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
-	cs := rl.streamByID(f.StreamID)
+	cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
 	if cs == nil {
 		// TODO: return error if server tries to RST_STREAM an idle stream
 		return nil
@@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
 	if cc.pendingResets > 0 {
 		// See clientStream.cleanupWriteRequest.
cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index ccba391c..6ebc48b3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2970,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0c00cb3f..c0d45e32 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -297,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -335,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dfb36455..c731d24f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -298,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -336,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 
SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d46dcf78..680018a4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -303,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -341,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3af3248a..a63909f3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -294,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -332,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 292bcf02..9b0a2573 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -290,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -328,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 782b7110..958e6e06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f 
RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 84973fd9..50c7f25b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 6d9cbc3b..ced21d66 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 5f9fedbc..226c0441 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index bb0026ee..3122737c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 
0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -351,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -389,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46120db5..eb5d3467 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5c951634..e921ebc6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 11a84d5a..38ba81c5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -287,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -325,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 
f78c4617..71f04009 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -359,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -397,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index aeb777c3..c44a3133 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -350,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -436,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d4..17c53bd9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go 
b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941..2392226a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8daaf3fa..5537148d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2594,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3541,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3802,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3850,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4031,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + 
ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4200,7 +4200,8 @@ type ( } PtpSysOffsetExtended struct { Samples uint32 - Rsv [3]uint32 + Clockid int32 + Rsv [2]uint32 Ts [25][3]PtpClockTime } PtpSysOffsetPrecise struct { @@ -4399,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 4510bfc3..4a325438 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 51311e20..9d138de5 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6f525288..01c0716c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -280,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1612,7 +1614,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1652,7 +1654,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1660,7 +1662,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1672,7 +1674,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1684,7 +1686,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -2446,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, 
moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2462,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000..6e34df46 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,654 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. 
Ideally PathEnclosingInterval would reject
+// intervals that lie wholly or partially outside the range of the
+// file, but unfortunately ast.File records only the token.Pos of
+// the 'package' keyword, but not of the start of the file itself.
+func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
+	// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
+
+	// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
+	var visit func(node ast.Node) bool
+	visit = func(node ast.Node) bool {
+		path = append(path, node)
+
+		nodePos := node.Pos()
+		nodeEnd := node.End()
+
+		// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
+
+		// Intersect [start, end) with interval of node.
+		if start < nodePos {
+			start = nodePos
+		}
+		if end > nodeEnd {
+			end = nodeEnd
+		}
+
+		// Find sole child that contains [start, end).
+		children := childrenOf(node)
+		l := len(children)
+		for i, child := range children {
+			// [childPos, childEnd) is unaugmented interval of child.
+			childPos := child.Pos()
+			childEnd := child.End()
+
+			// [augPos, augEnd) is whitespace-augmented interval of child.
+			augPos := childPos
+			augEnd := childEnd
+			if i > 0 {
+				augPos = children[i-1].End() // start of preceding whitespace
+			}
+			if i < l-1 {
+				nextChildPos := children[i+1].Pos()
+				// Does [start, end) lie between child and next child?
+				if start >= augEnd && end <= nextChildPos {
+					return false // inexact match
+				}
+				augEnd = nextChildPos // end of following whitespace
+			}
+
+			// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
+			//	i, augPos, augEnd, start, end) // debugging
+
+			// Does augmented child strictly contain [start, end)?
+			if augPos <= start && end <= augEnd {
+				if is[tokenNode](child) {
+					return true
+				}
+
+				// childrenOf elides the FuncType node beneath FuncDecl.
+				// Add it back here for TypeParams, Params, Results
+				// (all FieldLists). But we don't add it back for the "func" token
+				// even though it is in the tree at FuncDecl.Type.Func.
+				if decl, ok := node.(*ast.FuncDecl); ok {
+					if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
+						path = append(path, decl.Type)
+					}
+				}
+
+				return visit(child)
+			}
+
+			// Does [start, end) overlap multiple children?
+			// i.e. left-augmented child contains start
+			// but LR-augmented child does not contain end.
+			if start < childEnd && end > augEnd {
+				break
+			}
+		}
+
+		// No single child contained [start, end),
+		// so node is the result. Is it exact?
+
+		// (It's tempting to put this condition before the
+		// child loop, but it gives the wrong result in the
+		// case where a node (e.g. ExprStmt) and its sole
+		// child have equal intervals.)
+		if start == nodePos && end == nodeEnd {
+			return true // exact match
+		}
+
+		return false // inexact: overlaps multiple children
+	}
+
+	// Ensure [start,end) is nondecreasing.
+	if start > end {
+		start, end = end, start
+	}
+
+	if start < root.End() && end > root.Pos() {
+		if start == end {
+			end = start + 1 // empty interval => interval of size 1
+		}
+		exact = visit(root)
+
+		// Reverse the path:
+		for i, l := 0, len(path); i < l/2; i++ {
+			path[i], path[l-1-i] = path[l-1-i], path[i]
+		}
+	} else {
+		// Selection lies within whitespace preceding the
+		// first (or following the last) declaration in the file.
+		// The result nonetheless always includes the ast.File.
+		path = append(path, root)
+	}
+
+	return
+}
+
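+// (A minimal usage sketch, assuming f is a parsed *ast.File and start/end
+// are token.Pos values chosen by the caller:
+//
+//	path, exact := astutil.PathEnclosingInterval(f, start, end)
+//	for _, n := range path {
+//		fmt.Printf("%T\n", n) // innermost enclosing node first, *ast.File last
+//	}
+//
+// exact is true when the interval covers path[0] and nothing more than
+// adjacent whitespace, as described above.)
+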
+// tokenNode is a dummy implementation of ast.Node for a single token.
+// Such nodes are used transiently by PathEnclosingInterval but never escape
+// this package.
+type tokenNode struct {
+	pos token.Pos
+	end token.Pos
+}
+
+func (n tokenNode) Pos() token.Pos {
+	return n.pos
+}
+
+func (n tokenNode) End() token.Pos {
+	return n.end
+}
+
+func tok(pos token.Pos, len int) ast.Node {
+	return tokenNode{pos, pos + token.Pos(len)}
+}
+
+// childrenOf returns the direct non-nil children of ast.Node n.
+// It may include fake ast.Node implementations for bare tokens.
+// It is not safe to call (e.g.) ast.Walk on such nodes.
+func childrenOf(n ast.Node) []ast.Node {
+	var children []ast.Node
+
+	// First add nodes for all true subtrees.
+	ast.Inspect(n, func(node ast.Node) bool {
+		if node == n { // push n
+			return true // recur
+		}
+		if node != nil { // push child
+			children = append(children, node)
+		}
+		return false // no recursion
+	})
+
+	// Then add fake Nodes for bare tokens.
+	switch n := n.(type) {
+	case *ast.ArrayType:
+		children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Elt.End(), len("]")))
+
+	case *ast.AssignStmt:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.BasicLit:
+		children = append(children,
+			tok(n.ValuePos, len(n.Value)))
+
+	case *ast.BinaryExpr:
+		children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+	case *ast.BlockStmt:
+		children = append(children,
+			tok(n.Lbrace, len("{")),
+			tok(n.Rbrace, len("}")))
+
+	case *ast.BranchStmt:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.CallExpr:
+		children = append(children,
+			tok(n.Lparen, len("(")),
+			tok(n.Rparen, len(")")))
+		if n.Ellipsis != 0 {
+			children = append(children, tok(n.Ellipsis, len("...")))
+		}
+
+	case *ast.CaseClause:
+		if n.List == nil {
+			children = append(children,
+				tok(n.Case, len("default")))
+		} else {
+			children = append(children,
+				tok(n.Case, len("case")))
+		}
+		children = append(children, tok(n.Colon, len(":")))
+
+	case *ast.ChanType:
+		switch n.Dir {
+		case ast.RECV:
+			children = append(children, tok(n.Begin, len("<-chan")))
+		case ast.SEND:
+			children = append(children, tok(n.Begin, len("chan<-")))
+		case ast.RECV | ast.SEND:
+			children = append(children, tok(n.Begin, len("chan")))
+		}
+
+	case *ast.CommClause:
+		if n.Comm == nil {
+			children = append(children,
+				tok(n.Case, len("default")))
+		} else {
+			children = append(children,
+				tok(n.Case, len("case")))
+		}
+		children = append(children, tok(n.Colon, len(":")))
+
+	case *ast.Comment:
+		// nop
+
+	case *ast.CommentGroup:
+		// nop
+
+	case *ast.CompositeLit:
+		children = append(children,
+			tok(n.Lbrace, len("{")),
+			tok(n.Rbrace, len("}")))
+
+	case *ast.DeclStmt:
+		// nop
+
+	case *ast.DeferStmt:
+		children = append(children,
+			tok(n.Defer, len("defer")))
+
+	case *ast.Ellipsis:
+		children = append(children,
+			tok(n.Ellipsis, len("...")))
+
+	case *ast.EmptyStmt:
+		// nop
+
+	case *ast.ExprStmt:
+		// nop
+
+	case *ast.Field:
+		// TODO(adonovan): Field.{Doc,Comment,Tag}?
+
+	case *ast.FieldList:
+		children = append(children,
+			tok(n.Opening, len("(")), // or len("[")
+			tok(n.Closing, len(")"))) // or len("]")
+
+	case *ast.File:
+		// TODO test: Doc
+		children = append(children,
+			tok(n.Package, len("package")))
+
+	case *ast.ForStmt:
+		children = append(children,
+			tok(n.For, len("for")))
+
+	case *ast.FuncDecl:
+		// TODO(adonovan): FuncDecl.Comment?
+
+		// Uniquely, FuncDecl breaks the invariant that
+		// preorder traversal yields tokens in lexical order:
+		// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
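+		// (An illustrative example: in "func (r T) M() {}" the func keyword
+		// precedes the receiver r in the source text, yet ast.Inspect visits
+		// n.Recv before n.Type, simply because of the FuncDecl field order.)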
+ // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? 
+ + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case 
*ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000..a6b5ed0a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,490 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. 
Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) 
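+		// The declaration at index i was just removed; step back so the
+		// next iteration re-examines the element that shifted into slot i.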
+ i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. 
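+	// (An ast.File records imports twice, in f.Decls and in f.Imports;
+	// both views must be updated to keep the file consistent.)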
+ for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). +func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. 
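+// For example (an illustrative sketch), AddNamedImport above uses it to
+// skip cgo's special import "C" block:
+//
+//	if declImports(gen, "C") {
+//		continue
+//	}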
+func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000..58934f76 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. 
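+//
+// (A sketch from a client package, renaming every identifier "foo" to the
+// hypothetical name "bar"; f is assumed to be a parsed *ast.File:
+//
+//	astutil.Apply(f, nil, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
+//			c.Replace(&ast.Ident{Name: "bar", NamePos: id.NamePos})
+//		}
+//		return true
+//	})
+//
+// Returning true from the post function keeps the traversal going.)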
+func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. 
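+//
+// (A sketch: within an ApplyFunc, append a statement after each assignment
+// in a block; newStmt is a hypothetical ast.Stmt built by the caller:
+//
+//	if _, ok := c.Node().(*ast.AssignStmt); ok {
+//		c.InsertAfter(newStmt)
+//	}
+//
+// This requires the cursor to point into a slice, e.g. ast.BlockStmt.List.)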
+func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + 
a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. 
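+		// (To visit them anyway, a caller can range over n.Comments and
+		// invoke Apply on each *ast.CommentGroup directly.)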
+ + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. +type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000..ca71e3e1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// Deprecated: use [ast.Unparen]. +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go new file mode 100644 index 00000000..dfb8cd6c --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go @@ -0,0 +1,195 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildutil provides utilities related to the go/build +// package in the standard library. +// +// All I/O is done via the build.Context file system interface, which must +// be concurrency-safe. +package buildutil // import "golang.org/x/tools/go/buildutil" + +import ( + "go/build" + "os" + "path/filepath" + "sort" + "strings" + "sync" +) + +// AllPackages returns the package path of each Go package in any source +// directory of the specified build context (e.g. $GOROOT or an element +// of $GOPATH). Errors are ignored. The results are sorted. +// All package paths are canonical, and thus may contain "/vendor/". +// +// The result may include import paths for directories that contain no +// *.go files, such as "archive" (in $GOROOT/src). +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. +func AllPackages(ctxt *build.Context) []string { + var list []string + ForEachPackage(ctxt, func(pkg string, _ error) { + list = append(list, pkg) + }) + sort.Strings(list) + return list +} + +// ForEachPackage calls the found function with the package path of +// each Go package it finds in any source directory of the specified +// build context (e.g. $GOROOT or an element of $GOPATH). +// All package paths are canonical, and thus may contain "/vendor/". 
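+//
+// For example (an illustrative sketch), printing every package in the
+// default build context:
+//
+//	buildutil.ForEachPackage(&build.Default, func(path string, err error) {
+//		if err == nil {
+//			fmt.Println(path)
+//		}
+//	})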
+// +// If the package directory exists but could not be read, the second +// argument to the found function provides the error. +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. +func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { + ch := make(chan item) + + var wg sync.WaitGroup + for _, root := range ctxt.SrcDirs() { + root := root + wg.Add(1) + go func() { + allPackages(ctxt, root, ch) + wg.Done() + }() + } + go func() { + wg.Wait() + close(ch) + }() + + // All calls to found occur in the caller's goroutine. + for i := range ch { + found(i.importPath, i.err) + } +} + +type item struct { + importPath string + err error // (optional) +} + +// We use a process-wide counting semaphore to limit +// the number of parallel calls to ReadDir. +var ioLimit = make(chan bool, 20) + +func allPackages(ctxt *build.Context, root string, ch chan<- item) { + root = filepath.Clean(root) + string(os.PathSeparator) + + var wg sync.WaitGroup + + var walkDir func(dir string) + walkDir = func(dir string) { + // Avoid .foo, _foo, and testdata directory trees. + base := filepath.Base(dir) + if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { + return + } + + pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) + + // Prune search if we encounter any of these import paths. + switch pkg { + case "builtin": + return + } + + ioLimit <- true + files, err := ReadDir(ctxt, dir) + <-ioLimit + if pkg != "" || err != nil { + ch <- item{pkg, err} + } + for _, fi := range files { + fi := fi + if fi.IsDir() { + wg.Add(1) + go func() { + walkDir(filepath.Join(dir, fi.Name())) + wg.Done() + }() + } + } + } + + walkDir(root) + wg.Wait() +} + +// ExpandPatterns returns the set of packages matched by patterns, +// which may have the following forms: +// +// golang.org/x/tools/cmd/guru # a single package +// golang.org/x/tools/... # all packages beneath dir +// ... # the entire workspace. +// +// Order is significant: a pattern preceded by '-' removes matching +// packages from the set. For example, these patterns match all encoding +// packages except encoding/xml: +// +// encoding/... -encoding/xml +// +// A trailing slash in a pattern is ignored. (Path components of Go +// package names are separated by slash, not the platform's path separator.) +func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { + // TODO(adonovan): support other features of 'go list': + // - "std"/"cmd"/"all" meta-packages + // - "..." not at the end of a pattern + // - relative patterns using "./" or "../" prefix + + pkgs := make(map[string]bool) + doPkg := func(pkg string, neg bool) { + if neg { + delete(pkgs, pkg) + } else { + pkgs[pkg] = true + } + } + + // Scan entire workspace if wildcards are present. + // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. + var all []string + for _, arg := range patterns { + if strings.HasSuffix(arg, "...") { + all = AllPackages(ctxt) + break + } + } + + for _, arg := range patterns { + if arg == "" { + continue + } + + neg := arg[0] == '-' + if neg { + arg = arg[1:] + } + + if arg == "..." { + // ... matches all packages + for _, pkg := range all { + doPkg(pkg, neg) + } + } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { + // dir/... 
matches all packages beneath dir + for _, pkg := range all { + if strings.HasPrefix(pkg, dir) && + (len(pkg) == len(dir) || pkg[len(dir)] == '/') { + doPkg(pkg, neg) + } + } + } else { + // single package + doPkg(strings.TrimSuffix(arg, "/"), neg) + } + } + + return pkgs +} diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go new file mode 100644 index 00000000..763d1880 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -0,0 +1,111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "fmt" + "go/build" + "io" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" +) + +// FakeContext returns a build.Context for the fake file tree specified +// by pkgs, which maps package import paths to a mapping from file base +// names to contents. +// +// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides +// the necessary file access methods to read from memory instead of the +// real file system. +// +// Unlike a real file tree, the fake one has only two levels---packages +// and files---so ReadDir("/go/src/") returns all packages under +// /go/src/ including, for instance, "math" and "math/big". +// ReadDir("/go/src/math/big") would return all the files in the +// "math/big" package. +func FakeContext(pkgs map[string]map[string]string) *build.Context { + clean := func(filename string) string { + f := path.Clean(filepath.ToSlash(filename)) + // Removing "/go/src" while respecting segment + // boundaries has this unfortunate corner case: + if f == "/go/src" { + return "" + } + return strings.TrimPrefix(f, "/go/src/") + } + + ctxt := build.Default // copy + ctxt.GOROOT = "/go" + ctxt.GOPATH = "" + ctxt.Compiler = "gc" + ctxt.IsDir = func(dir string) bool { + dir = clean(dir) + if dir == "" { + return true // needed by (*build.Context).SrcDirs + } + return pkgs[dir] != nil + } + ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { + dir = clean(dir) + var fis []os.FileInfo + if dir == "" { + // enumerate packages + for importPath := range pkgs { + fis = append(fis, fakeDirInfo(importPath)) + } + } else { + // enumerate files of package + for basename := range pkgs[dir] { + fis = append(fis, fakeFileInfo(basename)) + } + } + sort.Sort(byName(fis)) + return fis, nil + } + ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { + filename = clean(filename) + dir, base := path.Split(filename) + content, ok := pkgs[path.Clean(dir)][base] + if !ok { + return nil, fmt.Errorf("file not found: %s", filename) + } + return io.NopCloser(strings.NewReader(content)), nil + } + ctxt.IsAbsPath = func(path string) bool { + path = filepath.ToSlash(path) + // Don't rely on the default (filepath.Path) since on + // Windows, it reports virtual paths as non-absolute. 
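+		// (The fake tree built above always uses forward slashes,
+		// so a leading "/" is a sufficient test here.)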
+		return strings.HasPrefix(path, "/")
+	}
+	return &ctxt
+}
+
+type byName []os.FileInfo
+
+func (s byName) Len() int           { return len(s) }
+func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
+
+type fakeFileInfo string
+
+func (fi fakeFileInfo) Name() string    { return string(fi) }
+func (fakeFileInfo) Sys() interface{}   { return nil }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool        { return false }
+func (fakeFileInfo) Size() int64        { return 0 }
+func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
+
+type fakeDirInfo string
+
+func (fd fakeDirInfo) Name() string    { return string(fd) }
+func (fakeDirInfo) Sys() interface{}   { return nil }
+func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
+func (fakeDirInfo) IsDir() bool        { return true }
+func (fakeDirInfo) Size() int64        { return 0 }
+func (fakeDirInfo) Mode() os.FileMode  { return 0755 }
diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go
new file mode 100644
index 00000000..7e371658
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"go/build"
+	"io"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// OverlayContext overlays a build.Context with additional files from
+// a map. Files in the map take precedence over other files.
+//
+// In addition to plain string comparison, two file names are
+// considered equal if their base names match and their directory
+// components point at the same directory on the file system. That is,
+// symbolic links are followed for directories, but not files.
+//
+// A common use case for OverlayContext is to allow editors to pass in
+// a set of unsaved, modified files.
+//
+// Currently, only the Context.OpenFile function will respect the
+// overlay. This may change in the future.
+func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
+	// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
+
+	rc := func(data []byte) (io.ReadCloser, error) {
+		return io.NopCloser(bytes.NewBuffer(data)), nil
+	}
+
+	copy := *orig // make a copy
+	ctxt := &copy
+	ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
+		// Fast path: names match exactly.
+		if content, ok := overlay[path]; ok {
+			return rc(content)
+		}
+
+		// Slow path: check for same file under a different
+		// alias, perhaps due to a symbolic link.
+		for filename, content := range overlay {
+			if sameFile(path, filename) {
+				return rc(content)
+			}
+		}
+
+		return OpenFile(orig, path)
+	}
+	return ctxt
+}
+
+// ParseOverlayArchive parses an archive containing Go files and their
+// contents. The result is intended to be used with OverlayContext.
+//
+// # Archive format
+//
+// The archive consists of a series of files. Each file consists of a
+// name, a decimal file size and the file contents, separated by
+// newlines. No newline follows after the file contents.
+func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
+	overlay := make(map[string][]byte)
+	r := bufio.NewReader(archive)
+	for {
+		// Read file name.
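+		// (For instance, an archive holding one file "a.go" with contents
+		// "hello" is encoded as the three lines
+		//
+		//	a.go
+		//	5
+		//	hello
+		//
+		// with no newline after the final contents.)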
+ filename, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break // OK + } + return nil, fmt.Errorf("reading archive file name: %v", err) + } + filename = filepath.Clean(strings.TrimSpace(filename)) + + // Read file size. + sz, err := r.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err) + } + sz = strings.TrimSpace(sz) + size, err := strconv.ParseUint(sz, 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err) + } + + // Read file content. + content := make([]byte, size) + if _, err := io.ReadFull(r, content); err != nil { + return nil, fmt.Errorf("reading archive file %s: %v", filename, err) + } + overlay[filename] = content + } + + return overlay, nil +} diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go new file mode 100644 index 00000000..32c8d142 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +// This duplicated logic must be kept in sync with that from go build: +// $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) +// $GOROOT/src/cmd/go/internal/base/flag.go (StringsFlag.Set) +// $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + +import ( + "fmt" + "strings" +) + +const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + + "For more information about build tags, see the description of " + + "build constraints in the documentation for the go/build package" + +// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses +// a flag value the same as go build's -tags flag and populates a []string slice. +// +// See $GOROOT/src/go/build/doc.go for description of build tags. +// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. +// +// Example: +// +// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) +type TagsFlag []string + +func (v *TagsFlag) Set(s string) error { + // See $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) + // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'". + if strings.Contains(s, " ") || strings.Contains(s, "'") { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err + } + + // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. + *v = []string{} + for _, s := range strings.Split(s, ",") { + if s != "" { + *v = append(*v, s) + } + } + return nil +} + +func (v *TagsFlag) Get() interface{} { return *v } + +func splitQuotedFields(s string) ([]string, error) { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) + // This must remain in sync with that logic. + var f []string + for len(s) > 0 { + for len(s) > 0 && isSpaceByte(s[0]) { + s = s[1:] + } + if len(s) == 0 { + break + } + // Accepted quoted string. No unescaping inside. 
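+			// (e.g. the input `a 'b c'` yields the fields "a" and "b c".)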
+ if s[0] == '"' || s[0] == '\'' { + quote := s[0] + s = s[1:] + i := 0 + for i < len(s) && s[i] != quote { + i++ + } + if i >= len(s) { + return nil, fmt.Errorf("unterminated %c string", quote) + } + f = append(f, s[:i]) + s = s[i+1:] + continue + } + i := 0 + for i < len(s) && !isSpaceByte(s[i]) { + i++ + } + f = append(f, s[:i]) + s = s[i:] + } + return f, nil +} + +func (v *TagsFlag) String() string { + return "" +} + +func isSpaceByte(c byte) bool { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + // This list must remain in sync with that. + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go new file mode 100644 index 00000000..bee6390d --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/util.go @@ -0,0 +1,209 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +// ParseFile behaves like parser.ParseFile, +// but uses the build context's file system interface, if any. +// +// If file is not absolute (as defined by IsAbsPath), the (dir, file) +// components are joined using JoinPath; dir must be absolute. +// +// The displayPath function, if provided, is used to transform the +// filename that will be attached to the ASTs. +// +// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. +func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { + if !IsAbsPath(ctxt, file) { + file = JoinPath(ctxt, dir, file) + } + rd, err := OpenFile(ctxt, file) + if err != nil { + return nil, err + } + defer rd.Close() // ignore error + if displayPath != nil { + file = displayPath(file) + } + return parser.ParseFile(fset, file, rd, mode) +} + +// ContainingPackage returns the package containing filename. +// +// If filename is not absolute, it is interpreted relative to working directory dir. +// All I/O is via the build context's file system interface, if any. +// +// The '...Files []string' fields of the resulting build.Package are not +// populated (build.FindOnly mode). +func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { + if !IsAbsPath(ctxt, filename) { + filename = JoinPath(ctxt, dir, filename) + } + + // We must not assume the file tree uses + // "/" always, + // `\` always, + // or os.PathSeparator (which varies by platform), + // but to make any progress, we are forced to assume that + // paths will not use `\` unless the PathSeparator + // is also `\`, thus we can rely on filepath.ToSlash for some sanity. + + dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" + + // We assume that no source root (GOPATH[i] or GOROOT) contains any other. + for _, srcdir := range ctxt.SrcDirs() { + srcdirSlash := filepath.ToSlash(srcdir) + "/" + if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { + return ctxt.Import(importPath, dir, build.FindOnly) + } + } + + return nil, fmt.Errorf("can't find package containing %s", filename) +} + +// -- Effective methods of file system interface ------------------------- + +// (go/build.Context defines these as methods, but does not export them.) 
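+//
+// A minimal usage sketch (the path is hypothetical): each helper
+// consults the corresponding ctxt hook when it is non-nil and
+// otherwise falls back to the real file system, so callers need not
+// know whether a virtual file system is in use:
+//
+//	if rd, err := OpenFile(ctxt, "/src/a/a.go"); err == nil {
+//		defer rd.Close()
+//		// ... read the file via rd ...
+//	}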
+ +// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// the local file system to answer the question. +func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { + if f := ctxt.HasSubdir; f != nil { + return f(root, dir) + } + + // Try using paths we received. + if rel, ok = hasSubdir(root, dir); ok { + return + } + + // Try expanding symlinks and comparing + // expanded against unexpanded and + // expanded against expanded. + rootSym, _ := filepath.EvalSymlinks(root) + dirSym, _ := filepath.EvalSymlinks(dir) + + if rel, ok = hasSubdir(rootSym, dir); ok { + return + } + if rel, ok = hasSubdir(root, dirSym); ok { + return + } + return hasSubdir(rootSym, dirSym) +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + return filepath.ToSlash(dir[len(root):]), true +} + +// FileExists returns true if the specified file exists, +// using the build context's file system interface. +func FileExists(ctxt *build.Context, path string) bool { + if ctxt.OpenFile != nil { + r, err := ctxt.OpenFile(path) + if err != nil { + return false + } + r.Close() // ignore error + return true + } + _, err := os.Stat(path) + return err == nil +} + +// OpenFile behaves like os.Open, +// but uses the build context's file system interface, if any. +func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { + if ctxt.OpenFile != nil { + return ctxt.OpenFile(path) + } + return os.Open(path) +} + +// IsAbsPath behaves like filepath.IsAbs, +// but uses the build context's file system interface, if any. +func IsAbsPath(ctxt *build.Context, path string) bool { + if ctxt.IsAbsPath != nil { + return ctxt.IsAbsPath(path) + } + return filepath.IsAbs(path) +} + +// JoinPath behaves like filepath.Join, +// but uses the build context's file system interface, if any. +func JoinPath(ctxt *build.Context, path ...string) string { + if ctxt.JoinPath != nil { + return ctxt.JoinPath(path...) + } + return filepath.Join(path...) +} + +// IsDir behaves like os.Stat plus IsDir, +// but uses the build context's file system interface, if any. +func IsDir(ctxt *build.Context, path string) bool { + if ctxt.IsDir != nil { + return ctxt.IsDir(path) + } + fi, err := os.Stat(path) + return err == nil && fi.IsDir() +} + +// ReadDir behaves like ioutil.ReadDir, +// but uses the build context's file system interface, if any. +func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { + if ctxt.ReadDir != nil { + return ctxt.ReadDir(path) + } + return ioutil.ReadDir(path) +} + +// SplitPathList behaves like filepath.SplitList, +// but uses the build context's file system interface, if any. +func SplitPathList(ctxt *build.Context, s string) []string { + if ctxt.SplitPathList != nil { + return ctxt.SplitPathList(s) + } + return filepath.SplitList(s) +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. 
+func sameFile(x, y string) bool { + if path.Clean(x) == path.Clean(y) { + return true + } + if filepath.Base(x) == filepath.Base(y) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go new file mode 100644 index 00000000..697974bb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -0,0 +1,219 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cgo handles cgo preprocessing of files containing `import "C"`. +// +// DESIGN +// +// The approach taken is to run the cgo processor on the package's +// CgoFiles and parse the output, faking the filenames of the +// resulting ASTs so that the synthetic file containing the C types is +// called "C" (e.g. "~/go/src/net/C") and the preprocessed files +// have their original names (e.g. "~/go/src/net/cgo_unix.go"), +// not the names of the actual temporary files. +// +// The advantage of this approach is its fidelity to 'go build'. The +// downside is that the token.Position.Offset for each AST node is +// incorrect, being an offset within the temporary file. Line numbers +// should still be correct because of the //line comments. +// +// The logic of this file is mostly plundered from the 'go build' +// tool, which also invokes the cgo preprocessor. +// +// +// REJECTED ALTERNATIVE +// +// An alternative approach that we explored is to extend go/types' +// Importer mechanism to provide the identity of the importing package +// so that each time `import "C"` appears it resolves to a different +// synthetic package containing just the objects needed in that case. +// The loader would invoke cgo but parse only the cgo_types.go file +// defining the package-level objects, discarding the other files +// resulting from preprocessing. +// +// The benefit of this approach would have been that source-level +// syntax information would correspond exactly to the original cgo +// file, with no preprocessing involved, making source tools like +// godoc, guru, and eg happy. However, the approach was rejected +// due to the additional complexity it would impose on go/types. (It +// made for a beautiful demo, though.) +// +// cgo files, despite their *.go extension, are not legal Go source +// files per the specification since they may refer to unexported +// members of package "C" such as C.int. Also, a function such as +// C.getpwent has in effect two types, one matching its C type and one +// which additionally returns (errno C.int). The cgo preprocessor +// uses name mangling to distinguish these two functions in the +// processed code, but go/types would need to duplicate this logic in +// its handling of function calls, analogous to the treatment of map +// lookups in which y=m[k] and y,ok=m[k] are both legal. + +package cgo + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses +// the output and returns the resulting ASTs. 
+func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { + tmpdir, err := os.MkdirTemp("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpdir) + + pkgdir := bp.Dir + if DisplayPath != nil { + pkgdir = DisplayPath(pkgdir) + } + + cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) + if err != nil { + return nil, err + } + var files []*ast.File + for i := range cgoFiles { + rd, err := os.Open(cgoFiles[i]) + if err != nil { + return nil, err + } + display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) + f, err := parser.ParseFile(fset, display, rd, mode) + rd.Close() + if err != nil { + return nil, err + } + files = append(files, f) + } + return files, nil +} + +var cgoRe = regexp.MustCompile(`[/\\:]`) + +// Run invokes the cgo preprocessor on bp.CgoFiles and returns two +// lists of files: the resulting processed files (in temporary +// directory tmpdir) and the corresponding names of the unprocessed files. +// +// Run is adapted from (*builder).cgo in +// $GOROOT/src/cmd/go/build.go, but these features are unsupported: +// Objective C, CGOPKGPATH, CGO_FLAGS. +// +// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in +// to the cgo preprocessor. This in turn will set the // line comments +// referring to those files to use absolute paths. This is needed for +// go/packages using the legacy go list support so it is able to find +// the original files. +func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { + cgoCPPFLAGS, _, _, _ := cflags(bp, true) + _, cgoexeCFLAGS, _, _ := cflags(bp, false) + + if len(bp.CgoPkgConfig) > 0 { + pcCFLAGS, err := pkgConfigFlags(bp) + if err != nil { + return nil, nil, err + } + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + } + + // Allows including _cgo_export.h from .[ch] files in the package. + cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) + + // _cgo_gotypes.go (displayed "C") contains the type definitions. + files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) + displayFiles = append(displayFiles, "C") + for _, fn := range bp.CgoFiles { + // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. + f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") + files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) + displayFiles = append(displayFiles, fn) + } + + var cgoflags []string + if bp.Goroot && bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_runtime_cgo=false") + } + if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_syscall=false") + } + + var cgoFiles []string = bp.CgoFiles + if useabs { + cgoFiles = make([]string, len(bp.CgoFiles)) + for i := range cgoFiles { + cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) + } + } + + args := stringList( + "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", + cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, + ) + if false { + log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) + } + cmd := exec.Command(args[0], args[1:]...) 
+ cmd.Dir = pkgdir + cmd.Env = append(os.Environ(), "PWD="+pkgdir) + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) + } + + return files, displayFiles, nil +} + +// -- unmodified from 'go build' --------------------------------------- + +// Return the flags to use when invoking the C or C++ compilers, or cgo. +func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { + var defaults string + if def { + defaults = "-g -O2" + } + + cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) + cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) + cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) + ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) + return +} + +// envList returns the value of the given environment variable broken +// into fields, using the default value when the variable is empty. +func envList(key, def string) []string { + v := os.Getenv(key) + if v == "" { + v = def + } + return strings.Fields(v) +} + +// stringList's arguments should be a sequence of string or []string values. +// stringList flattens them into a single []string. +func stringList(args ...interface{}) []string { + var x []string + for _, arg := range args { + switch arg := arg.(type) { + case []string: + x = append(x, arg...) + case string: + x = append(x, arg) + default: + panic("stringList: invalid argument") + } + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go new file mode 100644 index 00000000..2455be54 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -0,0 +1,42 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgo + +import ( + "errors" + "fmt" + "go/build" + "os/exec" + "strings" +) + +// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. +func pkgConfig(mode string, pkgs []string) (flags []string, err error) { + cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) + out, err := cmd.Output() + if err != nil { + s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) + if len(out) > 0 { + s = fmt.Sprintf("%s: %s", s, out) + } + if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 { + s = fmt.Sprintf("%s\nstderr:\n%s", s, err.Stderr) + } + return nil, errors.New(s) + } + if len(out) > 0 { + flags = strings.Fields(string(out)) + } + return +} + +// pkgConfigFlags calls pkg-config if needed and returns the cflags +// needed to build the package. +func pkgConfigFlags(p *build.Package) (cflags []string, err error) { + if len(p.CgoPkgConfig) == 0 { + return nil, nil + } + return pkgConfig("--cflags", p.CgoPkgConfig) +} diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go new file mode 100644 index 00000000..e35b1fd7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/doc.go @@ -0,0 +1,202 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loader loads a complete Go program from source code, parsing +// and type-checking the initial packages plus their transitive closure +// of dependencies. 
The ASTs and the derived facts are retained for +// later use. +// +// Deprecated: This is an older API and does not have support +// for modules. Use golang.org/x/tools/go/packages instead. +// +// The package defines two primary types: Config, which specifies a +// set of initial packages to load and various other options; and +// Program, which is the result of successfully loading the packages +// specified by a configuration. +// +// The configuration can be set directly, but *Config provides various +// convenience methods to simplify the common cases, each of which can +// be called any number of times. Finally, these are followed by a +// call to Load() to actually load and type-check the program. +// +// var conf loader.Config +// +// // Use the command-line arguments to specify +// // a set of initial packages to load from source. +// // See FromArgsUsage for help. +// rest, err := conf.FromArgs(os.Args[1:], wantTests) +// +// // Parse the specified files and create an ad hoc package with path "foo". +// // All files must have the same 'package' declaration. +// conf.CreateFromFilenames("foo", "foo.go", "bar.go") +// +// // Create an ad hoc package with path "foo" from +// // the specified already-parsed files. +// // All ASTs must have the same 'package' declaration. +// conf.CreateFromFiles("foo", parsedFiles) +// +// // Add "runtime" to the set of packages to be loaded. +// conf.Import("runtime") +// +// // Adds "fmt" and "fmt_test" to the set of packages +// // to be loaded. "fmt" will include *_test.go files. +// conf.ImportWithTests("fmt") +// +// // Finally, load all the packages specified by the configuration. +// prog, err := conf.Load() +// +// See examples_test.go for examples of API usage. +// +// # CONCEPTS AND TERMINOLOGY +// +// The WORKSPACE is the set of packages accessible to the loader. The +// workspace is defined by Config.Build, a *build.Context. The +// default context treats subdirectories of $GOROOT and $GOPATH as +// packages, but this behavior may be overridden. +// +// An AD HOC package is one specified as a set of source files on the +// command line. In the simplest case, it may consist of a single file +// such as $GOROOT/src/net/http/triv.go. +// +// EXTERNAL TEST packages are those comprised of a set of *_test.go +// files all with the same 'package foo_test' declaration, all in the +// same directory. (go/build.Package calls these files XTestFiles.) +// +// An IMPORTABLE package is one that can be referred to by some import +// spec. Every importable package is uniquely identified by its +// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", +// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path +// typically denotes a subdirectory of the workspace. +// +// An import declaration uses an IMPORT PATH to refer to a package. +// Most import declarations use the package path as the import path. +// +// Due to VENDORING (https://golang.org/s/go15vendor), the +// interpretation of an import path may depend on the directory in which +// it appears. To resolve an import path to a package path, go/build +// must search the enclosing directories for a subdirectory named +// "vendor". +// +// ad hoc packages and external test packages are NON-IMPORTABLE. The +// path of an ad hoc package is inferred from the package +// declarations of its files and is therefore not a unique package key. +// For example, Config.CreatePkgs may specify two initial ad hoc +// packages, both with path "main". 
+// +// An AUGMENTED package is an importable package P plus all the +// *_test.go files with same 'package foo' declaration as P. +// (go/build.Package calls these files TestFiles.) +// +// The INITIAL packages are those specified in the configuration. A +// DEPENDENCY is a package loaded to satisfy an import in an initial +// package or another dependency. +package loader + +// IMPLEMENTATION NOTES +// +// 'go test', in-package test files, and import cycles +// --------------------------------------------------- +// +// An external test package may depend upon members of the augmented +// package that are not in the unaugmented package, such as functions +// that expose internals. (See bufio/export_test.go for an example.) +// So, the loader must ensure that for each external test package +// it loads, it also augments the corresponding non-test package. +// +// The import graph over n unaugmented packages must be acyclic; the +// import graph over n-1 unaugmented packages plus one augmented +// package must also be acyclic. ('go test' relies on this.) But the +// import graph over n augmented packages may contain cycles. +// +// First, all the (unaugmented) non-test packages and their +// dependencies are imported in the usual way; the loader reports an +// error if it detects an import cycle. +// +// Then, each package P for which testing is desired is augmented by +// the list P' of its in-package test files, by calling +// (*types.Checker).Files. This arrangement ensures that P' may +// reference definitions within P, but P may not reference definitions +// within P'. Furthermore, P' may import any other package, including +// ones that depend upon P, without an import cycle error. +// +// Consider two packages A and B, both of which have lists of +// in-package test files we'll call A' and B', and which have the +// following import graph edges: +// B imports A +// B' imports A +// A' imports B +// This last edge would be expected to create an error were it not +// for the special type-checking discipline above. +// Cycles of size greater than two are possible. For example: +// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" +// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" +// regexp/exec_test.go (package regexp) imports "compress/bzip2" +// +// +// Concurrency +// ----------- +// +// Let us define the import dependency graph as follows. Each node is a +// list of files passed to (Checker).Files at once. Many of these lists +// are the production code of an importable Go package, so those nodes +// are labelled by the package's path. The remaining nodes are +// ad hoc packages and lists of in-package *_test.go files that augment +// an importable package; those nodes have no label. +// +// The edges of the graph represent import statements appearing within a +// file. An edge connects a node (a list of files) to the node it +// imports, which is importable and thus always labelled. +// +// Loading is controlled by this dependency graph. +// +// To reduce I/O latency, we start loading a package's dependencies +// asynchronously as soon as we've parsed its files and enumerated its +// imports (scanImports). This performs a preorder traversal of the +// import dependency graph. +// +// To exploit hardware parallelism, we type-check unrelated packages in +// parallel, where "unrelated" means not ordered by the partial order of +// the import dependency graph. 
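+//
+// (For example, two packages with disjoint transitive imports are
+// unordered by this partial order and may be type-checked in parallel.)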
+// +// We use a concurrency-safe non-blocking cache (importer.imported) to +// record the results of type-checking, whether success or failure. An +// entry is created in this cache by startLoad the first time the +// package is imported. The first goroutine to request an entry becomes +// responsible for completing the task and broadcasting completion to +// subsequent requestors, which block until then. +// +// Type checking occurs in (parallel) postorder: we cannot type-check a +// set of files until we have loaded and type-checked all of their +// immediate dependencies (and thus all of their transitive +// dependencies). If the input were guaranteed free of import cycles, +// this would be trivial: we could simply wait for completion of the +// dependencies and then invoke the typechecker. +// +// But as we saw in the 'go test' section above, some cycles in the +// import graph over packages are actually legal, so long as the +// cycle-forming edge originates in the in-package test files that +// augment the package. This explains why the nodes of the import +// dependency graph are not packages, but lists of files: the unlabelled +// nodes avoid the cycles. Consider packages A and B where B imports A +// and A's in-package tests AT import B. The naively constructed import +// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but +// the graph over lists of files is AT --> B --> A, where AT is an +// unlabelled node. +// +// Awaiting completion of the dependencies in a cyclic graph would +// deadlock, so we must materialize the import dependency graph (as +// importer.graph) and check whether each import edge forms a cycle. If +// x imports y, and the graph already contains a path from y to x, then +// there is an import cycle, in which case the processing of x must not +// wait for the completion of processing of y. +// +// When the type-checker makes a callback (doImport) to the loader for a +// given import edge, there are two possible cases. In the normal case, +// the dependency has already been completely type-checked; doImport +// does a cache lookup and returns it. In the cyclic case, the entry in +// the cache is still necessarily incomplete, indicating a cycle. We +// perform the cycle check again to obtain the error message, and return +// the error. +// +// The result of using concurrency is about a 2.5x speedup for stdlib_test. diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go new file mode 100644 index 00000000..2d4865f6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -0,0 +1,1064 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +// See doc.go for package documentation and implementation notes. + +import ( + "errors" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/internal/cgo" +) + +var ignoreVendor build.ImportMode + +const trace = false // show timing info for type-checking + +// Config specifies the configuration for loading a whole program from +// Go source code. +// The zero value for Config is a ready-to-use default configuration. +type Config struct { + // Fset is the file set for the parser to use when loading the + // program. 
If nil, it may be lazily initialized by any
+	// method of Config.
+	Fset *token.FileSet
+
+	// ParserMode specifies the mode to be used by the parser when
+	// loading source packages.
+	ParserMode parser.Mode
+
+	// TypeChecker contains options relating to the type checker.
+	//
+	// The supplied IgnoreFuncBodies is not used; the effective
+	// value comes from the TypeCheckFuncBodies func below.
+	// The supplied Import function is not used either.
+	TypeChecker types.Config
+
+	// TypeCheckFuncBodies is a predicate over package paths.
+	// A package for which the predicate is false will
+	// have its package-level declarations type checked, but not
+	// its function bodies; this can be used to quickly load
+	// dependencies from source. If nil, all func bodies are type
+	// checked.
+	TypeCheckFuncBodies func(path string) bool
+
+	// If Build is non-nil, it is used to locate source packages.
+	// Otherwise &build.Default is used.
+	//
+	// By default, cgo is invoked to preprocess Go files that
+	// import the fake package "C". This behaviour can be
+	// disabled by setting CGO_ENABLED=0 in the environment prior
+	// to startup, or by setting Build.CgoEnabled=false.
+	Build *build.Context
+
+	// The current directory, used for resolving relative package
+	// references such as "./go/loader". If empty, os.Getwd will be
+	// used instead.
+	Cwd string
+
+	// If DisplayPath is non-nil, it is used to transform each
+	// file name obtained from Build.Import(). This can be used
+	// to prevent a virtualized build.Config's file names from
+	// leaking into the user interface.
+	DisplayPath func(path string) string
+
+	// If AllowErrors is true, Load will return a Program even
+	// if some of its packages contained I/O, parser or type
+	// errors; such errors are accessible via PackageInfo.Errors. If
+	// false, Load will fail if any package had an error.
+	AllowErrors bool
+
+	// CreatePkgs specifies a list of non-importable initial
+	// packages to create. The resulting packages will appear in
+	// the corresponding elements of the Program.Created slice.
+	CreatePkgs []PkgSpec
+
+	// ImportPkgs specifies a set of initial packages to load.
+	// The map keys are package paths.
+	//
+	// The map value indicates whether to load tests. If true, Load
+	// will add and type-check two lists of files to the package:
+	// non-test files followed by in-package *_test.go files. In
+	// addition, it will append the external test package (if any)
+	// to Program.Created.
+	ImportPkgs map[string]bool
+
+	// FindPackage is called during Load to create the build.Package
+	// for a given import path from a given directory.
+	// If FindPackage is nil, (*build.Context).Import is used.
+	// A client may use this hook to adapt to a proprietary build
+	// system that does not follow the "go build" layout
+	// conventions, for example.
+	//
+	// It must be safe to call concurrently from multiple goroutines.
+	FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error)
+
+	// AfterTypeCheck is called immediately after a list of files
+	// has been type-checked and appended to info.Files.
+	//
+	// This optional hook function is the earliest opportunity for
+	// the client to observe the output of the type checker,
+	// which may be useful to reduce analysis latency when loading
+	// a large program.
+ // + // The function is permitted to modify info.Info, for instance + // to clear data structures that are no longer needed, which can + // dramatically reduce peak memory consumption. + // + // The function may be called twice for the same PackageInfo: + // once for the files of the package and again for the + // in-package test files. + // + // It must be safe to call concurrently from multiple goroutines. + AfterTypeCheck func(info *PackageInfo, files []*ast.File) +} + +// A PkgSpec specifies a non-importable package to be created by Load. +// Files are processed first, but typically only one of Files and +// Filenames is provided. The path needn't be globally unique. +// +// For vendoring purposes, the package's directory is the one that +// contains the first file. +type PkgSpec struct { + Path string // package path ("" => use package declaration) + Files []*ast.File // ASTs of already-parsed files + Filenames []string // names of files to be parsed +} + +// A Program is a Go program loaded from source as specified by a Config. +type Program struct { + Fset *token.FileSet // the file set for this program + + // Created[i] contains the initial package whose ASTs or + // filenames were supplied by Config.CreatePkgs[i], followed by + // the external test package, if any, of each package in + // Config.ImportPkgs ordered by ImportPath. + // + // NOTE: these files must not import "C". Cgo preprocessing is + // only performed on imported packages, not ad hoc packages. + // + // TODO(adonovan): we need to copy and adapt the logic of + // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make + // Config.Import and Config.Create methods return the same kind + // of entity, essentially a build.Package. + // Perhaps we can even reuse that type directly. + Created []*PackageInfo + + // Imported contains the initially imported packages, + // as specified by Config.ImportPkgs. + Imported map[string]*PackageInfo + + // AllPackages contains the PackageInfo of every package + // encountered by Load: all initial packages and all + // dependencies, including incomplete ones. + AllPackages map[*types.Package]*PackageInfo + + // importMap is the canonical mapping of package paths to + // packages. It contains all Imported initial packages, but not + // Created ones, and all imported dependencies. + importMap map[string]*types.Package +} + +// PackageInfo holds the ASTs and facts derived by the type-checker +// for a single package. +// +// Not mutated once exposed via the API. +type PackageInfo struct { + Pkg *types.Package + Importable bool // true if 'import "Pkg.Path()"' would resolve to this + TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors + Files []*ast.File // syntax trees for the package's files + Errors []error // non-nil if the package had errors + types.Info // type-checker deductions. + dir string // package directory + + checker *types.Checker // transient type-checker state + errorFunc func(error) +} + +func (info *PackageInfo) String() string { return info.Pkg.Path() } + +func (info *PackageInfo) appendError(err error) { + if info.errorFunc != nil { + info.errorFunc(err) + } else { + fmt.Fprintln(os.Stderr, err) + } + info.Errors = append(info.Errors, err) +} + +func (conf *Config) fset() *token.FileSet { + if conf.Fset == nil { + conf.Fset = token.NewFileSet() + } + return conf.Fset +} + +// ParseFile is a convenience function (intended for testing) that invokes +// the parser using the Config's FileSet, which is initialized if nil. 
+// +// src specifies the parser input as a string, []byte, or io.Reader, and +// filename is its apparent name. If src is nil, the contents of +// filename are read from the file system. +func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { + // TODO(adonovan): use conf.build() etc like parseFiles does. + return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) +} + +// FromArgsUsage is a partial usage message that applications calling +// FromArgs may wish to include in their -help output. +const FromArgsUsage = ` + is a list of arguments denoting a set of initial packages. +It may take one of two forms: + +1. A list of *.go source files. + + All of the specified files are loaded, parsed and type-checked + as a single package. All the files must belong to the same directory. + +2. A list of import paths, each denoting a package. + + The package's directory is found relative to the $GOROOT and + $GOPATH using similar logic to 'go build', and the *.go files in + that directory are loaded, parsed and type-checked as a single + package. + + In addition, all *_test.go files in the directory are then loaded + and parsed. Those files whose package declaration equals that of + the non-*_test.go files are included in the primary package. Test + files whose package declaration ends with "_test" are type-checked + as another package, the 'external' test package, so that a single + import path may denote two packages. (Whether this behaviour is + enabled is tool-specific, and may depend on additional flags.) + +A '--' argument terminates the list of packages. +` + +// FromArgs interprets args as a set of initial packages to load from +// source and updates the configuration. It returns the list of +// unconsumed arguments. +// +// It is intended for use in command-line interfaces that require a +// set of initial packages to be specified; see FromArgsUsage message +// for details. +// +// Only superficial errors are reported at this stage; errors dependent +// on I/O are detected during Load. +func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) { + var rest []string + for i, arg := range args { + if arg == "--" { + rest = args[i+1:] + args = args[:i] + break // consume "--" and return the remaining args + } + } + + if len(args) > 0 && strings.HasSuffix(args[0], ".go") { + // Assume args is a list of a *.go files + // denoting a single ad hoc package. + for _, arg := range args { + if !strings.HasSuffix(arg, ".go") { + return nil, fmt.Errorf("named files must be .go files: %s", arg) + } + } + conf.CreateFromFilenames("", args...) + } else { + // Assume args are directories each denoting a + // package and (perhaps) an external test, iff xtest. + for _, arg := range args { + if xtest { + conf.ImportWithTests(arg) + } else { + conf.Import(arg) + } + } + } + + return rest, nil +} + +// CreateFromFilenames is a convenience function that adds +// a conf.CreatePkgs entry to create a package of the specified *.go +// files. +func (conf *Config) CreateFromFilenames(path string, filenames ...string) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames}) +} + +// CreateFromFiles is a convenience function that adds a conf.CreatePkgs +// entry to create package of the specified path and parsed files. 
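+//
+// A small sketch (f1 and f2 are hypothetical, previously parsed
+// *ast.File values sharing one package declaration):
+//
+//	conf.CreateFromFiles("example/adhoc", f1, f2)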
+func (conf *Config) CreateFromFiles(path string, files ...*ast.File) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files}) +} + +// ImportWithTests is a convenience function that adds path to +// ImportPkgs, the set of initial source packages located relative to +// $GOPATH. The package will be augmented by any *_test.go files in +// its directory that contain a "package x" (not "package x_test") +// declaration. +// +// In addition, if any *_test.go files contain a "package x_test" +// declaration, an additional package comprising just those files will +// be added to CreatePkgs. +func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } + +// Import is a convenience function that adds path to ImportPkgs, the +// set of initial packages that will be imported from source. +func (conf *Config) Import(path string) { conf.addImport(path, false) } + +func (conf *Config) addImport(path string, tests bool) { + if path == "C" { + return // ignore; not a real package + } + if conf.ImportPkgs == nil { + conf.ImportPkgs = make(map[string]bool) + } + conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests +} + +// PathEnclosingInterval returns the PackageInfo and ast.Node that +// contain source interval [start, end), and all the node's ancestors +// up to the AST root. It searches all ast.Files of all packages in prog. +// exact is defined as for astutil.PathEnclosingInterval. +// +// The zero value is returned if not found. +func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { + for _, info := range prog.AllPackages { + for _, f := range info.Files { + if f.FileStart == token.NoPos { + // Workaround for #70162 (undefined FileStart). + // TODO(adonovan): delete once go1.24 is assured. + continue + } + if !tokenFileContainsPos(prog.Fset.File(f.FileStart), start) { + continue + } + if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { + return info, path, exact + } + } + } + return nil, nil, false +} + +// InitialPackages returns a new slice containing the set of initial +// packages (Created + Imported) in unspecified order. +func (prog *Program) InitialPackages() []*PackageInfo { + infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) + infos = append(infos, prog.Created...) + for _, info := range prog.Imported { + infos = append(infos, info) + } + return infos +} + +// Package returns the ASTs and results of type checking for the +// specified package. +func (prog *Program) Package(path string) *PackageInfo { + if info, ok := prog.AllPackages[prog.importMap[path]]; ok { + return info + } + for _, info := range prog.Created { + if path == info.Pkg.Path() { + return info + } + } + return nil +} + +// ---------- Implementation ---------- + +// importer holds the working state of the algorithm. +type importer struct { + conf *Config // the client configuration + start time.Time // for logging + + progMu sync.Mutex // guards prog + prog *Program // the resulting program + + // findpkg is a memoization of FindPackage. + findpkgMu sync.Mutex // guards findpkg + findpkg map[findpkgKey]*findpkgValue + + importedMu sync.Mutex // guards imported + imported map[string]*importInfo // all imported packages (incl. failures) by import path + + // import dependency graph: graph[x][y] => x imports y + // + // Since non-importable packages cannot be cyclic, we ignore + // their imports, thus we only need the subgraph over importable + // packages. 
Nodes are identified by their import paths.
+	graphMu sync.Mutex
+	graph   map[string]map[string]bool
+}
+
+type findpkgKey struct {
+	importPath string
+	fromDir    string
+	mode       build.ImportMode
+}
+
+type findpkgValue struct {
+	ready chan struct{} // closed to broadcast readiness
+	bp    *build.Package
+	err   error
+}
+
+// importInfo tracks the success or failure of a single import.
+//
+// Upon completion, exactly one of info and err is non-nil:
+// info on successful creation of a package, err otherwise.
+// A successful package may still contain type errors.
+type importInfo struct {
+	path     string        // import path
+	info     *PackageInfo  // results of typechecking (including errors)
+	complete chan struct{} // closed to broadcast that info is set.
+}
+
+// awaitCompletion blocks until ii is complete,
+// i.e. the info field is safe to inspect.
+func (ii *importInfo) awaitCompletion() {
+	<-ii.complete // wait for close
+}
+
+// Complete marks ii as complete.
+// Its info and err fields will not be subsequently updated.
+func (ii *importInfo) Complete(info *PackageInfo) {
+	if info == nil {
+		panic("info == nil")
+	}
+	ii.info = info
+	close(ii.complete)
+}
+
+type importError struct {
+	path string // import path
+	err  error  // reason for failure to create a package
+}
+
+// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
+// loading their dependency packages as needed.
+//
+// On success, Load returns a Program containing a PackageInfo for
+// each package. On failure, it returns an error.
+//
+// If AllowErrors is true, Load will return a Program even if some
+// packages contained I/O, parser or type errors, or if dependencies
+// were missing. (Such errors are accessible via PackageInfo.Errors.) If
+// false, Load will fail if any package had an error.
+//
+// It is an error if no packages were loaded.
+func (conf *Config) Load() (*Program, error) {
+	// Create a simple default error handler for parse/type errors.
+	if conf.TypeChecker.Error == nil {
+		conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
+	}
+
+	// Set default working directory for relative package references.
+	if conf.Cwd == "" {
+		var err error
+		conf.Cwd, err = os.Getwd()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Install default FindPackage hook using go/build logic.
+	if conf.FindPackage == nil {
+		conf.FindPackage = (*build.Context).Import
+	}
+
+	prog := &Program{
+		Fset:        conf.fset(),
+		Imported:    make(map[string]*PackageInfo),
+		importMap:   make(map[string]*types.Package),
+		AllPackages: make(map[*types.Package]*PackageInfo),
+	}
+
+	imp := importer{
+		conf:     conf,
+		prog:     prog,
+		findpkg:  make(map[findpkgKey]*findpkgValue),
+		imported: make(map[string]*importInfo),
+		start:    time.Now(),
+		graph:    make(map[string]map[string]bool),
+	}
+
+	// -- loading proper (concurrent phase) --------------------------------
+
+	var errpkgs []string // packages that contained errors
+
+	// Load the initially imported packages and their dependencies,
+	// in parallel.
+	// No vendor check on packages imported from the command line.
+	infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
+	for _, ie := range importErrors {
+		conf.TypeChecker.Error(ie.err) // failed to create package
+		errpkgs = append(errpkgs, ie.path)
+	}
+	for _, info := range infos {
+		prog.Imported[info.Pkg.Path()] = info
+	}
+
+	// Augment the designated initial packages by their tests.
+	// Dependencies are loaded in parallel.
+ var xtestPkgs []*build.Package + for importPath, augment := range conf.ImportPkgs { + if !augment { + continue + } + + // No vendor check on packages imported from command line. + bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor) + if err != nil { + // Package not found, or can't even parse package declaration. + // Already reported by previous loop; ignore it. + continue + } + + // Needs external test package? + if len(bp.XTestGoFiles) > 0 { + xtestPkgs = append(xtestPkgs, bp) + } + + // Consult the cache using the canonical package path. + path := bp.ImportPath + imp.importedMu.Lock() // (unnecessary, we're sequential here) + ii, ok := imp.imported[path] + // Paranoid checks added due to issue #11012. + if !ok { + // Unreachable. + // The previous loop called importAll and thus + // startLoad for each path in ImportPkgs, which + // populates imp.imported[path] with a non-zero value. + panic(fmt.Sprintf("imported[%q] not found", path)) + } + if ii == nil { + // Unreachable. + // The ii values in this loop are the same as in + // the previous loop, which enforced the invariant + // that at least one of ii.err and ii.info is non-nil. + panic(fmt.Sprintf("imported[%q] == nil", path)) + } + if ii.info == nil { + // Unreachable. + // awaitCompletion has the postcondition + // ii.info != nil. + panic(fmt.Sprintf("imported[%q].info = nil", path)) + } + info := ii.info + imp.importedMu.Unlock() + + // Parse the in-package test files. + files, errs := imp.conf.parsePackageFiles(bp, 't') + for _, err := range errs { + info.appendError(err) + } + + // The test files augmenting package P cannot be imported, + // but may import packages that import P, + // so we must disable the cycle check. + imp.addFiles(info, files, false) + } + + createPkg := func(path, dir string, files []*ast.File, errs []error) { + info := imp.newPackageInfo(path, dir) + for _, err := range errs { + info.appendError(err) + } + + // Ad hoc packages are non-importable, + // so no cycle check is needed. + // addFiles loads dependencies in parallel. + imp.addFiles(info, files, false) + prog.Created = append(prog.Created, info) + } + + // Create packages specified by conf.CreatePkgs. + for _, cp := range conf.CreatePkgs { + files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) + files = append(files, cp.Files...) + + path := cp.Path + if path == "" { + if len(files) > 0 { + path = files[0].Name.Name + } else { + path = "(unnamed)" + } + } + + dir := conf.Cwd + if len(files) > 0 && files[0].Pos().IsValid() { + dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) + } + createPkg(path, dir, files, errs) + } + + // Create external test packages. + sort.Sort(byImportPath(xtestPkgs)) + for _, bp := range xtestPkgs { + files, errs := imp.conf.parsePackageFiles(bp, 'x') + createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) + } + + // -- finishing up (sequential) ---------------------------------------- + + if len(prog.Imported)+len(prog.Created) == 0 { + return nil, errors.New("no initial packages were loaded") + } + + // Create infos for indirectly imported packages. + // e.g. incomplete packages without syntax, loaded from export data. + for _, obj := range prog.importMap { + info := prog.AllPackages[obj] + if info == nil { + prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} + } else { + // finished + info.checker = nil + info.errorFunc = nil + } + } + + if !conf.AllowErrors { + // Report errors in indirectly imported packages. 
+ for _, info := range prog.AllPackages { + if len(info.Errors) > 0 { + errpkgs = append(errpkgs, info.Pkg.Path()) + } + } + if errpkgs != nil { + var more string + if len(errpkgs) > 3 { + more = fmt.Sprintf(" and %d more", len(errpkgs)-3) + errpkgs = errpkgs[:3] + } + return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", + strings.Join(errpkgs, ", "), more) + } + } + + markErrorFreePackages(prog.AllPackages) + + return prog, nil +} + +type byImportPath []*build.Package + +func (b byImportPath) Len() int { return len(b) } +func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } +func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// markErrorFreePackages sets the TransitivelyErrorFree flag on all +// applicable packages. +func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { + // Build the transpose of the import graph. + importedBy := make(map[*types.Package]map[*types.Package]bool) + for P := range allPackages { + for _, Q := range P.Imports() { + clients, ok := importedBy[Q] + if !ok { + clients = make(map[*types.Package]bool) + importedBy[Q] = clients + } + clients[P] = true + } + } + + // Find all packages reachable from some error package. + reachable := make(map[*types.Package]bool) + var visit func(*types.Package) + visit = func(p *types.Package) { + if !reachable[p] { + reachable[p] = true + for q := range importedBy[p] { + visit(q) + } + } + } + for _, info := range allPackages { + if len(info.Errors) > 0 { + visit(info.Pkg) + } + } + + // Mark the others as "transitively error-free". + for _, info := range allPackages { + if !reachable[info.Pkg] { + info.TransitivelyErrorFree = true + } + } +} + +// build returns the effective build context. +func (conf *Config) build() *build.Context { + if conf.Build != nil { + return conf.Build + } + return &build.Default +} + +// parsePackageFiles enumerates the files belonging to package path, +// then loads, parses and returns them, plus a list of I/O or parse +// errors that were encountered. +// +// 'which' indicates which files to include: +// +// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) +// 't': include in-package *_test.go source files (TestGoFiles) +// 'x': include external *_test.go source files. (XTestGoFiles) +func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { + if bp.ImportPath == "unsafe" { + return nil, nil + } + var filenames []string + switch which { + case 'g': + filenames = bp.GoFiles + case 't': + filenames = bp.TestGoFiles + case 'x': + filenames = bp.XTestGoFiles + default: + panic(which) + } + + files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) + + // Preprocess CgoFiles and parse the outputs (sequentially). + if which == 'g' && bp.CgoFiles != nil { + cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) + if err != nil { + errs = append(errs, err) + } else { + files = append(files, cgofiles...) + } + } + + return files, errs +} + +// doImport imports the package denoted by path. +// It implements the types.Importer signature. +// +// It returns an error if a package could not be created +// (e.g. go/build or parse error), but type errors are reported via +// the types.Config.Error callback (the first of which is also saved +// in the package's PackageInfo). +// +// Idempotent. 
+func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { + if to == "C" { + // This should be unreachable, but ad hoc packages are + // not currently subject to cgo preprocessing. + // See https://golang.org/issue/11627. + return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, + from.Pkg.Path()) + } + + bp, err := imp.findPackage(to, from.dir, 0) + if err != nil { + return nil, err + } + + // The standard unsafe package is handled specially, + // and has no PackageInfo. + if bp.ImportPath == "unsafe" { + return types.Unsafe, nil + } + + // Look for the package in the cache using its canonical path. + path := bp.ImportPath + imp.importedMu.Lock() + ii := imp.imported[path] + imp.importedMu.Unlock() + if ii == nil { + panic("internal error: unexpected import: " + path) + } + if ii.info != nil { + return ii.info.Pkg, nil + } + + // Import of incomplete package: this indicates a cycle. + fromPath := from.Pkg.Path() + if cycle := imp.findPath(path, fromPath); cycle != nil { + // Normalize cycle: start from alphabetically largest node. + pos, start := -1, "" + for i, s := range cycle { + if pos < 0 || s > start { + pos, start = i, s + } + } + cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest + cycle = append(cycle, cycle[0]) // add start node to end to show cycliness + return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) + } + + panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) +} + +// findPackage locates the package denoted by the importPath in the +// specified directory. +func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { + // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) + // to avoid holding the lock around FindPackage. + key := findpkgKey{importPath, fromDir, mode} + imp.findpkgMu.Lock() + v, ok := imp.findpkg[key] + if ok { + // cache hit + imp.findpkgMu.Unlock() + + <-v.ready // wait for entry to become ready + } else { + // Cache miss: this goroutine becomes responsible for + // populating the map entry and broadcasting its readiness. + v = &findpkgValue{ready: make(chan struct{})} + imp.findpkg[key] = v + imp.findpkgMu.Unlock() + + ioLimit <- true + v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) + <-ioLimit + + if _, ok := v.err.(*build.NoGoError); ok { + v.err = nil // empty directory is not an error + } + + close(v.ready) // broadcast ready condition + } + return v.bp, v.err +} + +// importAll loads, parses, and type-checks the specified packages in +// parallel and returns their completed importInfos in unspecified order. +// +// fromPath is the package path of the importing package, if it is +// importable, "" otherwise. It is used for cycle detection. +// +// fromDir is the directory containing the import declaration that +// caused these imports. +func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { + if fromPath != "" { + // We're loading a set of imports. + // + // We must record graph edges from the importing package + // to its dependencies, and check for cycles. 
+ imp.graphMu.Lock() + deps, ok := imp.graph[fromPath] + if !ok { + deps = make(map[string]bool) + imp.graph[fromPath] = deps + } + for importPath := range imports { + deps[importPath] = true + } + imp.graphMu.Unlock() + } + + var pending []*importInfo + for importPath := range imports { + if fromPath != "" { + if cycle := imp.findPath(importPath, fromPath); cycle != nil { + // Cycle-forming import: we must not check it + // since it would deadlock. + if trace { + fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) + } + continue + } + } + bp, err := imp.findPackage(importPath, fromDir, mode) + if err != nil { + errors = append(errors, importError{ + path: importPath, + err: err, + }) + continue + } + pending = append(pending, imp.startLoad(bp)) + } + + for _, ii := range pending { + ii.awaitCompletion() + infos = append(infos, ii.info) + } + + return infos, errors +} + +// findPath returns an arbitrary path from 'from' to 'to' in the import +// graph, or nil if there was none. +func (imp *importer) findPath(from, to string) []string { + imp.graphMu.Lock() + defer imp.graphMu.Unlock() + + seen := make(map[string]bool) + var search func(stack []string, importPath string) []string + search = func(stack []string, importPath string) []string { + if !seen[importPath] { + seen[importPath] = true + stack = append(stack, importPath) + if importPath == to { + return stack + } + for x := range imp.graph[importPath] { + if p := search(stack, x); p != nil { + return p + } + } + } + return nil + } + return search(make([]string, 0, 20), from) +} + +// startLoad initiates the loading, parsing and type-checking of the +// specified package and its dependencies, if it has not already begun. +// +// It returns an importInfo, not necessarily in a completed state. The +// caller must call awaitCompletion() before accessing its info field. +// +// startLoad is concurrency-safe and idempotent. +func (imp *importer) startLoad(bp *build.Package) *importInfo { + path := bp.ImportPath + imp.importedMu.Lock() + ii, ok := imp.imported[path] + if !ok { + ii = &importInfo{path: path, complete: make(chan struct{})} + imp.imported[path] = ii + go func() { + info := imp.load(bp) + ii.Complete(info) + }() + } + imp.importedMu.Unlock() + + return ii +} + +// load implements package loading by parsing Go source files +// located by go/build. +func (imp *importer) load(bp *build.Package) *PackageInfo { + info := imp.newPackageInfo(bp.ImportPath, bp.Dir) + info.Importable = true + files, errs := imp.conf.parsePackageFiles(bp, 'g') + for _, err := range errs { + info.appendError(err) + } + + imp.addFiles(info, files, true) + + imp.progMu.Lock() + imp.prog.importMap[bp.ImportPath] = info.Pkg + imp.progMu.Unlock() + + return info +} + +// addFiles adds and type-checks the specified files to info, loading +// their dependencies if needed. The order of files determines the +// package initialization order. It may be called multiple times on the +// same package. Errors are appended to the info.Errors field. +// +// cycleCheck determines whether the imports within files create +// dependency edges that should be checked for potential cycles. +func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { + // Ensure the dependencies are loaded, in parallel. + var fromPath string + if cycleCheck { + fromPath = info.Pkg.Path() + } + // TODO(adonovan): opt: make the caller do scanImports. + // Callers with a build.Package can skip it. 
+ imp.importAll(fromPath, info.dir, scanImports(files), 0) + + if trace { + fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", + time.Since(imp.start), info.Pkg.Path(), len(files)) + } + + // Don't call checker.Files on Unsafe, even with zero files, + // because it would mutate the package, which is a global. + if info.Pkg == types.Unsafe { + if len(files) > 0 { + panic(`"unsafe" package contains unexpected files`) + } + } else { + // Ignore the returned (first) error since we + // already collect them all in the PackageInfo. + info.checker.Files(files) + info.Files = append(info.Files, files...) + } + + if imp.conf.AfterTypeCheck != nil { + imp.conf.AfterTypeCheck(info, files) + } + + if trace { + fmt.Fprintf(os.Stderr, "%s: stop %q\n", + time.Since(imp.start), info.Pkg.Path()) + } +} + +func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { + var pkg *types.Package + if path == "unsafe" { + pkg = types.Unsafe + } else { + pkg = types.NewPackage(path, "") + } + info := &PackageInfo{ + Pkg: pkg, + Info: types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + }, + errorFunc: imp.conf.TypeChecker.Error, + dir: dir, + } + + // Copy the types.Config so we can vary it across PackageInfos. + tc := imp.conf.TypeChecker + tc.IgnoreFuncBodies = false + if f := imp.conf.TypeCheckFuncBodies; f != nil { + tc.IgnoreFuncBodies = !f(path) + } + tc.Importer = closure{imp, info} + tc.Error = info.appendError // appendError wraps the user's Error function + + info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) + imp.progMu.Lock() + imp.prog.AllPackages[pkg] = info + imp.progMu.Unlock() + return info +} + +type closure struct { + imp *importer + info *PackageInfo +} + +func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go new file mode 100644 index 00000000..3a80acae --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/util.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +import ( + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "os" + "strconv" + "sync" + + "golang.org/x/tools/go/buildutil" +) + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 10) + +// parseFiles parses the Go source files within directory dir and +// returns the ASTs of the ones that could be at least partially parsed, +// along with a list of I/O and parse errors encountered. +// +// I/O is done via ctxt, which may specify a virtual file system. +// displayPath is used to transform the filenames attached to the ASTs. 
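+//
+// A displayPath function might, for instance, strip a virtual file
+// system prefix so that diagnostics refer to workspace-relative paths.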
+func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { + if displayPath == nil { + displayPath = func(path string) string { return path } + } + var wg sync.WaitGroup + n := len(files) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range files { + if !buildutil.IsAbsPath(ctxt, file) { + file = buildutil.JoinPath(ctxt, dir, file) + } + wg.Add(1) + go func(i int, file string) { + ioLimit <- true // wait + defer func() { + wg.Done() + <-ioLimit // signal + }() + var rd io.ReadCloser + var err error + if ctxt.OpenFile != nil { + rd, err = ctxt.OpenFile(file) + } else { + rd, err = os.Open(file) + } + if err != nil { + errors[i] = err // open failed + return + } + + // ParseFile may return both an AST and an error. + parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) + rd.Close() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// scanImports returns the set of all import paths from all +// import specs in the specified files. +func scanImports(files []*ast.File) map[string]bool { + imports := make(map[string]bool) + for _, f := range files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { + for _, spec := range decl.Specs { + spec := spec.(*ast.ImportSpec) + + // NB: do not assume the program is well-formed! + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // quietly ignore the error + } + if path == "C" { + continue // skip pseudopackage + } + imports[path] = true + } + } + } + } + return imports +} + +// ---------- Internal helpers ---------- + +// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) +func tokenFileContainsPos(f *token.File, pos token.Pos) bool { + p := int(pos) + base := f.Base() + return base <= p && p < base+f.Size() +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 00000000..7348c50c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000..b50c6e87 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000..acf71402 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
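+// The input source may be set only once per parser; a second call panics.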
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
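+// plain_implicit and quoted_implicit record whether the tag may be left
+// implicit (omitted) when the scalar is emitted in plain or in quoted
+// style, respectively, mirroring libyaml.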
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
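+//
+// Each node records its kind (document, mapping, sequence, scalar or
+// alias), source position, tag and children; anchors seen while parsing
+// are stored on the enclosing document node so that aliases can be
+// resolved immediately.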
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
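+		// (An empty input reaches this case too: newParser substitutes
+		// a lone newline, which yields no document events.)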
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
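+// The again loop repeatedly allocates and dereferences pointers until a
+// non-pointer value is reached, so fields of type *T (or **T) can be
+// filled in.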
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
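+		// A document such as "&a [*a]" reaches this point: the alias
+		// resolves to the very node that is being decoded.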
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
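+		// A []interface{} of the right length is decoded into, then
+		// assigned to the interface value once complete.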
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
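+// The buffer_pos+5 checks here and above conservatively reserve room
+// for the widest UTF-8 sequence (4 bytes), so a single flush check
+// suffices before each write.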
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
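+ // Copying does at least keep the stored directive independent of
+ // caller-owned buffers that may later be reused.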
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
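+//
+// Applies defaults for encoding, indentation, line width and line
+// breaks, then moves the emitter to the first-document state.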
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
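+//
+// A simple key is written inline; otherwise an explicit '?' indicator
+// precedes it.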
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
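+//
+// As in the flow case, simple keys are written directly and complex
+// keys use the explicit '?' form.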
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
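+//
+// Flow style ("[a, b]") is used inside flow context, in canonical
+// mode, when the event asks for it, or for an empty sequence; block
+// style ("- item") otherwise.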
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
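+//
+// The requested style is downgraded as needed: plain falls back to
+// single-quoted, and single-quoted or block styles fall back to
+// double-quoted, whenever the analyzed scalar disallows them.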
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
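+//
+// Dispatches on the style previously chosen by
+// yaml_emitter_select_scalar_style.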
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
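+//
+// Records in emitter.scalar_data which styles (plain, single-quoted,
+// block) the value can safely be written in.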
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
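+ // (Presumably just for symmetry with the scanner; the loop condition
+ // keeps i within the value, so the end-of-input case is moot here.)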
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
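+//
+// The mark is always emitted as the UTF-8 byte order mark (EF BB BF).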
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + 
"regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
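+//
+// As a rough sketch, parsing the single implicit document "a: 1" steps the
+// machine through roughly the following event/state sequence:
+//
+//      STREAM-START   -> yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+//      DOCUMENT-START -> yaml_PARSE_BLOCK_NODE_STATE
+//      MAPPING-START  -> yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+//      SCALAR("a")    -> yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+//      SCALAR("1")    -> yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+//      MAPPING-END    -> yaml_PARSE_DOCUMENT_END_STATE
+//      DOCUMENT-END   -> yaml_PARSE_DOCUMENT_START_STATE
+//      STREAM-END     -> yaml_PARSE_END_STATE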
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
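+// An empty scalar stands in for a node that the grammar allows but the input
+// omits: for example, the block mapping entry "a:" produces a SCALAR event
+// with a nil value for the missing value node, anchored just after the ':'.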
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
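+// The UTF-8 BOM is three bytes long and the UTF-16 BOMs are two; a leading
+// BOM, if present, is consumed by yaml_parser_determine_encoding below.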
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
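+	// (On the first call this also consumes a leading byte order mark,
+	// if one is present.)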
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
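+				// (This rejects overlong encodings: for example, the
+				// two-byte sequence 0xC0 0x80 decodes to 0x00, which
+				// must have been encoded as a single byte.)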
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
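+			// (width is the byte length of the character just decoded,
+			// in its source encoding: 1-4 for UTF-8, 2 or 4 for UTF-16.)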
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
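+			// (For example, the plain scalar 2001-12-14 resolves to a
+			// time.Time, while the same text in quotes stays a string.)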
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..0b9bb603
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...'
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
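+	// Editorial illustration (not part of the upstream source): for the
+	// input "- foo", this pass consumes nothing, the '-' is matched by the
+	// block entry check below, and BLOCK-ENTRY is queued before the plain
+	// scalar "foo" is scanned on the next call.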
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
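+// For example (editorial note, mirroring the traces above): in "a: 1" a
+// potential simple key is saved before the plain scalar "a" is scanned;
+// when ':' is reached, yaml_parser_fetch_value inserts the KEY token
+// retroactively in front of the already queued SCALAR("a",plain).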
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
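+		// Editorial note (not part of the upstream source): a number > -1
+		// is the absolute token number of a saved simple key; subtracting
+		// tokens_parsed below turns it into an index into the live queue,
+		// while -1 simply appends.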
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
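+	// Editorial note (not part of the upstream source): both indicators
+	// are exactly three characters long ('---' or '...'), hence the three
+	// consecutive skips below.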
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
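+	// Editorial illustration (not part of the upstream source): in
+	// "- a\n- b" every '-' at column 0 reaches this point; only the first
+	// one rolled the indent and queued BLOCK-SEQUENCE-START above.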
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
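+	// Editorial illustration (not part of the upstream source): for "a: 1"
+	// the queue now already holds KEY and SCALAR("a",plain), so the VALUE
+	// token created here lands directly after the retro-inserted pair.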
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
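+	// Editorial illustration (not part of the upstream source): in flow
+	// context such as "[a,\tb]" the tab is skipped like a space; in block
+	// context a tab where a simple key could start is left in place and
+	// rejected later by the token dispatcher.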
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
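+	// Editorial illustration (not part of the upstream source): for
+	// "%TAG !yaml! tag:yaml.org,2002:" this function yields the handle
+	// "!yaml!" and the prefix "tag:yaml.org,2002:".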
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
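+		// Editorial illustration (not part of the upstream source): a
+		// verbatim tag such as !<tag:example.com,2000:app> reaches this
+		// point with an empty handle and the full URI in the suffix.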
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
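+	// Editorial illustration (not part of the upstream source): '%' starts
+	// a URI escape, so "foo%20bar" is accumulated as "foo bar" via
+	// yaml_parser_scan_uri_escapes below.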
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
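+		// Editorial illustration (not part of the upstream source): "|+"
+		// keeps trailing line breaks (chomping = +1), "|-" strips them
+		// (chomping = -1), and a bare "|" clips to a single trailing
+		// newline (chomping = 0).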
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
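+		// Editorial illustration (not part of the upstream source): for the
+		// folded scalar ">\n a\n b\n" the break after "a" was folded into a
+		// space above, while runs of blank lines survive here verbatim.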
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&&
+		parser.buffer[parser.buffer_pos+2] == '.')) &&
+		is_blankz(parser.buffer, parser.buffer_pos+3) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected document indicator")
+		return false
+	}
+
+	// Check for EOF.
+	if is_z(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected end of stream")
+		return false
+	}
+
+	// Consume non-blank characters.
+	leading_blanks := false
+	for !is_blankz(parser.buffer, parser.buffer_pos) {
+		if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+			// It is an escaped single quote.
+			s = append(s, '\'')
+			skip(parser)
+			skip(parser)
+
+		} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+			// It is a right single quote.
+			break
+		} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+			// It is a right double quote.
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+			// It is an escaped line break.
+			if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+				return false
+			}
+			skip(parser)
+			skip_line(parser)
+			leading_blanks = true
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+			// It is an escape sequence.
+			code_length := 0
+
+			// Check the escape character.
+			switch parser.buffer[parser.buffer_pos+1] {
+			case '0':
+				s = append(s, 0)
+			case 'a':
+				s = append(s, '\x07')
+			case 'b':
+				s = append(s, '\x08')
+			case 't', '\t':
+				s = append(s, '\x09')
+			case 'n':
+				s = append(s, '\x0A')
+			case 'v':
+				s = append(s, '\x0B')
+			case 'f':
+				s = append(s, '\x0C')
+			case 'r':
+				s = append(s, '\x0D')
+			case 'e':
+				s = append(s, '\x1B')
+			case ' ':
+				s = append(s, '\x20')
+			case '"':
+				s = append(s, '"')
+			case '\'':
+				s = append(s, '\'')
+			case '\\':
+				s = append(s, '\\')
+			case 'N': // NEL (#x85)
+				s = append(s, '\xC2')
+				s = append(s, '\x85')
+			case '_': // #xA0
+				s = append(s, '\xC2')
+				s = append(s, '\xA0')
+			case 'L': // LS (#x2028)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA8')
+			case 'P': // PS (#x2029)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA9')
+			case 'x':
+				code_length = 2
+			case 'u':
+				code_length = 4
+			case 'U':
+				code_length = 8
+			default:
+				yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+					start_mark, "found unknown escape character")
+				return false
+			}
+
+			skip(parser)
+			skip(parser)
+
+			// Consume an arbitrary escape code.
+			if code_length > 0 {
+				var value int
+
+				// Scan the character value.
+				if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+					return false
+				}
+				for k := 0; k < code_length; k++ {
+					if !is_hex(parser.buffer, parser.buffer_pos+k) {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "did not find expected hexadecimal number")
+						return false
+					}
+					value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+				}
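The range check and hand-rolled UTF-8 encoding that follow are equivalent to unicode/utf8.EncodeRune for every code point that passes the surrogate test; a standalone sanity check (encode copies the same branches):

package main

import (
	"fmt"
	"unicode/utf8"
)

// encode mirrors the scanner's branchy UTF-8 encoding of an escape value.
func encode(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	for _, r := range []rune{'A', 'é', '€', '𐍈'} { // 1-, 2-, 3- and 4-byte runes
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%U manual=% X stdlib=% X\n", r, encode(int(r)), buf[:n])
	}
}

+
+			// Check the value and write the character.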
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
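Plain scalars are the most delicate style: they end at indicators such as ": " or a comment, and interior line breaks fold into single spaces, which is why the loop above keeps separate leading/trailing break buffers before the code continues below. A tiny demonstration via the exported API:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v struct{ K string }
	// "two" is indented past "k:", so it continues the plain scalar and the
	// intervening line break folds into a single space.
	if err := yaml.Unmarshal([]byte("k: one\n  two\n"), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.K) // "one two"
}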
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000..4c45e660 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000..a2dde608 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
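The key sorter above (sorter.go) is what gives this package its deterministic map-key ordering when encoding: keys are compared rune by rune, and maximal digit runs are compared by numeric value rather than lexically. A quick sketch of the visible effect:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{
		"item10": 10,
		"item2":  2,
		"item1":  1,
	})
	if err != nil {
		panic(err)
	}
	// Digit runs compare numerically, so the keys emit as
	// item1, item2, item10 (not item1, item10, item2).
	fmt.Print(string(out))
}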
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..30813884
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
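The partial-decoding behaviour documented above is worth seeing once: values that decode cleanly are kept even when a sibling fails, and the returned *yaml.TypeError collects one message per miss. A minimal sketch using a variant of the doc comment's T:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type T struct {
	F int    `yaml:"a,omitempty"`
	B string `yaml:"b"`
}

func main() {
	var t T
	err := yaml.Unmarshal([]byte("a: not-a-number\nb: ok"), &t)
	fmt.Printf("%+v\n", t) // b decoded even though a failed: {F:0 B:ok}
	if te, ok := err.(*yaml.TypeError); ok {
		fmt.Println(te.Errors) // one entry describing the failed field
	}
}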
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
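Before the tag reference continues below, a compact sketch of omitempty, flow and inline working together (Item and Meta are invented types for illustration):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Item struct {
	Name   string            `yaml:"name"`
	Labels map[string]string `yaml:"labels,omitempty,flow"` // flow style; dropped when empty
	Meta   `yaml:",inline"`                                 // fields hoisted into the outer mapping
}

func main() {
	out, err := yaml.Marshal(Item{
		Name:   "disk",
		Labels: map[string]string{"tier": "ssd"},
		Meta:   Meta{Owner: "ops"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: disk
	// labels: {tier: ssd}
	// owner: ops
}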
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
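time.Time is the motivating case for IsZeroer: it is a struct with unexported fields that nevertheless has a meaningful zero value. Implementers are consulted by the omitempty logic, as in this sketch (Window is an invented type):

package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type Window struct {
	Start time.Time `yaml:"start,omitempty"` // time.Time implements IsZero() bool
	Note  string    `yaml:"note,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Window{Note: "start omitted while zero"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // note: start omitted while zero
}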
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..f6a9c8e3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+		multiline bool // Does the scalar contain line breaks?
+
+		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool // Can the scalar be expressed in the literal or folded styles?
+
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
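These predicates deliberately work on raw UTF-8 bytes so the hot scanning loops never decode runes. A standalone sketch of the line-break test (isBreak copies the byte patterns used above), checked against unicode/utf8:

package main

import (
	"fmt"
	"unicode/utf8"
)

// isBreak recognizes CR, LF, NEL (U+0085), LS (U+2028) and PS (U+2029)
// directly from their UTF-8 byte patterns, like the scanner's is_break.
func isBreak(b []byte, i int) bool {
	return b[i] == '\r' || b[i] == '\n' ||
		(b[i] == 0xC2 && b[i+1] == 0x85) ||
		(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) ||
		(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9)
}

func main() {
	for _, r := range []rune{'\n', '\u0085', '\u2028', '\u2029', 'x'} {
		b := make([]byte, utf8.UTFMax+2) // zero padding keeps i+2 in range
		utf8.EncodeRune(b, r)
		fmt.Printf("%U -> %v\n", r, isBreak(b, 0))
	}
}

+
+// Check if the beginning of the buffer is a BOM.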
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+// Check if the character at the specified position starts a CR LF pair.
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return ( // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fa231b51..7c6baf9a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2,6 +2,9 @@
 ## explicit; go 1.20
 filippo.io/edwards25519
 filippo.io/edwards25519/field
+# github.com/KyleBanks/depth v1.2.1
+## explicit
+github.com/KyleBanks/depth
 # github.com/NathanBaulch/protoc-gen-cobra v1.2.1
 ## explicit; go 1.18
 github.com/NathanBaulch/protoc-gen-cobra/client
@@ -9,6 +12,10 @@ github.com/NathanBaulch/protoc-gen-cobra/flag
 github.com/NathanBaulch/protoc-gen-cobra/iocodec
 github.com/NathanBaulch/protoc-gen-cobra/naming
 github.com/NathanBaulch/protoc-gen-cobra/ptypes
+# github.com/PuerkitoBio/purell v1.2.1
+## explicit; go 1.21
+# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
+## explicit
 # github.com/bits-and-blooms/bitset v1.15.0
 ## explicit; go 1.16
 github.com/bits-and-blooms/bitset
@@ -37,9 +44,25 @@ github.com/davecgh/go-spew/spew
 # github.com/fxamacker/cbor/v2 v2.7.0
 ## explicit; go 1.17
 github.com/fxamacker/cbor/v2
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
 # github.com/go-mail/mail/v2 v2.3.0
 ## explicit
 github.com/go-mail/mail/v2
+# github.com/go-openapi/jsonpointer v0.21.0
+## explicit; go 1.20
+github.com/go-openapi/jsonpointer
+# github.com/go-openapi/jsonreference v0.21.0
+## explicit; go 1.20
+github.com/go-openapi/jsonreference
+github.com/go-openapi/jsonreference/internal
+# github.com/go-openapi/spec v0.21.0
+## explicit; go 1.20
+github.com/go-openapi/spec
+# github.com/go-openapi/swag v0.23.0
+## explicit; go 1.20
+github.com/go-openapi/swag
 # github.com/go-sql-driver/mysql v1.8.1
 ## explicit; go 1.18
 github.com/go-sql-driver/mysql
@@ -72,19 +95,27 @@ github.com/jinzhu/inflection
 # github.com/jinzhu/now v1.1.5
 ## explicit; go 1.12
 github.com/jinzhu/now
+# github.com/josharian/intern v1.0.0
+## explicit; go 1.5
+github.com/josharian/intern
 # github.com/jszwec/csvutil v1.10.0
 ## explicit; go 1.18
 github.com/jszwec/csvutil
 # github.com/kilic/bls12-381 v0.1.0
 ## explicit; go 1.13
 github.com/kilic/bls12-381
-# github.com/labstack/echo/v4 v4.12.0
+# github.com/labstack/echo/v4 v4.13.1
 ## explicit; go 1.18
 github.com/labstack/echo/v4
 # github.com/labstack/gommon v0.4.2
 ## explicit; go 1.18
 github.com/labstack/gommon/color
 github.com/labstack/gommon/log
+# github.com/mailru/easyjson v0.7.7
+## explicit; go 1.12
+github.com/mailru/easyjson/buffer
+github.com/mailru/easyjson/jlexer
+github.com/mailru/easyjson/jwriter
 # github.com/mattn/go-colorable v0.1.13
 ## explicit; go 1.15
 github.com/mattn/go-colorable
@@ -165,6 +196,15 @@ github.com/spf13/pflag
 # github.com/stretchr/testify v1.9.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+# github.com/swaggo/echo-swagger v1.4.1
+## explicit; go 1.17
+github.com/swaggo/echo-swagger
+# github.com/swaggo/files/v2 v2.0.1
+## explicit; go 1.16
+github.com/swaggo/files/v2
+# github.com/swaggo/swag v1.16.4
+## explicit; go 1.18
+github.com/swaggo/swag
 # github.com/tyler-smith/go-bip39 v1.1.0
 ## explicit; go 1.14
 github.com/tyler-smith/go-bip39
@@ -181,7 +221,7 @@ github.com/x448/float16
 # go.uber.org/mock v0.5.0
 ## explicit; go 1.22
 go.uber.org/mock/gomock
-# golang.org/x/crypto v0.29.0
+# golang.org/x/crypto v0.31.0
 ## explicit; go 1.20
 golang.org/x/crypto/acme
 golang.org/x/crypto/acme/autocert
@@ -198,7 +238,7 @@ golang.org/x/crypto/salsa20/salsa
 ## explicit; go 1.22.0
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
-# golang.org/x/net v0.31.0
+# golang.org/x/net v0.32.0
 ## explicit; go 1.18
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -207,12 +247,12 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/sys v0.27.0
+# golang.org/x/sys v0.28.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/text v0.20.0
+# golang.org/x/text v0.21.0
 ## explicit; go 1.18
 golang.org/x/text/cases
 golang.org/x/text/internal
@@ -224,6 +264,12 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
+# golang.org/x/tools v0.28.0
+## explicit; go 1.22.0
+golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/buildutil
+golang.org/x/tools/go/internal/cgo
+golang.org/x/tools/go/loader
 # google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/api/httpbody
@@ -338,6 +384,9 @@ gopkg.in/natefinch/lumberjack.v2
 # gopkg.in/telebot.v3 v3.3.8
 ## explicit; go 1.16
 gopkg.in/telebot.v3
+# gopkg.in/yaml.v2 v2.4.0
+## explicit; go 1.15
+gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
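
The yamlprivateh.go helpers vendored above classify raw UTF-8 bytes rather than decoded runes, so callers advance through the buffer by the width of each encoded sequence. A minimal standalone sketch of that access pattern (the loop, sample input, and main function are illustrative only, not part of this patch):

package main

import "fmt"

// width mirrors the vendored helper: it reports the byte length of the
// UTF-8 sequence that starts with b, or 0 for a continuation/invalid byte.
func width(b byte) int {
	if b&0x80 == 0x00 {
		return 1 // ASCII
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0 // continuation or invalid leading byte
}

func main() {
	buf := []byte("a\u00e9\u2028") // 1-, 2-, and 3-byte sequences
	for i := 0; i < len(buf); {
		w := width(buf[i])
		if w == 0 {
			break // malformed input; real callers surface an error instead
		}
		fmt.Printf("offset %d: %d-byte rune\n", i, w)
		i += w
	}
}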
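
The swaggo/echo-swagger, swaggo/files/v2, and swaggo/swag entries added to vendor/modules.txt back the generated Swagger handler. For orientation, a hedged sketch of how such a handler is typically mounted on an Echo server; the route path, port, and docs import path below are assumptions for illustration, not this patch's actual wiring:

package main

import (
	"github.com/labstack/echo/v4"
	echoSwagger "github.com/swaggo/echo-swagger"
	// _ "example.com/yourproject/docs" // hypothetical path: the swag-generated docs package registers the spec in its init()
)

func main() {
	e := echo.New()
	// echoSwagger.WrapHandler serves the Swagger UI and the registered spec.
	e.GET("/swagger/*", echoSwagger.WrapHandler)
	e.Logger.Fatal(e.Start(":8080"))
}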