From e4ca1a3f19378ecd3d04f1d00e8d4100995a7c2d Mon Sep 17 00:00:00 2001 From: Reuben Miller Date: Wed, 2 Oct 2024 09:09:14 +0200 Subject: [PATCH] chore: remove unused vendor folder --- .../github.com/cihub/seelog/LICENSE.txt | 24 - .../github.com/cihub/seelog/README.markdown | 116 - .../cihub/seelog/archive/archive.go | 197 -- .../cihub/seelog/archive/gzip/gzip.go | 64 - .../cihub/seelog/archive/tar/tar.go | 72 - .../cihub/seelog/archive/zip/zip.go | 89 - .../cihub/seelog/behavior_adaptivelogger.go | 129 - .../cihub/seelog/behavior_asynclogger.go | 142 -- .../cihub/seelog/behavior_asynclooplogger.go | 69 - .../cihub/seelog/behavior_asynctimerlogger.go | 82 - .../cihub/seelog/behavior_synclogger.go | 75 - .../github.com/cihub/seelog/cfg_config.go | 212 -- .../github.com/cihub/seelog/cfg_errors.go | 61 - .../github.com/cihub/seelog/cfg_logconfig.go | 141 -- .../github.com/cihub/seelog/cfg_parser.go | 1269 ---------- .../github.com/cihub/seelog/common_closer.go | 25 - .../cihub/seelog/common_constraints.go | 162 -- .../github.com/cihub/seelog/common_context.go | 234 -- .../cihub/seelog/common_exception.go | 194 -- .../github.com/cihub/seelog/common_flusher.go | 31 - .../cihub/seelog/common_loglevel.go | 81 - .../cihub/seelog/dispatch_custom.go | 242 -- .../cihub/seelog/dispatch_dispatcher.go | 189 -- .../cihub/seelog/dispatch_filterdispatcher.go | 66 - .../cihub/seelog/dispatch_splitdispatcher.go | 47 - pkg/c8y/vendor/github.com/cihub/seelog/doc.go | 175 -- .../vendor/github.com/cihub/seelog/format.go | 466 ---- .../cihub/seelog/internals_baseerror.go | 10 - .../cihub/seelog/internals_fsutils.go | 320 --- .../cihub/seelog/internals_xmlnode.go | 175 -- pkg/c8y/vendor/github.com/cihub/seelog/log.go | 307 --- .../vendor/github.com/cihub/seelog/logger.go | 370 --- .../cihub/seelog/writers_bufferedwriter.go | 161 -- .../cihub/seelog/writers_connwriter.go | 144 -- .../cihub/seelog/writers_consolewriter.go | 47 - .../cihub/seelog/writers_filewriter.go | 92 - .../cihub/seelog/writers_formattedwriter.go | 62 - .../cihub/seelog/writers_rollingfilewriter.go | 762 ------ .../cihub/seelog/writers_smtpwriter.go | 214 -- .../github.com/google/go-querystring/LICENSE | 27 - .../google/go-querystring/query/encode.go | 320 --- .../github.com/gorilla/websocket/.gitignore | 25 - .../github.com/gorilla/websocket/.travis.yml | 19 - .../github.com/gorilla/websocket/AUTHORS | 9 - .../github.com/gorilla/websocket/LICENSE | 22 - .../github.com/gorilla/websocket/README.md | 64 - .../github.com/gorilla/websocket/client.go | 394 --- .../gorilla/websocket/client_clone.go | 16 - .../gorilla/websocket/client_clone_legacy.go | 38 - .../gorilla/websocket/compression.go | 148 -- .../github.com/gorilla/websocket/conn.go | 1164 --------- .../gorilla/websocket/conn_write.go | 15 - .../gorilla/websocket/conn_write_legacy.go | 18 - .../github.com/gorilla/websocket/doc.go | 180 -- .../github.com/gorilla/websocket/json.go | 60 - .../github.com/gorilla/websocket/mask.go | 54 - .../github.com/gorilla/websocket/mask_safe.go | 15 - .../github.com/gorilla/websocket/prepared.go | 102 - .../github.com/gorilla/websocket/proxy.go | 77 - .../github.com/gorilla/websocket/server.go | 363 --- .../github.com/gorilla/websocket/trace.go | 19 - .../github.com/gorilla/websocket/trace_17.go | 12 - .../github.com/gorilla/websocket/util.go | 237 -- .../gorilla/websocket/x_net_proxy.go | 473 ---- .../github.com/obeattie/ohmyglob/.travis.yml | 18 - .../github.com/obeattie/ohmyglob/LICENSE | 19 - .../github.com/obeattie/ohmyglob/README.md | 38 - 
.../github.com/obeattie/ohmyglob/glob.go | 267 --- .../github.com/obeattie/ohmyglob/globset.go | 112 - .../github.com/obeattie/ohmyglob/tokeniser.go | 176 -- .../github.com/obeattie/ohmyglob/utils.go | 116 - .../vendor/github.com/pkg/errors/.gitignore | 24 - .../vendor/github.com/pkg/errors/.travis.yml | 15 - pkg/c8y/vendor/github.com/pkg/errors/LICENSE | 23 - .../vendor/github.com/pkg/errors/README.md | 52 - .../vendor/github.com/pkg/errors/appveyor.yml | 32 - .../vendor/github.com/pkg/errors/errors.go | 282 --- pkg/c8y/vendor/github.com/pkg/errors/stack.go | 147 -- .../github.com/tidwall/gjson/.travis.yml | 1 - .../vendor/github.com/tidwall/gjson/LICENSE | 20 - .../vendor/github.com/tidwall/gjson/README.md | 401 ---- .../vendor/github.com/tidwall/gjson/gjson.go | 2110 ----------------- .../github.com/tidwall/gjson/gjson_gae.go | 10 - .../github.com/tidwall/gjson/gjson_ngae.go | 73 - .../vendor/github.com/tidwall/gjson/logo.png | Bin 15936 -> 0 bytes .../github.com/tidwall/match/.travis.yml | 1 - .../vendor/github.com/tidwall/match/LICENSE | 20 - .../vendor/github.com/tidwall/match/README.md | 32 - .../vendor/github.com/tidwall/match/match.go | 192 -- .../vendor/go.uber.org/atomic/.codecov.yml | 15 - pkg/c8y/vendor/go.uber.org/atomic/.gitignore | 11 - pkg/c8y/vendor/go.uber.org/atomic/.travis.yml | 23 - pkg/c8y/vendor/go.uber.org/atomic/LICENSE.txt | 19 - pkg/c8y/vendor/go.uber.org/atomic/Makefile | 64 - pkg/c8y/vendor/go.uber.org/atomic/README.md | 36 - pkg/c8y/vendor/go.uber.org/atomic/atomic.go | 351 --- pkg/c8y/vendor/go.uber.org/atomic/glide.lock | 17 - pkg/c8y/vendor/go.uber.org/atomic/glide.yaml | 6 - pkg/c8y/vendor/go.uber.org/atomic/string.go | 49 - .../vendor/go.uber.org/multierr/.codecov.yml | 15 - .../vendor/go.uber.org/multierr/.gitignore | 1 - .../vendor/go.uber.org/multierr/.travis.yml | 33 - .../vendor/go.uber.org/multierr/CHANGELOG.md | 28 - .../vendor/go.uber.org/multierr/LICENSE.txt | 19 - pkg/c8y/vendor/go.uber.org/multierr/Makefile | 74 - pkg/c8y/vendor/go.uber.org/multierr/README.md | 23 - pkg/c8y/vendor/go.uber.org/multierr/error.go | 401 ---- .../vendor/go.uber.org/multierr/glide.lock | 19 - .../vendor/go.uber.org/multierr/glide.yaml | 8 - pkg/c8y/vendor/go.uber.org/zap/.codecov.yml | 17 - pkg/c8y/vendor/go.uber.org/zap/.gitignore | 28 - pkg/c8y/vendor/go.uber.org/zap/.readme.tmpl | 108 - pkg/c8y/vendor/go.uber.org/zap/.travis.yml | 21 - pkg/c8y/vendor/go.uber.org/zap/CHANGELOG.md | 305 --- .../vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 - .../vendor/go.uber.org/zap/CONTRIBUTING.md | 81 - pkg/c8y/vendor/go.uber.org/zap/FAQ.md | 155 -- pkg/c8y/vendor/go.uber.org/zap/LICENSE.txt | 19 - pkg/c8y/vendor/go.uber.org/zap/Makefile | 76 - pkg/c8y/vendor/go.uber.org/zap/README.md | 136 -- pkg/c8y/vendor/go.uber.org/zap/array.go | 320 --- .../vendor/go.uber.org/zap/buffer/buffer.go | 115 - pkg/c8y/vendor/go.uber.org/zap/buffer/pool.go | 49 - .../vendor/go.uber.org/zap/check_license.sh | 17 - pkg/c8y/vendor/go.uber.org/zap/config.go | 243 -- pkg/c8y/vendor/go.uber.org/zap/doc.go | 113 - pkg/c8y/vendor/go.uber.org/zap/encoder.go | 75 - pkg/c8y/vendor/go.uber.org/zap/error.go | 80 - pkg/c8y/vendor/go.uber.org/zap/field.go | 310 --- pkg/c8y/vendor/go.uber.org/zap/flag.go | 39 - pkg/c8y/vendor/go.uber.org/zap/glide.lock | 76 - pkg/c8y/vendor/go.uber.org/zap/glide.yaml | 35 - pkg/c8y/vendor/go.uber.org/zap/global.go | 169 -- .../vendor/go.uber.org/zap/http_handler.go | 81 - .../zap/internal/bufferpool/bufferpool.go | 31 - .../go.uber.org/zap/internal/color/color.go | 44 - 
.../go.uber.org/zap/internal/exit/exit.go | 64 - pkg/c8y/vendor/go.uber.org/zap/level.go | 132 -- pkg/c8y/vendor/go.uber.org/zap/logger.go | 305 --- pkg/c8y/vendor/go.uber.org/zap/options.go | 109 - pkg/c8y/vendor/go.uber.org/zap/sink.go | 161 -- pkg/c8y/vendor/go.uber.org/zap/stacktrace.go | 126 - pkg/c8y/vendor/go.uber.org/zap/sugar.go | 304 --- pkg/c8y/vendor/go.uber.org/zap/time.go | 27 - pkg/c8y/vendor/go.uber.org/zap/writer.go | 98 - .../zap/zapcore/console_encoder.go | 147 -- .../vendor/go.uber.org/zap/zapcore/core.go | 113 - pkg/c8y/vendor/go.uber.org/zap/zapcore/doc.go | 24 - .../vendor/go.uber.org/zap/zapcore/encoder.go | 348 --- .../vendor/go.uber.org/zap/zapcore/entry.go | 257 -- .../vendor/go.uber.org/zap/zapcore/error.go | 120 - .../vendor/go.uber.org/zap/zapcore/field.go | 201 -- .../vendor/go.uber.org/zap/zapcore/hook.go | 68 - .../go.uber.org/zap/zapcore/json_encoder.go | 502 ---- .../vendor/go.uber.org/zap/zapcore/level.go | 175 -- .../go.uber.org/zap/zapcore/level_strings.go | 46 - .../go.uber.org/zap/zapcore/marshaler.go | 53 - .../go.uber.org/zap/zapcore/memory_encoder.go | 179 -- .../vendor/go.uber.org/zap/zapcore/sampler.go | 134 -- pkg/c8y/vendor/go.uber.org/zap/zapcore/tee.go | 81 - .../go.uber.org/zap/zapcore/write_syncer.go | 123 - pkg/c8y/vendor/golang.org/x/net/AUTHORS | 3 - pkg/c8y/vendor/golang.org/x/net/CONTRIBUTORS | 3 - pkg/c8y/vendor/golang.org/x/net/LICENSE | 27 - pkg/c8y/vendor/golang.org/x/net/PATENTS | 22 - .../golang.org/x/net/context/context.go | 56 - .../vendor/golang.org/x/net/context/go17.go | 72 - .../vendor/golang.org/x/net/context/go19.go | 20 - .../golang.org/x/net/context/pre_go17.go | 300 --- .../golang.org/x/net/context/pre_go19.go | 109 - pkg/c8y/vendor/gopkg.in/tomb.v2/LICENSE | 29 - pkg/c8y/vendor/gopkg.in/tomb.v2/README.md | 4 - pkg/c8y/vendor/gopkg.in/tomb.v2/context.go | 74 - pkg/c8y/vendor/gopkg.in/tomb.v2/context16.go | 74 - pkg/c8y/vendor/gopkg.in/tomb.v2/tomb.go | 237 -- pkg/c8y/vendor/modules.txt | 33 - 176 files changed, 24360 deletions(-) delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/LICENSE.txt delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/README.markdown delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/archive/archive.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/archive/gzip/gzip.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/archive/tar/tar.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/archive/zip/zip.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclogger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/behavior_synclogger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/cfg_config.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/cfg_errors.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/cfg_logconfig.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/cfg_parser.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/common_closer.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/common_constraints.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/common_context.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/common_exception.go delete mode 100644 
pkg/c8y/vendor/github.com/cihub/seelog/common_flusher.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/common_loglevel.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/dispatch_custom.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/dispatch_dispatcher.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/doc.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/format.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/internals_baseerror.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/internals_fsutils.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/internals_xmlnode.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/log.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/logger.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_bufferedwriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_connwriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_consolewriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_filewriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_formattedwriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go delete mode 100644 pkg/c8y/vendor/github.com/cihub/seelog/writers_smtpwriter.go delete mode 100644 pkg/c8y/vendor/github.com/google/go-querystring/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/google/go-querystring/query/encode.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/.gitignore delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/.travis.yml delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/AUTHORS delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/README.md delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/client.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/client_clone.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/client_clone_legacy.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/compression.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/conn.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/conn_write.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/conn_write_legacy.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/doc.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/json.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/mask.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/mask_safe.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/prepared.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/proxy.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/server.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/trace.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/trace_17.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/util.go delete mode 100644 pkg/c8y/vendor/github.com/gorilla/websocket/x_net_proxy.go delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/.travis.yml delete mode 100644 
pkg/c8y/vendor/github.com/obeattie/ohmyglob/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/README.md delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/glob.go delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/globset.go delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/tokeniser.go delete mode 100644 pkg/c8y/vendor/github.com/obeattie/ohmyglob/utils.go delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/.gitignore delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/.travis.yml delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/README.md delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/errors.go delete mode 100644 pkg/c8y/vendor/github.com/pkg/errors/stack.go delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/.travis.yml delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/README.md delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/gjson.go delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/gjson_gae.go delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/gjson_ngae.go delete mode 100644 pkg/c8y/vendor/github.com/tidwall/gjson/logo.png delete mode 100644 pkg/c8y/vendor/github.com/tidwall/match/.travis.yml delete mode 100644 pkg/c8y/vendor/github.com/tidwall/match/LICENSE delete mode 100644 pkg/c8y/vendor/github.com/tidwall/match/README.md delete mode 100644 pkg/c8y/vendor/github.com/tidwall/match/match.go delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/.gitignore delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/.travis.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/LICENSE.txt delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/Makefile delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/README.md delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/atomic.go delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/glide.lock delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/glide.yaml delete mode 100644 pkg/c8y/vendor/go.uber.org/atomic/string.go delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/.codecov.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/.gitignore delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/.travis.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/CHANGELOG.md delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/LICENSE.txt delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/Makefile delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/README.md delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/error.go delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/glide.lock delete mode 100644 pkg/c8y/vendor/go.uber.org/multierr/glide.yaml delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/.codecov.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/.gitignore delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/.readme.tmpl delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/.travis.yml delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/CHANGELOG.md delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/CONTRIBUTING.md delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/FAQ.md delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/LICENSE.txt delete mode 100644 
pkg/c8y/vendor/go.uber.org/zap/Makefile delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/README.md delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/array.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/buffer/buffer.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/buffer/pool.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/check_license.sh delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/config.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/doc.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/encoder.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/error.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/field.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/flag.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/glide.lock delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/glide.yaml delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/global.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/http_handler.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/internal/color/color.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/internal/exit/exit.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/level.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/logger.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/options.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/sink.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/stacktrace.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/sugar.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/time.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/writer.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/console_encoder.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/core.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/doc.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/encoder.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/entry.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/error.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/field.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/hook.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/json_encoder.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/level.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/level_strings.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/marshaler.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/memory_encoder.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/sampler.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/tee.go delete mode 100644 pkg/c8y/vendor/go.uber.org/zap/zapcore/write_syncer.go delete mode 100644 pkg/c8y/vendor/golang.org/x/net/AUTHORS delete mode 100644 pkg/c8y/vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 pkg/c8y/vendor/golang.org/x/net/LICENSE delete mode 100644 pkg/c8y/vendor/golang.org/x/net/PATENTS delete mode 100644 pkg/c8y/vendor/golang.org/x/net/context/context.go delete mode 100644 pkg/c8y/vendor/golang.org/x/net/context/go17.go delete mode 100644 pkg/c8y/vendor/golang.org/x/net/context/go19.go delete mode 100644 pkg/c8y/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 pkg/c8y/vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 pkg/c8y/vendor/gopkg.in/tomb.v2/LICENSE delete mode 100644 pkg/c8y/vendor/gopkg.in/tomb.v2/README.md delete mode 100644 pkg/c8y/vendor/gopkg.in/tomb.v2/context.go 
delete mode 100644 pkg/c8y/vendor/gopkg.in/tomb.v2/context16.go delete mode 100644 pkg/c8y/vendor/gopkg.in/tomb.v2/tomb.go delete mode 100644 pkg/c8y/vendor/modules.txt diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/LICENSE.txt b/pkg/c8y/vendor/github.com/cihub/seelog/LICENSE.txt deleted file mode 100644 index bd5611d9..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/LICENSE.txt +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2012, Cloud Instruments Co., Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Cloud Instruments Co., Ltd. nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/README.markdown b/pkg/c8y/vendor/github.com/cihub/seelog/README.markdown deleted file mode 100644 index 7dd1ab35..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/README.markdown +++ /dev/null @@ -1,116 +0,0 @@ -Seelog -======= - -Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages. -It is natively written in the [Go](http://golang.org/) programming language. - -[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest) - -Features ------------------- - -* Xml configuring to be able to change logger parameters without recompilation -* Changing configurations on the fly without app restart -* Possibility to set different log configurations for different project files and functions -* Adjustable message formatting -* Simultaneous log output to multiple streams -* Choosing logger priority strategy to minimize performance hit -* Different output writers - * Console writer - * File writer - * Buffered writer (Chunk writer) - * Rolling log writer (Logging with rotation) - * SMTP writer - * Others... (See [Wiki](https://github.com/cihub/seelog/wiki)) -* Log message wrappers (JSON, XML, etc.) 
-* Global variables and functions for easy usage in standalone apps -* Functions for flexible usage in libraries - -Quick-start ------------ - -```go -package main - -import log "github.com/cihub/seelog" - -func main() { - defer log.Flush() - log.Info("Hello from Seelog!") -} -``` - -Installation ------------- - -If you don't have the Go development environment installed, visit the -[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command: - -``` -go get -u github.com/cihub/seelog -``` - -*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get') - -Documentation ---------------- - -Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki - -Examples ---------------- - -Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples) - -Issues ---------------- - -Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues - -Changelog ---------------- -* **v2.6** : Config using code and custom formatters - * Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported). - * Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters) - * Bugfixes and internal improvements. -* **v2.5** : Interaction with other systems. Part 2: custom receivers - * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers) - * Added 'LoggerFromCustomReceiver' - * Added 'LoggerFromWriterWithMinLevelAndFormat' - * Added 'LoggerFromCustomReceiver' - * Added 'LoggerFromParamConfigAs...' -* **v2.4** : Interaction with other systems. Part 1: wrapping seelog - * Added configurable caller stack skip logic - * Added 'SetAdditionalStackDepth' to 'LoggerInterface' -* **v2.3** : Rethinking 'rolling' receiver - * Reimplemented 'rolling' receiver - * Added 'Max rolls' feature for 'rolling' receiver with type='date' - * Fixed 'rolling' receiver issue: renaming on Windows -* **v2.2** : go1.0 compatibility point [go1.0 tag] - * Fixed internal bugs - * Added 'ANSI n [;k]' format identifier: %EscN - * Made current release go1 compatible -* **v2.1** : Some new features - * Rolling receiver archiving option. - * Added format identifier: %Line - * Smtp: added paths to PEM files directories - * Added format identifier: %FuncShort - * Warn, Error and Critical methods now return an error -* **v2.0** : Second major release. BREAKING CHANGES. - * Support of binaries with stripped symbols - * Added log strategy: adaptive - * Critical message now forces Flush() - * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast - * Added receiver: conn (network connection writer) - * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle - * Bug fixes -* **v1.0** : Initial release. 
Features: - * Xml config - * Changing configurations on the fly without app restart - * Contraints and exceptions - * Formatting - * Log strategies: sync, async loop, async timer - * Receivers: buffered, console, file, rolling, smtp - - - diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/archive/archive.go b/pkg/c8y/vendor/github.com/cihub/seelog/archive/archive.go deleted file mode 100644 index 69b04177..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/archive/archive.go +++ /dev/null @@ -1,197 +0,0 @@ -package archive - -import ( - "archive/tar" - "archive/zip" - "fmt" - "io" - "os" - "time" - - "github.com/cihub/seelog/archive/gzip" -) - -// Reader is the interface for reading files from an archive. -type Reader interface { - NextFile() (name string, err error) - io.Reader -} - -// ReadCloser is the interface that groups Reader with the Close method. -type ReadCloser interface { - Reader - io.Closer -} - -// Writer is the interface for writing files to an archived format. -type Writer interface { - NextFile(name string, fi os.FileInfo) error - io.Writer -} - -// WriteCloser is the interface that groups Writer with the Close method. -type WriteCloser interface { - Writer - io.Closer -} - -type nopCloser struct{ Reader } - -func (nopCloser) Close() error { return nil } - -// NopCloser returns a ReadCloser with a no-op Close method wrapping the -// provided Reader r. -func NopCloser(r Reader) ReadCloser { - return nopCloser{r} -} - -// Copy copies from src to dest until either EOF is reached on src or an error -// occurs. -// -// When the archive format of src matches that of dst, Copy streams the files -// directly into dst. Otherwise, copy buffers the contents to disk to compute -// headers before writing to dst. -func Copy(dst Writer, src Reader) error { - switch src := src.(type) { - case tarReader: - if dst, ok := dst.(tarWriter); ok { - return copyTar(dst, src) - } - case zipReader: - if dst, ok := dst.(zipWriter); ok { - return copyZip(dst, src) - } - // Switch on concrete type because gzip has no special methods - case *gzip.Reader: - if dst, ok := dst.(*gzip.Writer); ok { - _, err := io.Copy(dst, src) - return err - } - } - - return copyBuffer(dst, src) -} - -func copyBuffer(dst Writer, src Reader) (err error) { - const defaultFileMode = 0666 - - buf, err := os.CreateTemp("", "archive_copy_buffer") - if err != nil { - return err - } - defer os.Remove(buf.Name()) // Do not care about failure removing temp - defer buf.Close() // Do not care about failure closing temp - for { - // Handle the next file - name, err := src.NextFile() - switch err { - case io.EOF: // Done copying - return nil - default: // Failed to write: bail out - return err - case nil: // Proceed below - } - - // Buffer the file - if _, err := io.Copy(buf, src); err != nil { - return fmt.Errorf("buffer to disk: %v", err) - } - - // Seek to the start of the file for full file copy - if _, err := buf.Seek(0, os.SEEK_SET); err != nil { - return err - } - - // Set desired file permissions - if err := os.Chmod(buf.Name(), defaultFileMode); err != nil { - return err - } - fi, err := buf.Stat() - if err != nil { - return err - } - - // Write the buffered file - if err := dst.NextFile(name, fi); err != nil { - return err - } - if _, err := io.Copy(dst, buf); err != nil { - return fmt.Errorf("copy to dst: %v", err) - } - if err := buf.Truncate(0); err != nil { - return err - } - if _, err := buf.Seek(0, os.SEEK_SET); err != nil { - return err - } - } -} - -type tarReader interface { - Next() (*tar.Header, error) - 
io.Reader -} - -type tarWriter interface { - WriteHeader(hdr *tar.Header) error - io.Writer -} - -type zipReader interface { - Files() []*zip.File -} - -type zipWriter interface { - CreateHeader(fh *zip.FileHeader) (io.Writer, error) -} - -func copyTar(w tarWriter, r tarReader) error { - for { - hdr, err := r.Next() - switch err { - case io.EOF: - return nil - default: // Handle error - return err - case nil: // Proceed below - } - - info := hdr.FileInfo() - // Skip directories - if info.IsDir() { - continue - } - if err := w.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(w, r); err != nil { - return err - } - } -} - -func copyZip(zw zipWriter, r zipReader) error { - for _, f := range r.Files() { - if err := copyZipFile(zw, f); err != nil { - return err - } - } - return nil -} - -func copyZipFile(zw zipWriter, f *zip.File) error { - rc, err := f.Open() - if err != nil { - return err - } - defer rc.Close() // Read-only - - hdr := f.FileHeader - hdr.SetModTime(time.Now()) - w, err := zw.CreateHeader(&hdr) - if err != nil { - return err - } - _, err = io.Copy(w, rc) - return err -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/archive/gzip/gzip.go b/pkg/c8y/vendor/github.com/cihub/seelog/archive/gzip/gzip.go deleted file mode 100644 index ea121018..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/archive/gzip/gzip.go +++ /dev/null @@ -1,64 +0,0 @@ -// Package gzip implements reading and writing of gzip format compressed files. -// See the compress/gzip package for more details. -package gzip - -import ( - "compress/gzip" - "fmt" - "io" - "os" -) - -// Reader is an io.Reader that can be read to retrieve uncompressed data from a -// gzip-format compressed file. -type Reader struct { - gzip.Reader - name string - isEOF bool -} - -// NewReader creates a new Reader reading the given reader. -func NewReader(r io.Reader, name string) (*Reader, error) { - gr, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - return &Reader{ - Reader: *gr, - name: name, - }, nil -} - -// NextFile returns the file name. Calls subsequent to the first call will -// return EOF. -func (r *Reader) NextFile() (name string, err error) { - if r.isEOF { - return "", io.EOF - } - - r.isEOF = true - return r.name, nil -} - -// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w. -type Writer struct { - gzip.Writer - name string - noMoreFiles bool -} - -// NextFile never returns a next file, and should not be called more than once. -func (w *Writer) NextFile(name string, _ os.FileInfo) error { - if w.noMoreFiles { - return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name) - } - w.noMoreFiles = true - w.name = name - return nil -} - -// NewWriter returns a new Writer. Writes to the returned writer are compressed -// and written to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{Writer: *gzip.NewWriter(w)} -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/archive/tar/tar.go b/pkg/c8y/vendor/github.com/cihub/seelog/archive/tar/tar.go deleted file mode 100644 index 8dd87f57..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/archive/tar/tar.go +++ /dev/null @@ -1,72 +0,0 @@ -package tar - -import ( - "archive/tar" - "io" - "os" -) - -// Reader provides sequential access to the contents of a tar archive. -type Reader struct { - tar.Reader -} - -// NewReader creates a new Reader reading from r. 
-func NewReader(r io.Reader) *Reader { - return &Reader{Reader: *tar.NewReader(r)} -} - -// NextFile advances to the next file in the tar archive. -func (r *Reader) NextFile() (name string, err error) { - hdr, err := r.Next() - if err != nil { - return "", err - } - return hdr.Name, nil -} - -// Writer provides sequential writing of a tar archive in POSIX.1 format. -type Writer struct { - tar.Writer - closers []io.Closer -} - -// NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{Writer: *tar.NewWriter(w)} -} - -// NewWriteMultiCloser creates a new Writer writing to w that also closes all -// closers in order on close. -func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer { - return &Writer{ - Writer: *tar.NewWriter(w), - closers: closers, - } -} - -// NextFile computes and writes a header and prepares to accept the file's -// contents. -func (w *Writer) NextFile(name string, fi os.FileInfo) error { - if name == "" { - name = fi.Name() - } - hdr, err := tar.FileInfoHeader(fi, name) - if err != nil { - return err - } - hdr.Name = name - return w.WriteHeader(hdr) -} - -// Close closes the tar archive and all other closers, flushing any unwritten -// data to the underlying writer. -func (w *Writer) Close() error { - err := w.Writer.Close() - for _, c := range w.closers { - if cerr := c.Close(); cerr != nil && err == nil { - err = cerr - } - } - return err -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/archive/zip/zip.go b/pkg/c8y/vendor/github.com/cihub/seelog/archive/zip/zip.go deleted file mode 100644 index 4210b03b..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/archive/zip/zip.go +++ /dev/null @@ -1,89 +0,0 @@ -package zip - -import ( - "archive/zip" - "io" - "os" -) - -// Reader provides sequential access to the contents of a zip archive. -type Reader struct { - zip.Reader - unread []*zip.File - rc io.ReadCloser -} - -// NewReader returns a new Reader reading from r, which is assumed to have the -// given size in bytes. -func NewReader(r io.ReaderAt, size int64) (*Reader, error) { - zr, err := zip.NewReader(r, size) - if err != nil { - return nil, err - } - return &Reader{Reader: *zr}, nil -} - -// NextFile advances to the next file in the zip archive. -func (r *Reader) NextFile() (name string, err error) { - // Initialize unread - if r.unread == nil { - r.unread = r.Files()[:] - } - - // Close previous file - if r.rc != nil { - r.rc.Close() // Read-only - } - - if len(r.unread) == 0 { - return "", io.EOF - } - - // Open and return next unread - f := r.unread[0] - name, r.unread = f.Name, r.unread[1:] - r.rc, err = f.Open() - if err != nil { - return "", err - } - return name, nil -} - -func (r *Reader) Read(p []byte) (n int, err error) { - return r.rc.Read(p) -} - -// Files returns the full list of files in the zip archive. -func (r *Reader) Files() []*zip.File { - return r.File -} - -// Writer provides sequential writing of a zip archive.1 format. -type Writer struct { - zip.Writer - w io.Writer -} - -// NewWriter returns a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{Writer: *zip.NewWriter(w)} -} - -// NextFile computes and writes a header and prepares to accept the file's -// contents. 
-func (w *Writer) NextFile(name string, fi os.FileInfo) error { - if name == "" { - name = fi.Name() - } - hdr, err := zip.FileInfoHeader(fi) - if err != nil { - return err - } - hdr.Name = name - w.w, err = w.CreateHeader(hdr) - return err -} - -func (w *Writer) Write(p []byte) (n int, err error) { - return w.w.Write(p) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go b/pkg/c8y/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go deleted file mode 100644 index 0c640cae..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "math" - "time" -) - -var ( - adaptiveLoggerMaxInterval = time.Minute - adaptiveLoggerMaxCriticalMsgCount = uint32(1000) -) - -// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like -// an async timer logger, but its interval depends on the current message count -// in the queue. 
-// -// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c: -// I = m + (C - Min(c, C)) / C * (M - m) -type asyncAdaptiveLogger struct { - asyncLogger - minInterval time.Duration - criticalMsgCount uint32 - maxInterval time.Duration -} - -// NewAsyncLoopLogger creates a new asynchronous adaptive logger -func NewAsyncAdaptiveLogger( - config *logConfig, - minInterval time.Duration, - maxInterval time.Duration, - criticalMsgCount uint32) (*asyncAdaptiveLogger, error) { - - if minInterval <= 0 { - return nil, errors.New("async adaptive logger min interval should be > 0") - } - - if maxInterval > adaptiveLoggerMaxInterval { - return nil, fmt.Errorf("async adaptive logger max interval should be <= %s", - adaptiveLoggerMaxInterval) - } - - if criticalMsgCount <= 0 { - return nil, errors.New("async adaptive logger critical msg count should be > 0") - } - - if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount { - return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s", - adaptiveLoggerMaxInterval) - } - - asnAdaptiveLogger := new(asyncAdaptiveLogger) - - asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config) - asnAdaptiveLogger.minInterval = minInterval - asnAdaptiveLogger.maxInterval = maxInterval - asnAdaptiveLogger.criticalMsgCount = criticalMsgCount - - go asnAdaptiveLogger.processQueue() - - return asnAdaptiveLogger, nil -} - -func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) { - asnAdaptiveLogger.queueHasElements.L.Lock() - defer asnAdaptiveLogger.queueHasElements.L.Unlock() - - for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() { - asnAdaptiveLogger.queueHasElements.Wait() - } - - if asnAdaptiveLogger.Closed() { - return true, asnAdaptiveLogger.msgQueue.Len() - } - - asnAdaptiveLogger.processQueueElement() - return false, asnAdaptiveLogger.msgQueue.Len() - 1 -} - -// I = m + (C - Min(c, C)) / C * (M - m) => -// I = m + cDiff * mDiff, -// cDiff = (C - Min(c, C)) / C) -// mDiff = (M - m) -func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration { - critCountF := float64(asnAdaptiveLogger.criticalMsgCount) - cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF - mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval) - - return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff) -} - -func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() { - for !asnAdaptiveLogger.Closed() { - closed, itemCount := asnAdaptiveLogger.processItem() - - if closed { - break - } - - interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount) - - <-time.After(interval) - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclogger.go b/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclogger.go deleted file mode 100644 index 75231067..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclogger.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. 
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "container/list" - "fmt" - "sync" -) - -// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush. -const ( - MaxQueueSize = 10000 -) - -type msgQueueItem struct { - level LogLevel - context LogContextInterface - message fmt.Stringer -} - -// asyncLogger represents common data for all asynchronous loggers -type asyncLogger struct { - commonLogger - msgQueue *list.List - queueHasElements *sync.Cond -} - -// newAsyncLogger creates a new asynchronous logger -func newAsyncLogger(config *logConfig) *asyncLogger { - asnLogger := new(asyncLogger) - - asnLogger.msgQueue = list.New() - asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex)) - - asnLogger.commonLogger = *newCommonLogger(config, asnLogger) - - return asnLogger -} - -func (asnLogger *asyncLogger) innerLog( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - asnLogger.addMsgToQueue(level, context, message) -} - -func (asnLogger *asyncLogger) Close() { - asnLogger.m.Lock() - defer asnLogger.m.Unlock() - - if !asnLogger.Closed() { - asnLogger.flushQueue(true) - asnLogger.config.RootDispatcher.Flush() - - if err := asnLogger.config.RootDispatcher.Close(); err != nil { - reportInternalError(err) - } - - asnLogger.closedM.Lock() - asnLogger.closed = true - asnLogger.closedM.Unlock() - asnLogger.queueHasElements.Broadcast() - } -} - -func (asnLogger *asyncLogger) Flush() { - asnLogger.m.Lock() - defer asnLogger.m.Unlock() - - if !asnLogger.Closed() { - asnLogger.flushQueue(true) - asnLogger.config.RootDispatcher.Flush() - } -} - -func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) { - if lockNeeded { - asnLogger.queueHasElements.L.Lock() - defer asnLogger.queueHasElements.L.Unlock() - } - - for asnLogger.msgQueue.Len() > 0 { - asnLogger.processQueueElement() - } -} - -func (asnLogger *asyncLogger) processQueueElement() { - if asnLogger.msgQueue.Len() > 0 { - backElement := asnLogger.msgQueue.Front() - msg, _ := backElement.Value.(msgQueueItem) - asnLogger.processLogMsg(msg.level, msg.message, msg.context) - asnLogger.msgQueue.Remove(backElement) - } -} - -func (asnLogger *asyncLogger) addMsgToQueue( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - if !asnLogger.Closed() { - asnLogger.queueHasElements.L.Lock() - defer asnLogger.queueHasElements.L.Unlock() - - if asnLogger.msgQueue.Len() >= MaxQueueSize { - fmt.Printf("Seelog queue overflow: more than %v messages in the queue. 
Flushing.\n", MaxQueueSize) - asnLogger.flushQueue(false) - } - - queueItem := msgQueueItem{level, context, message} - - asnLogger.msgQueue.PushBack(queueItem) - asnLogger.queueHasElements.Broadcast() - } else { - err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message) - reportInternalError(err) - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go b/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go deleted file mode 100644 index 972467b3..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// asyncLoopLogger represents asynchronous logger which processes the log queue in -// a 'for' loop -type asyncLoopLogger struct { - asyncLogger -} - -// NewAsyncLoopLogger creates a new asynchronous loop logger -func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger { - - asnLoopLogger := new(asyncLoopLogger) - - asnLoopLogger.asyncLogger = *newAsyncLogger(config) - - go asnLoopLogger.processQueue() - - return asnLoopLogger -} - -func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) { - asnLoopLogger.queueHasElements.L.Lock() - defer asnLoopLogger.queueHasElements.L.Unlock() - - for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() { - asnLoopLogger.queueHasElements.Wait() - } - - if asnLoopLogger.Closed() { - return true - } - - asnLoopLogger.processQueueElement() - return false -} - -func (asnLoopLogger *asyncLoopLogger) processQueue() { - for !asnLoopLogger.Closed() { - closed := asnLoopLogger.processItem() - - if closed { - break - } - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go b/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go deleted file mode 100644 index 8118f205..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "time" -) - -// asyncTimerLogger represents asynchronous logger which processes the log queue each -// 'duration' nanoseconds -type asyncTimerLogger struct { - asyncLogger - interval time.Duration -} - -// NewAsyncLoopLogger creates a new asynchronous loop logger -func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) { - - if interval <= 0 { - return nil, errors.New("async logger interval should be > 0") - } - - asnTimerLogger := new(asyncTimerLogger) - - asnTimerLogger.asyncLogger = *newAsyncLogger(config) - asnTimerLogger.interval = interval - - go asnTimerLogger.processQueue() - - return asnTimerLogger, nil -} - -func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) { - asnTimerLogger.queueHasElements.L.Lock() - defer asnTimerLogger.queueHasElements.L.Unlock() - - for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() { - asnTimerLogger.queueHasElements.Wait() - } - - if asnTimerLogger.Closed() { - return true - } - - asnTimerLogger.processQueueElement() - return false -} - -func (asnTimerLogger *asyncTimerLogger) processQueue() { - for !asnTimerLogger.Closed() { - closed := asnTimerLogger.processItem() - - if closed { - break - } - - <-time.After(asnTimerLogger.interval) - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_synclogger.go b/pkg/c8y/vendor/github.com/cihub/seelog/behavior_synclogger.go deleted file mode 100644 index 5a022ebc..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/behavior_synclogger.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// syncLogger performs logging in the same goroutine where 'Trace/Debug/...' -// func was called -type syncLogger struct { - commonLogger -} - -// NewSyncLogger creates a new synchronous logger -func NewSyncLogger(config *logConfig) *syncLogger { - syncLogger := new(syncLogger) - - syncLogger.commonLogger = *newCommonLogger(config, syncLogger) - - return syncLogger -} - -func (syncLogger *syncLogger) innerLog( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - syncLogger.processLogMsg(level, message, context) -} - -func (syncLogger *syncLogger) Close() { - syncLogger.m.Lock() - defer syncLogger.m.Unlock() - - if !syncLogger.Closed() { - if err := syncLogger.config.RootDispatcher.Close(); err != nil { - reportInternalError(err) - } - syncLogger.closedM.Lock() - syncLogger.closed = true - syncLogger.closedM.Unlock() - } -} - -func (syncLogger *syncLogger) Flush() { - syncLogger.m.Lock() - defer syncLogger.m.Unlock() - - if !syncLogger.Closed() { - syncLogger.config.RootDispatcher.Flush() - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_config.go b/pkg/c8y/vendor/github.com/cihub/seelog/cfg_config.go deleted file mode 100644 index 76554fca..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_config.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "os" -) - -// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml. -func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) { - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - conf, err := configFromReader(file) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml. -func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) { - conf, err := configFromReader(bytes.NewBuffer(data)) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml. -func LoggerFromConfigAsString(data string) (LoggerInterface, error) { - return LoggerFromConfigAsBytes([]byte(data)) -} - -// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options. -// See 'CfgParseParams' comments. -func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) { - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - conf, err := configFromReaderWithConfig(file, parserParams) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options. -// See 'CfgParseParams' comments. -func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) { - conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options. -// See 'CfgParseParams' comments. -func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) { - return LoggerFromParamConfigAsBytes([]byte(data), parserParams) -} - -// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) -func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) { - return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) -} - -// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the -// receiver with minimal level = minLevel and with specified format. -// -// All messages with level more or equal to minLevel will be written to output and -// formatted using the default seelog format. 
-// -// Can be called for usage with non-Seelog systems -func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) { - constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl) - if err != nil { - return nil, err - } - formatter, err := NewFormatter(format) - if err != nil { - return nil, err - } - dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output}) - if err != nil { - return nil, err - } - - conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node. -// It should contain valid seelog xml, except for root node name. -func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) { - conf, err := configFromXMLDecoder(xmlParser, rootNode) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the -// receiver. -// -// All messages will be sent to the specified custom receiver without additional -// formatting ('%Msg' format is used). -// -// Check CustomReceiver, RegisterReceiver for additional info. -// -// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated -// by the config parser while parsing config. So, if you are not planning to use the -// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and -// loading from config, just leave AfterParse implementation empty. -// -// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized -// instance that implements CustomReceiver. So, fill it with data and perform any initialization -// logic before calling this func and it won't be lost. -// -// So: -// * RegisterReceiver takes value just to get the reflect.Type from it and then -// instantiate it as many times as config is reloaded. -// -// * LoggerFromCustomReceiver takes value and uses it without modification and -// reinstantiation, directy passing it to the dispatcher tree. 
-func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) { - constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl) - if err != nil { - return nil, err - } - - output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{}) - if err != nil { - return nil, err - } - dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output}) - if err != nil { - return nil, err - } - - conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -func CloneLogger(logger LoggerInterface) (LoggerInterface, error) { - switch logger := logger.(type) { - default: - return nil, fmt.Errorf("unexpected type %T", logger) - case *asyncAdaptiveLogger: - clone, err := NewAsyncAdaptiveLogger(logger.commonLogger.config, logger.minInterval, logger.maxInterval, logger.criticalMsgCount) - if err != nil { - return nil, err - } - return clone, nil - case *asyncLoopLogger: - return NewAsyncLoopLogger(logger.commonLogger.config), nil - case *asyncTimerLogger: - clone, err := NewAsyncTimerLogger(logger.commonLogger.config, logger.interval) - if err != nil { - return nil, err - } - return clone, nil - case *syncLogger: - return NewSyncLogger(logger.commonLogger.config), nil - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_errors.go b/pkg/c8y/vendor/github.com/cihub/seelog/cfg_errors.go deleted file mode 100644 index c1fb4d10..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_errors.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" -) - -var ( - errNodeMustHaveChildren = errors.New("node must have children") - errNodeCannotHaveChildren = errors.New("node cannot have children") -) - -type unexpectedChildElementError struct { - baseError -} - -func newUnexpectedChildElementError(msg string) *unexpectedChildElementError { - custmsg := "Unexpected child element: " + msg - return &unexpectedChildElementError{baseError{message: custmsg}} -} - -type missingArgumentError struct { - baseError -} - -func newMissingArgumentError(nodeName, attrName string) *missingArgumentError { - custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute" - return &missingArgumentError{baseError{message: custmsg}} -} - -type unexpectedAttributeError struct { - baseError -} - -func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError { - custmsg := nodeName + " has unexpected attribute: " + attr - return &unexpectedAttributeError{baseError{message: custmsg}} -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_logconfig.go b/pkg/c8y/vendor/github.com/cihub/seelog/cfg_logconfig.go deleted file mode 100644 index 6ba6f9a9..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/cfg_logconfig.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" -) - -type loggerTypeFromString uint8 - -const ( - syncloggerTypeFromString = iota - asyncLooploggerTypeFromString - asyncTimerloggerTypeFromString - adaptiveLoggerTypeFromString - defaultloggerTypeFromString = asyncLooploggerTypeFromString -) - -const ( - syncloggerTypeFromStringStr = "sync" - asyncloggerTypeFromStringStr = "asyncloop" - asyncTimerloggerTypeFromStringStr = "asynctimer" - adaptiveLoggerTypeFromStringStr = "adaptive" -) - -// asyncTimerLoggerData represents specific data for async timer logger -type asyncTimerLoggerData struct { - AsyncInterval uint32 -} - -// adaptiveLoggerData represents specific data for adaptive timer logger -type adaptiveLoggerData struct { - MinInterval uint32 - MaxInterval uint32 - CriticalMsgCount uint32 -} - -var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{ - syncloggerTypeFromString: syncloggerTypeFromStringStr, - asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr, - asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr, - adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr, -} - -// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful. -func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) { - for logType, logTypeStr := range loggerTypeToStringRepresentations { - if logTypeStr == logTypeString { - return logType, true - } - } - - return 0, false -} - -// logConfig stores logging configuration. Contains messages dispatcher, allowed log level rules -// (general constraints and exceptions) -type logConfig struct { - Constraints logLevelConstraints // General log level rules (>min and ' element. It takes the 'name' attribute - // of the element and tries to find a match in two places: - // 1) CfgParseParams.CustomReceiverProducers map - // 2) Global type map, filled by RegisterReceiver - // - // If a match is found in the CustomReceiverProducers map, parser calls the corresponding producer func - // passing the init args to it. The func takes exactly the same args as CustomReceiver.AfterParse. - // The producer func must return a correct receiver or an error. If case of error, seelog will behave - // in the same way as with any other config error. - // - // You may use this param to set custom producers in case you need to pass some context when instantiating - // a custom receiver or if you frequently change custom receivers with different parameters or in any other - // situation where package-level registering (RegisterReceiver) is not an option for you. 
- CustomReceiverProducers map[string]CustomReceiverProducer -} - -func (cfg *CfgParseParams) String() string { - return fmt.Sprintf("CfgParams: {custom_recs=%d}", len(cfg.CustomReceiverProducers)) -} - -type elementMapEntry struct { - constructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) -} - -var elementMap map[string]elementMapEntry -var predefinedFormats map[string]*formatter - -func init() { - elementMap = map[string]elementMapEntry{ - fileWriterID: {createfileWriter}, - splitterDispatcherID: {createSplitter}, - customReceiverID: {createCustomReceiver}, - filterDispatcherID: {createFilter}, - consoleWriterID: {createConsoleWriter}, - rollingfileWriterID: {createRollingFileWriter}, - bufferedWriterID: {createbufferedWriter}, - smtpWriterID: {createSMTPWriter}, - connWriterID: {createconnWriter}, - } - - err := fillPredefinedFormats() - if err != nil { - panic(fmt.Sprintf("Seelog couldn't start: predefined formats creation failed. Error: %s", err.Error())) - } -} - -func fillPredefinedFormats() error { - predefinedFormatsWithoutPrefix := map[string]string{ - "xml-debug": `%Lev%Msg%RelFile%Func%Line`, - "xml-debug-short": `%Ns%l%Msg
%RelFile
%Func`, - "xml": `%Lev%Msg`, - "xml-short": `%Ns%l%Msg`, - - "json-debug": `{"time":%Ns,"lev":"%Lev","msg":"%Msg","path":"%RelFile","func":"%Func","line":"%Line"}`, - "json-debug-short": `{"t":%Ns,"l":"%Lev","m":"%Msg","p":"%RelFile","f":"%Func"}`, - "json": `{"time":%Ns,"lev":"%Lev","msg":"%Msg"}`, - "json-short": `{"t":%Ns,"l":"%Lev","m":"%Msg"}`, - - "debug": `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`, - "debug-short": `[%LEVEL] %Date %Time %Msg%n`, - "fast": `%Ns %l %Msg%n`, - } - - predefinedFormats = make(map[string]*formatter) - - for formatKey, format := range predefinedFormatsWithoutPrefix { - formatter, err := NewFormatter(format) - if err != nil { - return err - } - - predefinedFormats[predefinedPrefix+formatKey] = formatter - } - - return nil -} - -// configFromXMLDecoder parses data from a given XML decoder. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*configForParsing, error) { - return configFromXMLDecoderWithConfig(xmlParser, rootNode, nil) -} - -// configFromXMLDecoderWithConfig parses data from a given XML decoder. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*configForParsing, error) { - _, ok := rootNode.(xml.StartElement) - if !ok { - return nil, errors.New("rootNode must be XML startElement") - } - - config, err := unmarshalNode(xmlParser, rootNode) - if err != nil { - return nil, err - } - if config == nil { - return nil, errors.New("xml has no content") - } - - return configFromXMLNodeWithConfig(config, cfg) -} - -// configFromReader parses data from a given reader. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromReader(reader io.Reader) (*configForParsing, error) { - return configFromReaderWithConfig(reader, nil) -} - -// configFromReaderWithConfig parses data from a given reader. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. 
-func configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*configForParsing, error) { - config, err := unmarshalConfig(reader) - if err != nil { - return nil, err - } - - if config.name != seelogConfigID { - return nil, errors.New("root xml tag must be '" + seelogConfigID + "'") - } - - return configFromXMLNodeWithConfig(config, cfg) -} - -func configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*configForParsing, error) { - err := checkUnexpectedAttribute( - config, - minLevelID, - maxLevelID, - levelsID, - loggerTypeFromStringAttr, - asyncLoggerIntervalAttr, - adaptLoggerMinIntervalAttr, - adaptLoggerMaxIntervalAttr, - adaptLoggerCriticalMsgCountAttr, - ) - if err != nil { - return nil, err - } - - err = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID)) - if err != nil { - return nil, err - } - - constraints, err := getConstraints(config) - if err != nil { - return nil, err - } - - exceptions, err := getExceptions(config) - if err != nil { - return nil, err - } - err = checkDistinctExceptions(exceptions) - if err != nil { - return nil, err - } - - formats, err := getFormats(config) - if err != nil { - return nil, err - } - - dispatcher, err := getOutputsTree(config, formats, cfg) - if err != nil { - // If we open several files, but then fail to parse the config, we should close - // those files before reporting that config is invalid. - if dispatcher != nil { - dispatcher.Close() - } - - return nil, err - } - - loggerType, logData, err := getloggerTypeFromStringData(config) - if err != nil { - return nil, err - } - - return newFullLoggerConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg) -} - -func getConstraints(node *xmlNode) (logLevelConstraints, error) { - minLevelStr, isMinLevel := node.attributes[minLevelID] - maxLevelStr, isMaxLevel := node.attributes[maxLevelID] - levelsStr, isLevels := node.attributes[levelsID] - - if isLevels && (isMinLevel && isMaxLevel) { - return nil, errors.New("for level declaration use '" + levelsID + "'' OR '" + minLevelID + - "', '" + maxLevelID + "'") - } - - offString := LogLevel(Off).String() - - if (isLevels && strings.TrimSpace(levelsStr) == offString) || - (isMinLevel && !isMaxLevel && minLevelStr == offString) { - - return NewOffConstraints() - } - - if isLevels { - levels, err := parseLevels(levelsStr) - if err != nil { - return nil, err - } - return NewListConstraints(levels) - } - - var minLevel = LogLevel(TraceLvl) - if isMinLevel { - found := true - minLevel, found = LogLevelFromString(minLevelStr) - if !found { - return nil, errors.New("declared " + minLevelID + " not found: " + minLevelStr) - } - } - - var maxLevel = LogLevel(CriticalLvl) - if isMaxLevel { - found := true - maxLevel, found = LogLevelFromString(maxLevelStr) - if !found { - return nil, errors.New("declared " + maxLevelID + " not found: " + maxLevelStr) - } - } - - return NewMinMaxConstraints(minLevel, maxLevel) -} - -func parseLevels(str string) ([]LogLevel, error) { - levelsStrArr := strings.Split(strings.Replace(str, " ", "", -1), ",") - var levels []LogLevel - for _, levelStr := range levelsStrArr { - level, found := LogLevelFromString(levelStr) - if !found { - return nil, errors.New("declared level not found: " + levelStr) - } - - levels = append(levels, level) - } - - return levels, nil -} - -func getExceptions(config *xmlNode) ([]*LogLevelException, error) { - var exceptions []*LogLevelException - - var exceptionsNode *xmlNode - for _, child := range 
config.children { - if child.name == exceptionsID { - exceptionsNode = child - break - } - } - - if exceptionsNode == nil { - return exceptions, nil - } - - err := checkUnexpectedAttribute(exceptionsNode) - if err != nil { - return nil, err - } - - err = checkExpectedElements(exceptionsNode, multipleMandatoryElements("exception")) - if err != nil { - return nil, err - } - - for _, exceptionNode := range exceptionsNode.children { - if exceptionNode.name != exceptionID { - return nil, errors.New("incorrect nested element in exceptions section: " + exceptionNode.name) - } - - err := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID) - if err != nil { - return nil, err - } - - constraints, err := getConstraints(exceptionNode) - if err != nil { - return nil, errors.New("incorrect " + exceptionsID + " node: " + err.Error()) - } - - funcPattern, isFuncPattern := exceptionNode.attributes[funcPatternID] - filePattern, isFilePattern := exceptionNode.attributes[filePatternID] - if !isFuncPattern { - funcPattern = "*" - } - if !isFilePattern { - filePattern = "*" - } - - exception, err := NewLogLevelException(funcPattern, filePattern, constraints) - if err != nil { - return nil, errors.New("incorrect exception node: " + err.Error()) - } - - exceptions = append(exceptions, exception) - } - - return exceptions, nil -} - -func checkDistinctExceptions(exceptions []*LogLevelException) error { - for i, exception := range exceptions { - for j, exception1 := range exceptions { - if i == j { - continue - } - - if exception.FuncPattern() == exception1.FuncPattern() && - exception.FilePattern() == exception1.FilePattern() { - - return fmt.Errorf("there are two or more duplicate exceptions. Func: %v, file %v", - exception.FuncPattern(), exception.FilePattern()) - } - } - } - - return nil -} - -func getFormats(config *xmlNode) (map[string]*formatter, error) { - formats := make(map[string]*formatter, 0) - - var formatsNode *xmlNode - for _, child := range config.children { - if child.name == formatsID { - formatsNode = child - break - } - } - - if formatsNode == nil { - return formats, nil - } - - err := checkUnexpectedAttribute(formatsNode) - if err != nil { - return nil, err - } - - err = checkExpectedElements(formatsNode, multipleMandatoryElements("format")) - if err != nil { - return nil, err - } - - for _, formatNode := range formatsNode.children { - if formatNode.name != formatID { - return nil, errors.New("incorrect nested element in " + formatsID + " section: " + formatNode.name) - } - - err := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID) - if err != nil { - return nil, err - } - - id, isID := formatNode.attributes[formatKeyAttrID] - formatStr, isFormat := formatNode.attributes[formatAttrID] - if !isID { - return nil, errors.New("format has no '" + formatKeyAttrID + "' attribute") - } - if !isFormat { - return nil, errors.New("format[" + id + "] has no '" + formatAttrID + "' attribute") - } - - formatter, err := NewFormatter(formatStr) - if err != nil { - return nil, err - } - - formats[id] = formatter - } - - return formats, nil -} - -func getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) { - logTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr] - - if !loggerTypeExists { - return defaultloggerTypeFromString, nil, nil - } - - logType, found := getLoggerTypeFromString(logTypeStr) - - if !found { - return 0, nil, fmt.Errorf("unknown logger type: %s", 
logTypeStr) - } - - if logType == asyncTimerloggerTypeFromString { - intervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr] - if !intervalExists { - return 0, nil, newMissingArgumentError(config.name, asyncLoggerIntervalAttr) - } - - interval, err := strconv.ParseUint(intervalStr, 10, 32) - if err != nil { - return 0, nil, err - } - - logData = asyncTimerLoggerData{uint32(interval)} - } else if logType == adaptiveLoggerTypeFromString { - - // Min interval - minIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr] - if !minIntExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr) - } - minInterval, err := strconv.ParseUint(minIntStr, 10, 32) - if err != nil { - return 0, nil, err - } - - // Max interval - maxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr] - if !maxIntExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr) - } - maxInterval, err := strconv.ParseUint(maxIntStr, 10, 32) - if err != nil { - return 0, nil, err - } - - // Critical msg count - criticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr] - if !criticalMsgCountExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr) - } - criticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32) - if err != nil { - return 0, nil, err - } - - logData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)} - } - - return logType, logData, nil -} - -func getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) { - var outputsNode *xmlNode - for _, child := range config.children { - if child.name == outputsID { - outputsNode = child - break - } - } - - if outputsNode != nil { - err := checkUnexpectedAttribute(outputsNode, outputFormatID) - if err != nil { - return nil, err - } - - formatter, err := getCurrentFormat(outputsNode, DefaultFormatter, formats) - if err != nil { - return nil, err - } - - output, err := createSplitter(outputsNode, formatter, formats, cfg) - if err != nil { - return nil, err - } - - dispatcher, ok := output.(dispatcherInterface) - if ok { - return dispatcher, nil - } - } - - console, err := NewConsoleWriter() - if err != nil { - return nil, err - } - return NewSplitDispatcher(DefaultFormatter, []interface{}{console}) -} - -func getCurrentFormat(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter) (*formatter, error) { - formatID, isFormatID := node.attributes[outputFormatID] - if !isFormatID { - return formatFromParent, nil - } - - format, ok := formats[formatID] - if ok { - return format, nil - } - - // Test for predefined format match - pdFormat, pdOk := predefinedFormats[formatID] - - if !pdOk { - return nil, errors.New("formatid = '" + formatID + "' doesn't exist") - } - - return pdFormat, nil -} - -func createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg *CfgParseParams) ([]interface{}, error) { - var outputs []interface{} - for _, childNode := range node.children { - entry, ok := elementMap[childNode.name] - if !ok { - return nil, errors.New("unnknown tag '" + childNode.name + "' in outputs section") - } - - output, err := entry.constructor(childNode, format, formats, cfg) - if err != nil { - return nil, err - } - - outputs = append(outputs, output) - } - - return outputs, nil -} - -func createSplitter(node *xmlNode, formatFromParent *formatter, 
formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - return NewSplitDispatcher(currentFormat, receivers) -} - -func createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - dataCustomPrefixes := make(map[string]string) - // Expecting only 'formatid', 'name' and 'data-' attrs - for attr, attrval := range node.attributes { - isExpected := false - if attr == outputFormatID || - attr == customNameAttrID { - isExpected = true - } - if strings.HasPrefix(attr, customNameDataAttrPrefix) { - dataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval - isExpected = true - } - if !isExpected { - return nil, newUnexpectedAttributeError(node.name, attr) - } - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - customName, hasCustomName := node.attributes[customNameAttrID] - if !hasCustomName { - return nil, newMissingArgumentError(node.name, customNameAttrID) - } - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - args := CustomReceiverInitArgs{ - XmlCustomAttrs: dataCustomPrefixes, - } - - if cfg != nil && cfg.CustomReceiverProducers != nil { - if prod, ok := cfg.CustomReceiverProducers[customName]; ok { - rec, err := prod(args) - if err != nil { - return nil, err - } - creceiver, err := NewCustomReceiverDispatcherByValue(currentFormat, rec, customName, args) - if err != nil { - return nil, err - } - err = rec.AfterParse(args) - if err != nil { - return nil, err - } - return creceiver, nil - } - } - - return NewCustomReceiverDispatcher(currentFormat, customName, args) -} - -func createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - levelsStr, isLevels := node.attributes[filterLevelsAttrID] - if !isLevels { - return nil, newMissingArgumentError(node.name, filterLevelsAttrID) - } - - levels, err := parseLevels(levelsStr) - if err != nil { - return nil, err - } - - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - return NewFilterDispatcher(currentFormat, receivers, levels...) 
-} - -func createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, pathID) - if err != nil { - return nil, err - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - path, isPath := node.attributes[pathID] - if !isPath { - return nil, newMissingArgumentError(node.name, pathID) - } - - fileWriter, err := NewFileWriter(path) - if err != nil { - return nil, err - } - - return NewFormattedWriter(fileWriter, currentFormat) -} - -// Creates new SMTP writer if encountered in the config file. -func createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID) - if err != nil { - return nil, err - } - // Node must have children. - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - senderAddress, ok := node.attributes[senderaddressID] - if !ok { - return nil, newMissingArgumentError(node.name, senderaddressID) - } - senderName, ok := node.attributes[senderNameID] - if !ok { - return nil, newMissingArgumentError(node.name, senderNameID) - } - // Process child nodes scanning for recipient email addresses and/or CA certificate paths. - var recipientAddresses []string - var caCertDirPaths []string - var mailHeaders []string - for _, childNode := range node.children { - switch childNode.name { - // Extract recipient address from child nodes. - case recipientID: - address, ok := childNode.attributes[addressID] - if !ok { - return nil, newMissingArgumentError(childNode.name, addressID) - } - recipientAddresses = append(recipientAddresses, address) - // Extract CA certificate file path from child nodes. - case cACertDirpathID: - path, ok := childNode.attributes[pathID] - if !ok { - return nil, newMissingArgumentError(childNode.name, pathID) - } - caCertDirPaths = append(caCertDirPaths, path) - - // Extract email headers from child nodes. - case mailHeaderID: - headerName, ok := childNode.attributes[mailHeaderNameID] - if !ok { - return nil, newMissingArgumentError(childNode.name, mailHeaderNameID) - } - - headerValue, ok := childNode.attributes[mailHeaderValueID] - if !ok { - return nil, newMissingArgumentError(childNode.name, mailHeaderValueID) - } - - // Build header line - mailHeaders = append(mailHeaders, fmt.Sprintf("%s: %s", headerName, headerValue)) - default: - return nil, newUnexpectedChildElementError(childNode.name) - } - } - hostName, ok := node.attributes[hostNameID] - if !ok { - return nil, newMissingArgumentError(node.name, hostNameID) - } - - hostPort, ok := node.attributes[hostPortID] - if !ok { - return nil, newMissingArgumentError(node.name, hostPortID) - } - - // Check if the string can really be converted into int. 
- if _, err := strconv.Atoi(hostPort); err != nil { - return nil, errors.New("invalid host port number") - } - - userName, ok := node.attributes[userNameID] - if !ok { - return nil, newMissingArgumentError(node.name, userNameID) - } - - userPass, ok := node.attributes[userPassID] - if !ok { - return nil, newMissingArgumentError(node.name, userPassID) - } - - // subject is optionally set by configuration. - // default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go - var subjectPhrase = DefaultSubjectPhrase - - subject, ok := node.attributes[subjectID] - if ok { - subjectPhrase = subject - } - - smtpWriter := NewSMTPWriter( - senderAddress, - senderName, - recipientAddresses, - hostName, - hostPort, - userName, - userPass, - caCertDirPaths, - subjectPhrase, - mailHeaders, - ) - - return NewFormattedWriter(smtpWriter, currentFormat) -} - -func createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID) - if err != nil { - return nil, err - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - consoleWriter, err := NewConsoleWriter() - if err != nil { - return nil, err - } - - return NewFormattedWriter(consoleWriter, currentFormat) -} - -func createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - err := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr) - if err != nil { - return nil, err - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - addr, isAddr := node.attributes[connWriterAddrAttr] - if !isAddr { - return nil, newMissingArgumentError(node.name, connWriterAddrAttr) - } - - net, isNet := node.attributes[connWriterNetAttr] - if !isNet { - return nil, newMissingArgumentError(node.name, connWriterNetAttr) - } - - reconnectOnMsg := false - reconnectOnMsgStr, isReconnectOnMsgStr := node.attributes[connWriterReconnectOnMsgAttr] - if isReconnectOnMsgStr { - if reconnectOnMsgStr == "true" { - reconnectOnMsg = true - } else if reconnectOnMsgStr == "false" { - reconnectOnMsg = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterReconnectOnMsgAttr + "' attribute value") - } - } - - useTLS := false - useTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr] - if isUseTLSStr { - if useTLSStr == "true" { - useTLS = true - } else if useTLSStr == "false" { - useTLS = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterUseTLSAttr + "' attribute value") - } - if useTLS { - insecureSkipVerify := false - insecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr] - if isInsecureSkipVerify { - if insecureSkipVerifyStr == "true" { - insecureSkipVerify = true - } else if insecureSkipVerifyStr == "false" { - insecureSkipVerify = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterInsecureSkipVerifyAttr + "' attribute value") - } - } - config := tls.Config{InsecureSkipVerify: 
insecureSkipVerify} - connWriter := newTLSWriter(net, addr, reconnectOnMsg, &config) - return NewFormattedWriter(connWriter, currentFormat) - } - } - - connWriter := NewConnWriter(net, addr, reconnectOnMsg) - - return NewFormattedWriter(connWriter, currentFormat) -} - -func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - rollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr] - if !isRollingType { - return nil, newMissingArgumentError(node.name, rollingFileTypeAttr) - } - - rollingType, ok := rollingTypeFromString(rollingTypeStr) - if !ok { - return nil, errors.New("unknown rolling file type: " + rollingTypeStr) - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - path, isPath := node.attributes[rollingFilePathAttr] - if !isPath { - return nil, newMissingArgumentError(node.name, rollingFilePathAttr) - } - - rollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr] - - var rArchiveType rollingArchiveType - var rArchivePath string - var rArchiveExploded bool = false - if !archiveAttrExists { - rArchiveType = rollingArchiveNone - rArchivePath = "" - } else { - rArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr) - if !ok { - return nil, errors.New("unknown rolling archive type: " + rollingArchiveStr) - } - - if rArchiveType == rollingArchiveNone { - rArchivePath = "" - } else { - if rArchiveExplodedAttr, ok := node.attributes[rollingFileArchiveExplodedAttr]; ok { - if rArchiveExploded, err = strconv.ParseBool(rArchiveExplodedAttr); err != nil { - return nil, fmt.Errorf("archive exploded should be true or false, but was %v", - rArchiveExploded) - } - } - - rArchivePath, ok = node.attributes[rollingFileArchivePathAttr] - if ok { - if rArchivePath == "" { - return nil, fmt.Errorf("empty archive path is not supported") - } - } else { - if rArchiveExploded { - rArchivePath = rollingArchiveDefaultExplodedName - - } else { - rArchivePath, err = rollingArchiveTypeDefaultName(rArchiveType, false) - if err != nil { - return nil, err - } - } - } - } - } - - nameMode := rollingNameMode(rollingNameModePostfix) - nameModeStr, ok := node.attributes[rollingFileNameModeAttr] - if ok { - mode, found := rollingNameModeFromString(nameModeStr) - if !found { - return nil, errors.New("unknown rolling filename mode: " + nameModeStr) - } else { - nameMode = mode - } - } - - if rollingType == rollingTypeSize { - err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, - rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr, - rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr) - if err != nil { - return nil, err - } - - maxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr] - if !ok { - return nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr) - } - - maxSize, err := strconv.ParseInt(maxSizeStr, 10, 64) - if err != nil { - return nil, err - } - - maxRolls := 0 - maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] - if ok { - maxRolls, err = strconv.Atoi(maxRollsStr) - if err != nil { - return nil, err - } - } - - rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode, rArchiveExploded) - if err != nil { - return nil, err - } - - return NewFormattedWriter(rollingWriter, 
currentFormat) - - } else if rollingType == rollingTypeTime { - err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, - rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr, - rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr, - rollingFileFullNameAttr) - if err != nil { - return nil, err - } - - maxRolls := 0 - maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] - if ok { - maxRolls, err = strconv.Atoi(maxRollsStr) - if err != nil { - return nil, err - } - } - - fullName := false - fn, ok := node.attributes[rollingFileFullNameAttr] - if ok { - if fn == "true" { - fullName = true - } else if fn == "false" { - fullName = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + rollingFileFullNameAttr + "' attribute value") - } - } - - dataPattern, ok := node.attributes[rollingFileDataPatternAttr] - if !ok { - return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr) - } - - rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, nameMode, rArchiveExploded, fullName) - if err != nil { - return nil, err - } - - return NewFormattedWriter(rollingWriter, currentFormat) - } - - return nil, errors.New("incorrect rolling writer type " + rollingTypeStr) -} - -func createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - sizeStr, isSize := node.attributes[bufferedSizeAttr] - if !isSize { - return nil, newMissingArgumentError(node.name, bufferedSizeAttr) - } - - size, err := strconv.Atoi(sizeStr) - if err != nil { - return nil, err - } - - flushPeriod := 0 - flushPeriodStr, isFlushPeriod := node.attributes[bufferedFlushPeriodAttr] - if isFlushPeriod { - flushPeriod, err = strconv.Atoi(flushPeriodStr) - if err != nil { - return nil, err - } - } - - // Inner writer couldn't have its own format, so we pass 'currentFormat' as its parent format - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - formattedWriter, ok := receivers[0].(*formattedWriter) - if !ok { - return nil, errors.New("buffered writer's child is not writer") - } - - // ... and then we check that it hasn't changed - if formattedWriter.Format() != currentFormat { - return nil, errors.New("inner writer cannot have his own format") - } - - bufferedWriter, err := NewBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod)) - if err != nil { - return nil, err - } - - return NewFormattedWriter(bufferedWriter, currentFormat) -} - -// Returns an error if node has any attributes not listed in expectedAttrs. 
-func checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error { - for attr := range node.attributes { - isExpected := false - for _, expected := range expectedAttrs { - if attr == expected { - isExpected = true - break - } - } - if !isExpected { - return newUnexpectedAttributeError(node.name, attr) - } - } - - return nil -} - -type expectedElementInfo struct { - name string - mandatory bool - multiple bool -} - -func optionalElement(name string) expectedElementInfo { - return expectedElementInfo{name, false, false} -} -func mandatoryElement(name string) expectedElementInfo { - return expectedElementInfo{name, true, false} -} -func multipleElements(name string) expectedElementInfo { - return expectedElementInfo{name, false, true} -} -func multipleMandatoryElements(name string) expectedElementInfo { - return expectedElementInfo{name, true, true} -} - -func checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error { - for _, element := range elements { - count := 0 - for _, child := range node.children { - if child.name == element.name { - count++ - } - } - - if count == 0 && element.mandatory { - return errors.New(node.name + " does not have mandatory subnode - " + element.name) - } - if count > 1 && !element.multiple { - return errors.New(node.name + " has more then one subnode - " + element.name) - } - } - - for _, child := range node.children { - isExpected := false - for _, element := range elements { - if child.name == element.name { - isExpected = true - } - } - - if !isExpected { - return errors.New(node.name + " has unexpected child: " + child.name) - } - } - - return nil -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_closer.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_closer.go deleted file mode 100644 index 1319c221..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_closer.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_constraints.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_constraints.go deleted file mode 100644 index 7ec2fe5b..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_constraints.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "strings" -) - -// Represents constraints which form a general rule for log levels selection -type logLevelConstraints interface { - IsAllowed(level LogLevel) bool -} - -// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels. -type minMaxConstraints struct { - min LogLevel - max LogLevel -} - -// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels. -func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) { - if min > max { - return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max) - } - if min < TraceLvl || min > CriticalLvl { - return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min) - } - if max < TraceLvl || max > CriticalLvl { - return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max) - } - - return &minMaxConstraints{min, max}, nil -} - -// IsAllowed returns true, if log level is in [min, max] range (inclusive). -func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool { - return level >= minMaxConstr.min && level <= minMaxConstr.max -} - -func (minMaxConstr *minMaxConstraints) String() string { - return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max) -} - -//======================================================= - -// A listConstraints represents constraints which use allowed log levels list. -type listConstraints struct { - allowedLevels map[LogLevel]bool -} - -// NewListConstraints creates a new listConstraints struct with the specified allowed levels. 
-func NewListConstraints(allowList []LogLevel) (*listConstraints, error) { - if allowList == nil { - return nil, errors.New("list can't be nil") - } - - allowLevels, err := createMapFromList(allowList) - if err != nil { - return nil, err - } - err = validateOffLevel(allowLevels) - if err != nil { - return nil, err - } - - return &listConstraints{allowLevels}, nil -} - -func (listConstr *listConstraints) String() string { - allowedList := "List: " - - listLevel := make([]string, len(listConstr.allowedLevels)) - - var logLevel LogLevel - i := 0 - for logLevel = TraceLvl; logLevel <= Off; logLevel++ { - if listConstr.allowedLevels[logLevel] { - listLevel[i] = logLevel.String() - i++ - } - } - - allowedList += strings.Join(listLevel, ",") - - return allowedList -} - -func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) { - allowedLevels := make(map[LogLevel]bool, 0) - for _, level := range allowedList { - if level < TraceLvl || level > Off { - return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level) - } - allowedLevels[level] = true - } - return allowedLevels, nil -} -func validateOffLevel(allowedLevels map[LogLevel]bool) error { - if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 { - return errors.New("logLevel Off cant be mixed with other levels") - } - - return nil -} - -// IsAllowed returns true, if log level is in allowed log levels list. -// If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values. -func (listConstr *listConstraints) IsAllowed(level LogLevel) bool { - for l := range listConstr.allowedLevels { - if l == level && level != Off { - return true - } - } - - return false -} - -// AllowedLevels returns allowed levels configuration as a map. -func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool { - return listConstr.allowedLevels -} - -//======================================================= - -type offConstraints struct { -} - -func NewOffConstraints() (*offConstraints, error) { - return &offConstraints{}, nil -} - -func (offConstr *offConstraints) IsAllowed(level LogLevel) bool { - return false -} - -func (offConstr *offConstraints) String() string { - return "Off constraint" -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_context.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_context.go deleted file mode 100644 index 230a76ca..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_context.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" -) - -var ( - workingDir = "/" - stackCache map[uintptr]*logContext - stackCacheLock sync.RWMutex -) - -func init() { - wd, err := os.Getwd() - if err == nil { - workingDir = filepath.ToSlash(wd) + "/" - } - stackCache = make(map[uintptr]*logContext) -} - -// Represents runtime caller context. -type LogContextInterface interface { - // Caller's function name. - Func() string - // Caller's line number. - Line() int - // Caller's file short path (in slashed form). - ShortPath() string - // Caller's file full path (in slashed form). - FullPath() string - // Caller's file name (without path). - FileName() string - // True if the context is correct and may be used. - // If false, then an error in context evaluation occurred and - // all its other data may be corrupted. - IsValid() bool - // Time when log function was called. - CallTime() time.Time - // Custom context that can be set by calling logger.SetContext - CustomContext() interface{} -} - -// Returns context of the caller -func currentContext(custom interface{}) (LogContextInterface, error) { - return specifyContext(1, custom) -} - -func extractCallerInfo(skip int) (*logContext, error) { - var stack [1]uintptr - if runtime.Callers(skip+1, stack[:]) != 1 { - return nil, errors.New("error during runtime.Callers") - } - pc := stack[0] - - // do we have a cache entry? - stackCacheLock.RLock() - ctx, ok := stackCache[pc] - stackCacheLock.RUnlock() - if ok { - return ctx, nil - } - - // look up the details of the given caller - funcInfo := runtime.FuncForPC(pc) - if funcInfo == nil { - return nil, errors.New("error during runtime.FuncForPC") - } - - var shortPath string - fullPath, line := funcInfo.FileLine(pc) - if strings.HasPrefix(fullPath, workingDir) { - shortPath = fullPath[len(workingDir):] - } else { - shortPath = fullPath - } - funcName := funcInfo.Name() - if strings.HasPrefix(funcName, workingDir) { - funcName = funcName[len(workingDir):] - } - - ctx = &logContext{ - funcName: funcName, - line: line, - shortPath: shortPath, - fullPath: fullPath, - fileName: filepath.Base(fullPath), - } - - // save the details in the cache; note that it's possible we might - // have written an entry into the map in between the test above and - // this section, but the behaviour is still correct - stackCacheLock.Lock() - stackCache[pc] = ctx - stackCacheLock.Unlock() - return ctx, nil -} - -// Returns context of the function with placed "skip" stack frames of the caller -// If skip == 0 then behaves like currentContext -// Context is returned in any situation, even if error occurs. But, if an error -// occurs, the returned context is an error context, which contains no paths -// or names, but states that they can't be extracted. 
-func specifyContext(skip int, custom interface{}) (LogContextInterface, error) { - callTime := time.Now() - if skip < 0 { - err := fmt.Errorf("can not skip negative stack frames") - return &errorContext{callTime, err}, err - } - caller, err := extractCallerInfo(skip + 2) - if err != nil { - return &errorContext{callTime, err}, err - } - ctx := new(logContext) - *ctx = *caller - ctx.callTime = callTime - ctx.custom = custom - return ctx, nil -} - -// Represents a normal runtime caller context. -type logContext struct { - funcName string - line int - shortPath string - fullPath string - fileName string - callTime time.Time - custom interface{} -} - -func (context *logContext) IsValid() bool { - return true -} - -func (context *logContext) Func() string { - return context.funcName -} - -func (context *logContext) Line() int { - return context.line -} - -func (context *logContext) ShortPath() string { - return context.shortPath -} - -func (context *logContext) FullPath() string { - return context.fullPath -} - -func (context *logContext) FileName() string { - return context.fileName -} - -func (context *logContext) CallTime() time.Time { - return context.callTime -} - -func (context *logContext) CustomContext() interface{} { - return context.custom -} - -// Represents an error context -type errorContext struct { - errorTime time.Time - err error -} - -func (errContext *errorContext) getErrorText(prefix string) string { - return fmt.Sprintf("%s() error: %s", prefix, errContext.err) -} - -func (errContext *errorContext) IsValid() bool { - return false -} - -func (errContext *errorContext) Line() int { - return -1 -} - -func (errContext *errorContext) Func() string { - return errContext.getErrorText("Func") -} - -func (errContext *errorContext) ShortPath() string { - return errContext.getErrorText("ShortPath") -} - -func (errContext *errorContext) FullPath() string { - return errContext.getErrorText("FullPath") -} - -func (errContext *errorContext) FileName() string { - return errContext.getErrorText("FileName") -} - -func (errContext *errorContext) CallTime() time.Time { - return errContext.errorTime -} - -func (errContext *errorContext) CustomContext() interface{} { - return nil -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_exception.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_exception.go deleted file mode 100644 index 9acc2750..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_exception.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -// Used in rules creation to validate input file and func filters -var ( - fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`) - funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`) -) - -// LogLevelException represents an exceptional case used when you need some specific files or funcs to -// override general constraints and to use their own. -type LogLevelException struct { - funcPatternParts []string - filePatternParts []string - - funcPattern string - filePattern string - - constraints logLevelConstraints -} - -// NewLogLevelException creates a new exception. -func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) { - if constraints == nil { - return nil, errors.New("constraints can not be nil") - } - - exception := new(LogLevelException) - - err := exception.initFuncPatternParts(funcPattern) - if err != nil { - return nil, err - } - exception.funcPattern = strings.Join(exception.funcPatternParts, "") - - err = exception.initFilePatternParts(filePattern) - if err != nil { - return nil, err - } - exception.filePattern = strings.Join(exception.filePatternParts, "") - - exception.constraints = constraints - - return exception, nil -} - -// MatchesContext returns true if context matches the patterns of this LogLevelException -func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool { - return logLevelEx.match(context.Func(), context.FullPath()) -} - -// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException -func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool { - return logLevelEx.constraints.IsAllowed(level) -} - -// FuncPattern returns the function pattern of a exception -func (logLevelEx *LogLevelException) FuncPattern() string { - return logLevelEx.funcPattern -} - -// FuncPattern returns the file pattern of a exception -func (logLevelEx *LogLevelException) FilePattern() string { - return logLevelEx.filePattern -} - -// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern on parts -func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) { - - if funcFormatValidator.FindString(funcPattern) != funcPattern { - return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)") - } - - logLevelEx.funcPatternParts = splitPattern(funcPattern) - return nil -} - -// Checks whether the file filter has a correct format and splits file patterns using splitPattern. -func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) { - - if fileFormatValidator.FindString(filePattern) != filePattern { - return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . 
allowed)") - } - - logLevelEx.filePatternParts = splitPattern(filePattern) - return err -} - -func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool { - if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) { - return false - } - return stringMatchesPattern(logLevelEx.filePatternParts, filePath) -} - -func (logLevelEx *LogLevelException) String() string { - str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern) - - if logLevelEx.constraints != nil { - str += fmt.Sprintf("Constr: %s", logLevelEx.constraints) - } else { - str += "nil" - } - - return str -} - -// splitPattern splits pattern into strings and asterisks. Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"] -func splitPattern(pattern string) []string { - var patternParts []string - var lastChar rune - for _, char := range pattern { - if char == '*' { - if lastChar != '*' { - patternParts = append(patternParts, "*") - } - } else { - if len(patternParts) != 0 && lastChar != '*' { - patternParts[len(patternParts)-1] += string(char) - } else { - patternParts = append(patternParts, string(char)) - } - } - lastChar = char - } - - return patternParts -} - -// stringMatchesPattern check whether testString matches pattern with asterisks. -// Standard regexp functionality is not used here because of performance issues. -func stringMatchesPattern(patternparts []string, testString string) bool { - if len(patternparts) == 0 { - return len(testString) == 0 - } - - part := patternparts[0] - if part != "*" { - index := strings.Index(testString, part) - if index == 0 { - return stringMatchesPattern(patternparts[1:], testString[len(part):]) - } - } else { - if len(patternparts) == 1 { - return true - } - - newTestString := testString - part = patternparts[1] - for { - index := strings.Index(newTestString, part) - if index == -1 { - break - } - - newTestString = newTestString[index+len(part):] - result := stringMatchesPattern(patternparts[2:], newTestString) - if result { - return true - } - } - } - return false -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_flusher.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_flusher.go deleted file mode 100644 index 0ef077c8..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_flusher.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// flusherInterface represents all objects that have to do cleanup -// at certain moments of time (e.g. before app shutdown to avoid data loss) -type flusherInterface interface { - Flush() -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/common_loglevel.go b/pkg/c8y/vendor/github.com/cihub/seelog/common_loglevel.go deleted file mode 100644 index d54ecf27..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/common_loglevel.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// Log level type -type LogLevel uint8 - -// Log levels -const ( - TraceLvl = iota - DebugLvl - InfoLvl - WarnLvl - ErrorLvl - CriticalLvl - Off -) - -// Log level string representations (used in configuration files) -const ( - TraceStr = "trace" - DebugStr = "debug" - InfoStr = "info" - WarnStr = "warn" - ErrorStr = "error" - CriticalStr = "critical" - OffStr = "off" -) - -var levelToStringRepresentations = map[LogLevel]string{ - TraceLvl: TraceStr, - DebugLvl: DebugStr, - InfoLvl: InfoStr, - WarnLvl: WarnStr, - ErrorLvl: ErrorStr, - CriticalLvl: CriticalStr, - Off: OffStr, -} - -// LogLevelFromString parses a string and returns a corresponding log level, if sucessfull. -func LogLevelFromString(levelStr string) (level LogLevel, found bool) { - for lvl, lvlStr := range levelToStringRepresentations { - if lvlStr == levelStr { - return lvl, true - } - } - - return 0, false -} - -// LogLevelToString returns seelog string representation for a specified level. Returns "" for invalid log levels. 
-func (level LogLevel) String() string { - levelStr, ok := levelToStringRepresentations[level] - if ok { - return levelStr - } - - return "" -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_custom.go b/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_custom.go deleted file mode 100644 index 383a7705..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_custom.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2013 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -var registeredReceivers = make(map[string]reflect.Type) - -// RegisterReceiver records a custom receiver type, identified by a value -// of that type (second argument), under the specified name. Registered -// names can be used in the "name" attribute of config items. -// -// RegisterReceiver takes the type of the receiver argument, without taking -// the value into the account. So do NOT enter any data to the second argument -// and only call it like: -// RegisterReceiver("somename", &MyReceiverType{}) -// -// After that, when a '' config tag with this name is used, -// a receiver of the specified type would be instantiated. Check -// CustomReceiver comments for interface details. -// -// NOTE 1: RegisterReceiver fails if you attempt to register different types -// with the same name. -// -// NOTE 2: RegisterReceiver registers those receivers that must be used in -// the configuration files ( items). Basically it is just the way -// you tell seelog config parser what should it do when it meets a -// tag with a specific name and data attributes. -// -// But If you are only using seelog as a proxy to an already instantiated -// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver. 
-func RegisterReceiver(name string, receiver CustomReceiver) { - newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface()) - if t, ok := registeredReceivers[name]; ok && t != newType { - panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType)) - } - registeredReceivers[name] = newType -} - -func customReceiverByName(name string) (creceiver CustomReceiver, err error) { - rt, ok := registeredReceivers[name] - if !ok { - return nil, fmt.Errorf("custom receiver name not registered: '%s'", name) - } - v, ok := reflect.New(rt).Interface().(CustomReceiver) - if !ok { - return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name) - } - return v, nil -} - -// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init -// func when custom receiver is being initialized. -type CustomReceiverInitArgs struct { - // XmlCustomAttrs represent '' xml config item attributes that - // start with "data-". Map keys will be the attribute names without the "data-". - // Map values will the those attribute values. - // - // E.g. if you have a '' - // you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2" - // - // Note that in custom items you can only use allowed attributes, like "name" and - // your custom attributes, starting with "data-". Any other will lead to a - // parsing error. - XmlCustomAttrs map[string]string -} - -// CustomReceiver is the interface that external custom seelog message receivers -// must implement in order to be able to process seelog messages. Those receivers -// are set in the xml config file using the tag. Check receivers reference -// wiki section on that. -// -// Use seelog.RegisterReceiver on the receiver type before using it. -type CustomReceiver interface { - // ReceiveMessage is called when the custom receiver gets seelog message from - // a parent dispatcher. - // - // Message, level and context args represent all data that was included in the seelog - // message at the time it was logged. - // - // The formatting is already applied to the message and depends on the config - // like with any other receiver. - // - // If you would like to inform seelog of an error that happened during the handling of - // the message, return a non-nil error. This way you'll end up seeing your error like - // any other internal seelog error. - ReceiveMessage(message string, level LogLevel, context LogContextInterface) error - - // AfterParse is called immediately after your custom receiver is instantiated by - // the xml config parser. So, if you need to do any startup logic after config parsing, - // like opening file or allocating any resources after the receiver is instantiated, do it here. - // - // If this func returns a non-nil error, then the loading procedure will fail. E.g. - // if you are loading a seelog xml config, the parser would not finish the loading - // procedure and inform about an error like with any other config error. - // - // If your custom logger needs some configuration, you can use custom attributes in - // your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments. - // - // IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used - // to create seelog proxy logger using the custom receiver. This func is only called when - // receiver is instantiated from a config. - AfterParse(initArgs CustomReceiverInitArgs) error - - // Flush is called when the custom receiver gets a 'flush' directive from a - // parent receiver. 
If custom receiver implements some kind of buffering or - // queing, then the appropriate reaction on a flush message is synchronous - // flushing of all those queues/buffers. If custom receiver doesn't have - // such mechanisms, then flush implementation may be left empty. - Flush() - - // Close is called when the custom receiver gets a 'close' directive from a - // parent receiver. This happens when a top-level seelog dispatcher is sending - // 'close' to all child nodes and it means that current seelog logger is being closed. - // If you need to do any cleanup after your custom receiver is done, you should do - // it here. - Close() error -} - -type customReceiverDispatcher struct { - formatter *formatter - innerReceiver CustomReceiver - customReceiverName string - usedArgs CustomReceiverInitArgs -} - -// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created -// using a tag in the config file. -func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if len(customReceiverName) == 0 { - return nil, errors.New("custom receiver name cannot be empty") - } - - creceiver, err := customReceiverByName(customReceiverName) - if err != nil { - return nil, err - } - err = creceiver.AfterParse(cArgs) - if err != nil { - return nil, err - } - disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs} - - return disp, nil -} - -// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using -// a specific CustomReceiver value instead of instantiating a new one by type. -func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if customReceiver == nil { - return nil, errors.New("customReceiver cannot be nil") - } - disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs} - - return disp, nil -} - -// CustomReceiver implementation. Check CustomReceiver comments. -func (disp *customReceiverDispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - - defer func() { - if err := recover(); err != nil { - errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err)) - } - }() - - err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context) - if err != nil { - errorFunc(err) - } -} - -// CustomReceiver implementation. Check CustomReceiver comments. -func (disp *customReceiverDispatcher) Flush() { - disp.innerReceiver.Flush() -} - -// CustomReceiver implementation. Check CustomReceiver comments. 
-func (disp *customReceiverDispatcher) Close() error { - disp.innerReceiver.Flush() - - err := disp.innerReceiver.Close() - if err != nil { - return err - } - - return nil -} - -func (disp *customReceiverDispatcher) String() string { - datas := "" - skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs)) - for i := range disp.usedArgs.XmlCustomAttrs { - skeys = append(skeys, i) - } - sort.Strings(skeys) - for _, key := range skeys { - datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key]) - } - - str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n", - disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver) - - return str -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_dispatcher.go b/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_dispatcher.go deleted file mode 100644 index 2bd3b4a4..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_dispatcher.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "io" -) - -// A dispatcherInterface is used to dispatch message to all underlying receivers. -// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc. -// Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs -// an immediate cleanup of all data that is stored in the receivers -type dispatcherInterface interface { - flusherInterface - io.Closer - Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) -} - -type dispatcher struct { - formatter *formatter - writers []*formattedWriter - dispatchers []dispatcherInterface -} - -// Creates a dispatcher which dispatches data to a list of receivers. 
-// Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned -func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if receivers == nil || len(receivers) == 0 { - return nil, errors.New("receivers cannot be nil or empty") - } - - disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)} - for _, receiver := range receivers { - writer, ok := receiver.(*formattedWriter) - if ok { - disp.writers = append(disp.writers, writer) - continue - } - - ioWriter, ok := receiver.(io.Writer) - if ok { - writer, err := NewFormattedWriter(ioWriter, disp.formatter) - if err != nil { - return nil, err - } - disp.writers = append(disp.writers, writer) - continue - } - - dispInterface, ok := receiver.(dispatcherInterface) - if ok { - disp.dispatchers = append(disp.dispatchers, dispInterface) - continue - } - - return nil, errors.New("method can receive either io.Writer or dispatcherInterface") - } - - return disp, nil -} - -func (disp *dispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - - for _, writer := range disp.writers { - err := writer.Write(message, level, context) - if err != nil { - errorFunc(err) - } - } - - for _, dispInterface := range disp.dispatchers { - dispInterface.Dispatch(message, level, context, errorFunc) - } -} - -// Flush goes through all underlying writers which implement flusherInterface interface -// and closes them. Recursively performs the same action for underlying dispatchers -func (disp *dispatcher) Flush() { - for _, disp := range disp.Dispatchers() { - disp.Flush() - } - - for _, formatWriter := range disp.Writers() { - flusher, ok := formatWriter.Writer().(flusherInterface) - if ok { - flusher.Flush() - } - } -} - -// Close goes through all underlying writers which implement io.Closer interface -// and closes them. 
Recursively performs the same action for underlying dispatchers -// Before closing, writers are flushed to prevent loss of any buffered data, so -// a call to Flush() func before Close() is not necessary -func (disp *dispatcher) Close() error { - for _, disp := range disp.Dispatchers() { - disp.Flush() - err := disp.Close() - if err != nil { - return err - } - } - - for _, formatWriter := range disp.Writers() { - flusher, ok := formatWriter.Writer().(flusherInterface) - if ok { - flusher.Flush() - } - - closer, ok := formatWriter.Writer().(io.Closer) - if ok { - err := closer.Close() - if err != nil { - return err - } - } - } - - return nil -} - -func (disp *dispatcher) Writers() []*formattedWriter { - return disp.writers -} - -func (disp *dispatcher) Dispatchers() []dispatcherInterface { - return disp.dispatchers -} - -func (disp *dispatcher) String() string { - str := "formatter: " + disp.formatter.String() + "\n" - - str += " ->Dispatchers:" - - if len(disp.dispatchers) == 0 { - str += "none\n" - } else { - str += "\n" - - for _, disp := range disp.dispatchers { - str += fmt.Sprintf(" ->%s", disp) - } - } - - str += " ->Writers:" - - if len(disp.writers) == 0 { - str += "none\n" - } else { - str += "\n" - - for _, writer := range disp.writers { - str += fmt.Sprintf(" ->%s\n", writer) - } - } - - return str -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go b/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go deleted file mode 100644 index 9de8a722..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// A filterDispatcher writes the given message to underlying receivers only if message log level -// is in the allowed list. -type filterDispatcher struct { - *dispatcher - allowList map[LogLevel]bool -} - -// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels. 
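For illustration only, here is a minimal sketch of wiring a filterDispatcher so that only Error and Critical records reach a console writer. It reuses only constructors that appear elsewhere in these sources (NewConsoleWriter, NewFormatter, NewFilterDispatcher, NewMinMaxConstraints, NewLoggerConfig, NewAsyncLoopLogger, ReplaceLogger); the format string is arbitrary, the ignored errors are for brevity, and passing nil for the exception list is an assumption.

    package main

    import log "github.com/cihub/seelog"

    func main() {
        defer log.Flush()

        // Errors ignored for brevity in this sketch.
        consoleWriter, _ := log.NewConsoleWriter()
        formatter, _ := log.NewFormatter("%Time [%LEVEL] %Msg%n")

        // Only Error and Critical messages pass this dispatcher.
        root, _ := log.NewFilterDispatcher(formatter, []interface{}{consoleWriter}, log.ErrorLvl, log.CriticalLvl)

        // Top-level constraints allow everything; nil exceptions assumed acceptable.
        constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
        logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, nil, root))
        log.ReplaceLogger(logger)

        log.Info("filtered out by the dispatcher")
        log.Error("this one reaches the console")
    }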
-func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) { - disp, err := createDispatcher(formatter, receivers) - if err != nil { - return nil, err - } - - allows := make(map[LogLevel]bool) - for _, allowLevel := range allowList { - allows[allowLevel] = true - } - - return &filterDispatcher{disp, allows}, nil -} - -func (filter *filterDispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - isAllowed, ok := filter.allowList[level] - if ok && isAllowed { - filter.dispatcher.Dispatch(message, level, context, errorFunc) - } -} - -func (filter *filterDispatcher) String() string { - return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go b/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go deleted file mode 100644 index 1d0fe7ea..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.) -type splitDispatcher struct { - *dispatcher -} - -func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) { - disp, err := createDispatcher(formatter, receivers) - if err != nil { - return nil, err - } - - return &splitDispatcher{disp}, nil -} - -func (splitter *splitDispatcher) String() string { - return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String()) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/doc.go b/pkg/c8y/vendor/github.com/cihub/seelog/doc.go deleted file mode 100644 index 2734c9cb..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/doc.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2014 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package seelog implements logging functionality with flexible dispatching, filtering, and formatting. - -Creation - -To create a logger, use one of the following constructors: - func LoggerFromConfigAsBytes - func LoggerFromConfigAsFile - func LoggerFromConfigAsString - func LoggerFromWriterWithMinLevel - func LoggerFromWriterWithMinLevelAndFormat - func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers) -Example: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - defer logger.Flush() - ... use logger ... - } -The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some -messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you -explicitly defer flushing all messages before closing. - -Usage - -Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs. -Example: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - defer logger.Flush() - logger.Trace("test") - logger.Debugf("var = %s", "abc") - } - -Having loggers as variables is convenient if you are writing your own package with internal logging or if you have -several loggers with different options. -But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level -var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - log.ReplaceLogger(logger) - defer log.Flush() - log.Trace("test") - log.Debugf("var = %s", "abc") - } -Last lines - log.Trace("test") - log.Debugf("var = %s", "abc") -do the same as - log.Current.Trace("test") - log.Current.Debugf("var = %s", "abc") -In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config. -This way you are able to use package level funcs instead of passing the logger variable. 
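The constructor list above also mentions LoggerFromCustomReceiver, which relies on the CustomReceiver interface and RegisterReceiver documented in these sources. As an illustrative sketch only (the stdoutReceiver type and the "stdout" name are hypothetical; only the exported identifiers shown in these sources are assumed), implementing and registering such a receiver could look like this:

    package main

    import (
        "fmt"

        log "github.com/cihub/seelog"
    )

    // stdoutReceiver is a hypothetical CustomReceiver that prints every
    // already-formatted message to standard output.
    type stdoutReceiver struct{}

    func (r *stdoutReceiver) ReceiveMessage(message string, level log.LogLevel, context log.LogContextInterface) error {
        // The message arrives already formatted by the parent dispatcher.
        _, err := fmt.Print(message)
        return err
    }

    func (r *stdoutReceiver) AfterParse(initArgs log.CustomReceiverInitArgs) error {
        // No config-time setup is needed for this sketch.
        return nil
    }

    func (r *stdoutReceiver) Flush() {}

    func (r *stdoutReceiver) Close() error { return nil }

    func init() {
        // Make the type known under the name "stdout" so a config item can refer to it.
        log.RegisterReceiver("stdout", &stdoutReceiver{})
    }

    func main() {
        defer log.Flush()
        log.Info("Hello from the default logger")
    }

Whether such a receiver is then referenced from an xml config or handed directly to LoggerFromCustomReceiver is covered by the custom-receivers wiki page linked above.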
- -Configuration - -Main seelog point is to configure logger via config files and not the code. -The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try -to create a logger using it. - -All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki. -There are many sections covering different aspects of seelog, but the most important for understanding configs are: - https://github.com/cihub/seelog/wiki/Constraints-and-exceptions - https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers - https://github.com/cihub/seelog/wiki/Formatting - https://github.com/cihub/seelog/wiki/Logger-types -After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date -list of dispatchers, receivers, formats, and logger types. - -Here is an example config with all these features: - - - - - - - - - - - - - - - - - - - - - -This config represents a logger with adaptive timeout between log messages (check logger types reference) which -logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only -use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test' -this logger prohibits all levels below 'error'. - -Configuration using code - -Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what -you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New* -functions in this package are used to provide such capabilities. - -Here is an example of configuration in code, that demonstrates an async loop logger that logs to a simple split dispatcher with -a console receiver using a specified format and is filtered using a top-level min-max constraints and one expection for -the 'main.go' file. So, this is basically a demonstration of configuration of most of the features: - - package main - - import log "github.com/cihub/seelog" - - func main() { - defer log.Flush() - log.Info("Hello from Seelog!") - - consoleWriter, _ := log.NewConsoleWriter() - formatter, _ := log.NewFormatter("%Level %Msg %File%n") - root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter}) - constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl) - specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl}) - ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints) - exceptions := []*log.LogLevelException{ex} - - logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root)) - log.ReplaceLogger(logger) - - log.Trace("This should not be seen") - log.Debug("This should not be seen") - log.Info("Test") - log.Error("Test2") - } - -Examples - -To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples -It contains many example configs and usecases. -*/ -package seelog diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/format.go b/pkg/c8y/vendor/github.com/cihub/seelog/format.go deleted file mode 100644 index ec47b457..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/format.go +++ /dev/null @@ -1,466 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// FormatterSymbol is a special symbol used in config files to mark special format aliases. -const ( - FormatterSymbol = '%' -) - -const ( - formatterParameterStart = '(' - formatterParameterEnd = ')' -) - -// Time and date formats used for %Date and %Time aliases. -const ( - DateDefaultFormat = "2006-01-02" - TimeFormat = "15:04:05" -) - -var DefaultMsgFormat = "%Ns [%Level] %Msg%n" - -var ( - DefaultFormatter *formatter - msgonlyformatter *formatter -) - -func init() { - var err error - if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil { - reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err)) - } - if msgonlyformatter, err = NewFormatter("%Msg"); err != nil { - reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err)) - } -} - -// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute -// of the 'format' config item. These special symbols are replaced with context values or special -// strings when message is written to byte receiver. -// -// Check https://github.com/cihub/seelog/wiki/Formatting for details. -// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference -// -// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object -// that can be evaluated as string. -type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{} - -// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized -// formatters (such as %Date or %EscM) and custom user formatters. 
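As a brief hedged illustration of the FormatterFunc / FormatterFuncCreator pair described here (the "CustomHostname" name is made up; only RegisterCustomFormatter, FormatterFunc, LogLevel, and LogContextInterface from these sources are assumed), a parameterless custom formatter might be registered like this:

    package logsetup

    import (
        "os"

        log "github.com/cihub/seelog"
    )

    func init() {
        // After registration, %CustomHostname in a format string resolves to the machine's hostname.
        err := log.RegisterCustomFormatter("CustomHostname", func(param string) log.FormatterFunc {
            return func(message string, level log.LogLevel, context log.LogContextInterface) interface{} {
                host, hostErr := os.Hostname()
                if hostErr != nil {
                    return "unknown-host"
                }
                return host
            }
        })
        if err != nil {
            panic(err)
        }
    }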
-type FormatterFuncCreator func(param string) FormatterFunc - -var formatterFuncs = map[string]FormatterFunc{ - "Level": formatterLevel, - "Lev": formatterLev, - "LEVEL": formatterLEVEL, - "LEV": formatterLEV, - "l": formatterl, - "Msg": formatterMsg, - "FullPath": formatterFullPath, - "File": formatterFile, - "RelFile": formatterRelFile, - "Func": FormatterFunction, - "FuncShort": FormatterFunctionShort, - "Line": formatterLine, - "Time": formatterTime, - "UTCTime": formatterUTCTime, - "Ns": formatterNs, - "UTCNs": formatterUTCNs, - "r": formatterr, - "n": formattern, - "t": formattert, -} - -var formatterFuncsParameterized = map[string]FormatterFuncCreator{ - "Date": createDateTimeFormatterFunc, - "UTCDate": createUTCDateTimeFormatterFunc, - "EscM": createANSIEscapeFunc, -} - -func errorAliasReserved(name string) error { - return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name) -} - -// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil, -// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and -// it will be treated like the standard parameterized formatter identifiers. -// -// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation -// is to call it once in 'init' func of your application or any initializer func. -// -// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters. -// -// Name must only consist of letters (unicode.IsLetter). -// -// Name must not be one of the already registered standard formatter names -// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered -// custom format names. To avoid any potential name conflicts (in future releases), it is recommended -// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword. -func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error { - if _, ok := formatterFuncs[name]; ok { - return errorAliasReserved(name) - } - if _, ok := formatterFuncsParameterized[name]; ok { - return errorAliasReserved(name) - } - formatterFuncsParameterized[name] = creator - return nil -} - -// formatter is used to write messages in a specific format, inserting such additional data -// as log level, date/time, etc. -type formatter struct { - fmtStringOriginal string - fmtString string - formatterFuncs []FormatterFunc -} - -// NewFormatter creates a new formatter using a format string -func NewFormatter(formatString string) (*formatter, error) { - fmtr := new(formatter) - fmtr.fmtStringOriginal = formatString - if err := buildFormatterFuncs(fmtr); err != nil { - return nil, err - } - return fmtr, nil -} - -func buildFormatterFuncs(formatter *formatter) error { - var ( - fsbuf = new(bytes.Buffer) - fsolm1 = len(formatter.fmtStringOriginal) - 1 - ) - for i := 0; i <= fsolm1; i++ { - if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol { - fsbuf.WriteByte(char) - continue - } - // Check if the index is at the end of the string. - if i == fsolm1 { - return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol) - } - // Check if the formatter symbol is doubled and skip it as nonmatching. 
- if formatter.fmtStringOriginal[i+1] == FormatterSymbol { - fsbuf.WriteRune(FormatterSymbol) - i++ - continue - } - function, ni, err := formatter.extractFormatterFunc(i + 1) - if err != nil { - return err - } - // Append formatting string "%v". - fsbuf.Write([]byte{37, 118}) - i = ni - formatter.formatterFuncs = append(formatter.formatterFuncs, function) - } - formatter.fmtString = fsbuf.String() - return nil -} - -func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) { - letterSequence := formatter.extractLetterSequence(index) - if len(letterSequence) == 0 { - return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index) - } - - function, formatterLength, ok := formatter.findFormatterFunc(letterSequence) - if ok { - return function, index + formatterLength - 1, nil - } - - function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index) - if err != nil { - return nil, 0, err - } - if ok { - return function, index + formatterLength - 1, nil - } - - return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence) -} - -func (formatter *formatter) extractLetterSequence(index int) string { - letters := "" - - bytesToParse := []byte(formatter.fmtStringOriginal[index:]) - runeCount := utf8.RuneCount(bytesToParse) - for i := 0; i < runeCount; i++ { - rune, runeSize := utf8.DecodeRune(bytesToParse) - bytesToParse = bytesToParse[runeSize:] - - if unicode.IsLetter(rune) { - letters += string(rune) - } else { - break - } - } - return letters -} - -func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) { - currentVerb := letters - for i := 0; i < len(letters); i++ { - function, ok := formatterFuncs[currentVerb] - if ok { - return function, len(currentVerb), ok - } - currentVerb = currentVerb[:len(currentVerb)-1] - } - - return nil, 0, false -} - -func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) { - currentVerb := letters - for i := 0; i < len(letters); i++ { - functionCreator, ok := formatterFuncsParameterized[currentVerb] - if ok { - parameter := "" - parameterLen := 0 - isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless - if isVerbEqualsLetters { - userParameter := "" - var err error - userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb)) - if ok { - parameter = userParameter - } else if err != nil { - return nil, 0, false, err - } - } - - return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil - } - - currentVerb = currentVerb[:len(currentVerb)-1] - } - - return nil, 0, false, nil -} - -func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) { - if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart { - return "", 0, false, nil - } - - endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd)) - if endIndex == -1 { - return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s", - startIndex, formatter.fmtStringOriginal[startIndex:]) - } - endIndex += startIndex - - length := endIndex - startIndex + 1 - - return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil -} - -// Format processes a message with special formatters, log 
level, and context. Returns formatted string -// with all formatter identifiers changed to appropriate values. -func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string { - if len(formatter.formatterFuncs) == 0 { - return formatter.fmtString - } - - params := make([]interface{}, len(formatter.formatterFuncs)) - for i, function := range formatter.formatterFuncs { - params[i] = function(message, level, context) - } - - return fmt.Sprintf(formatter.fmtString, params...) -} - -func (formatter *formatter) String() string { - return formatter.fmtStringOriginal -} - -//===================================================== - -const ( - wrongLogLevel = "WRONG_LOGLEVEL" - wrongEscapeCode = "WRONG_ESCAPE" -) - -var levelToString = map[LogLevel]string{ - TraceLvl: "Trace", - DebugLvl: "Debug", - InfoLvl: "Info", - WarnLvl: "Warn", - ErrorLvl: "Error", - CriticalLvl: "Critical", - Off: "Off", -} - -var levelToShortString = map[LogLevel]string{ - TraceLvl: "Trc", - DebugLvl: "Dbg", - InfoLvl: "Inf", - WarnLvl: "Wrn", - ErrorLvl: "Err", - CriticalLvl: "Crt", - Off: "Off", -} - -var levelToShortestString = map[LogLevel]string{ - TraceLvl: "t", - DebugLvl: "d", - InfoLvl: "i", - WarnLvl: "w", - ErrorLvl: "e", - CriticalLvl: "c", - Off: "o", -} - -func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} { - levelStr, ok := levelToString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} { - levelStr, ok := levelToShortString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} { - return strings.ToTitle(formatterLevel(message, level, context).(string)) -} - -func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} { - return strings.ToTitle(formatterLev(message, level, context).(string)) -} - -func formatterl(message string, level LogLevel, context LogContextInterface) interface{} { - levelStr, ok := levelToShortestString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} { - return message -} - -func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} { - return context.FullPath() -} - -func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} { - return context.FileName() -} - -func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} { - return context.ShortPath() -} - -func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} { - return context.Func() -} - -func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} { - f := context.Func() - spl := strings.Split(f, ".") - return spl[len(spl)-1] -} - -func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} { - return context.Line() -} - -func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().Format(TimeFormat) -} - -func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().Format(TimeFormat) -} - -func formatterNs(message string, level LogLevel, context 
LogContextInterface) interface{} { - return context.CallTime().UnixNano() -} - -func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().UnixNano() -} - -func formatterr(message string, level LogLevel, context LogContextInterface) interface{} { - return "\r" -} - -func formattern(message string, level LogLevel, context LogContextInterface) interface{} { - return "\n" -} - -func formattert(message string, level LogLevel, context LogContextInterface) interface{} { - return "\t" -} - -func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { - format := dateTimeFormat - if format == "" { - format = DateDefaultFormat - } - return func(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().Format(format) - } -} - -func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { - format := dateTimeFormat - if format == "" { - format = DateDefaultFormat - } - return func(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().Format(format) - } -} - -func createANSIEscapeFunc(escapeCodeString string) FormatterFunc { - return func(message string, level LogLevel, context LogContextInterface) interface{} { - if len(escapeCodeString) == 0 { - return wrongEscapeCode - } - - return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString) - } -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/internals_baseerror.go b/pkg/c8y/vendor/github.com/cihub/seelog/internals_baseerror.go deleted file mode 100644 index c0b271d7..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/internals_baseerror.go +++ /dev/null @@ -1,10 +0,0 @@ -package seelog - -// Base struct for custom errors. -type baseError struct { - message string -} - -func (be baseError) Error() string { - return be.message -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/internals_fsutils.go b/pkg/c8y/vendor/github.com/cihub/seelog/internals_fsutils.go deleted file mode 100644 index c0a0e0e4..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/internals_fsutils.go +++ /dev/null @@ -1,320 +0,0 @@ -package seelog - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sync" -) - -// File and directory permitions. -const ( - defaultFilePermissions = 0666 - defaultDirectoryPermissions = 0767 -) - -const ( - // Max number of directories can be read asynchronously. - maxDirNumberReadAsync = 1000 -) - -type cannotOpenFileError struct { - baseError -} - -func newCannotOpenFileError(fname string) *cannotOpenFileError { - return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}} -} - -type notDirectoryError struct { - baseError -} - -func newNotDirectoryError(dname string) *notDirectoryError { - return ¬DirectoryError{baseError{message: dname + " is not directory"}} -} - -// fileFilter is a filtering criteria function for '*os.File'. -// Must return 'false' to set aside the given file. -type fileFilter func(os.FileInfo, *os.File) bool - -// filePathFilter is a filtering creteria function for file path. -// Must return 'false' to set aside the given file. -type filePathFilter func(filePath string) bool - -// GetSubdirNames returns a list of directories found in -// the given one with dirPath. -func getSubdirNames(dirPath string) ([]string, error) { - fi, err := os.Stat(dirPath) - if err != nil { - return nil, err - } - if !fi.IsDir() { - return nil, newNotDirectoryError(dirPath) - } - dd, err := os.Open(dirPath) - // Cannot open file. 
- if err != nil { - if dd != nil { - dd.Close() - } - return nil, err - } - defer dd.Close() - // TODO: Improve performance by buffering reading. - allEntities, err := dd.Readdir(-1) - if err != nil { - return nil, err - } - subDirs := []string{} - for _, entity := range allEntities { - if entity.IsDir() { - subDirs = append(subDirs, entity.Name()) - } - } - return subDirs, nil -} - -// getSubdirAbsPaths recursively visit all the subdirectories -// starting from the given directory and returns absolute paths for them. -func getAllSubdirAbsPaths(dirPath string) (res []string, err error) { - dps, err := getSubdirAbsPaths(dirPath) - if err != nil { - res = []string{} - return - } - res = append(res, dps...) - for _, dp := range dps { - sdps, err := getAllSubdirAbsPaths(dp) - if err != nil { - return []string{}, err - } - res = append(res, sdps...) - } - return -} - -// getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory. -// Input: (I1) dirPath - absolute path of a directory in question. -// Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation. -// Remark: If error (O2) is non-nil then (O1) is nil and vice versa. -func getSubdirAbsPaths(dirPath string) ([]string, error) { - sdns, err := getSubdirNames(dirPath) - if err != nil { - return nil, err - } - rsdns := []string{} - for _, sdn := range sdns { - rsdns = append(rsdns, filepath.Join(dirPath, sdn)) - } - return rsdns, nil -} - -// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory. -// Remark: Ignores files for which fileFilter returns false -func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) { - dfi, err := os.Open(dirPath) - if err != nil { - return nil, newCannotOpenFileError("Cannot open directory " + dirPath) - } - defer dfi.Close() - // Size of read buffer (i.e. chunk of items read at a time). - rbs := 64 - resFiles := []*os.File{} -L: - for { - // Read directory entities by reasonable chuncks - // to prevent overflows on big number of files. - fis, e := dfi.Readdir(rbs) - switch e { - // It's OK. - case nil: - // Do nothing, just continue cycle. - case io.EOF: - break L - // Something went wrong. - default: - return nil, e - } - // THINK: Maybe, use async running. - for _, fi := range fis { - // NB: On Linux this could be a problem as - // there are lots of file types available. - if !fi.IsDir() { - f, e := os.Open(filepath.Join(dirPath, fi.Name())) - if e != nil { - if f != nil { - f.Close() - } - // THINK: Add nil as indicator that a problem occurred. - resFiles = append(resFiles, nil) - continue - } - // Check filter condition. - if fFilter != nil && !fFilter(fi, f) { - continue - } - resFiles = append(resFiles, f) - } - } - } - return resFiles, nil -} - -func isRegular(m os.FileMode) bool { - return m&os.ModeType == 0 -} - -// getDirFilePaths return full paths of the files located in the directory. -// Remark: Ignores files for which fileFilter returns false. -func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) { - dfi, err := os.Open(dirPath) - if err != nil { - return nil, newCannotOpenFileError("Cannot open directory " + dirPath) - } - defer dfi.Close() - - var absDirPath string - if !filepath.IsAbs(dirPath) { - absDirPath, err = filepath.Abs(dirPath) - if err != nil { - return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error()) - } - } else { - absDirPath = dirPath - } - - // TODO: check if dirPath is really directory. 
- // Size of read buffer (i.e. chunk of items read at a time). - rbs := 2 << 5 - filePaths := []string{} - - var fp string -L: - for { - // Read directory entities by reasonable chuncks - // to prevent overflows on big number of files. - fis, e := dfi.Readdir(rbs) - switch e { - // It's OK. - case nil: - // Do nothing, just continue cycle. - case io.EOF: - break L - // Indicate that something went wrong. - default: - return nil, e - } - // THINK: Maybe, use async running. - for _, fi := range fis { - // NB: Should work on every Windows and non-Windows OS. - if isRegular(fi.Mode()) { - if pathIsName { - fp = fi.Name() - } else { - // Build full path of a file. - fp = filepath.Join(absDirPath, fi.Name()) - } - // Check filter condition. - if fpFilter != nil && !fpFilter(fp) { - continue - } - filePaths = append(filePaths, fp) - } - } - } - return filePaths, nil -} - -// getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs -// in map 'filesInDirMap': Key - directory name, value - *os.File slice. -func getOpenFilesByDirectoryAsync( - dirPaths []string, - fFilter fileFilter, - filesInDirMap map[string][]*os.File, -) error { - n := len(dirPaths) - if n > maxDirNumberReadAsync { - return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync) - } - type filesInDirResult struct { - DirName string - Files []*os.File - Error error - } - dirFilesChan := make(chan *filesInDirResult, n) - var wg sync.WaitGroup - // Register n goroutines which are going to do work. - wg.Add(n) - for i := 0; i < n; i++ { - // Launch asynchronously the piece of work. - go func(dirPath string) { - fs, e := getOpenFilesInDir(dirPath, fFilter) - dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e} - // Mark the current goroutine as finished (work is done). - wg.Done() - }(dirPaths[i]) - } - // Wait for all goroutines to finish their work. - wg.Wait() - // Close the error channel to let for-range clause - // get all the buffered values without blocking and quit in the end. - close(dirFilesChan) - for fidr := range dirFilesChan { - if fidr.Error == nil { - // THINK: What will happen if the key is already present? - filesInDirMap[fidr.DirName] = fidr.Files - } else { - return fidr.Error - } - } - return nil -} - -// fileExists return flag whether a given file exists -// and operation error if an unclassified failure occurs. -func fileExists(path string) (bool, error) { - _, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - return true, nil -} - -// createDirectory makes directory with a given name -// making all parent directories if necessary. -func createDirectory(dirPath string) error { - var dPath string - var err error - if !filepath.IsAbs(dirPath) { - dPath, err = filepath.Abs(dirPath) - if err != nil { - return err - } - } else { - dPath = dirPath - } - exists, err := fileExists(dPath) - if err != nil { - return err - } - if exists { - return nil - } - return os.MkdirAll(dPath, os.ModeDir) -} - -// tryRemoveFile gives a try removing the file -// only ignoring an error when the file does not exist. 
-func tryRemoveFile(filePath string) (err error) { - err = os.Remove(filePath) - if os.IsNotExist(err) { - err = nil - return - } - return -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/internals_xmlnode.go b/pkg/c8y/vendor/github.com/cihub/seelog/internals_xmlnode.go deleted file mode 100644 index 98588493..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/internals_xmlnode.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "strings" -) - -type xmlNode struct { - name string - attributes map[string]string - children []*xmlNode - value string -} - -func newNode() *xmlNode { - node := new(xmlNode) - node.children = make([]*xmlNode, 0) - node.attributes = make(map[string]string) - return node -} - -func (node *xmlNode) String() string { - str := fmt.Sprintf("<%s", node.name) - - for attrName, attrVal := range node.attributes { - str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal) - } - - str += ">" - str += node.value - - if len(node.children) != 0 { - for _, child := range node.children { - str += fmt.Sprintf("%s", child) - } - } - - str += fmt.Sprintf("", node.name) - - return str -} - -func (node *xmlNode) unmarshal(startEl xml.StartElement) error { - node.name = startEl.Name.Local - - for _, v := range startEl.Attr { - _, alreadyExists := node.attributes[v.Name.Local] - if alreadyExists { - return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'") - } - node.attributes[v.Name.Local] = v.Value - } - - return nil -} - -func (node *xmlNode) add(child *xmlNode) { - if node.children == nil { - node.children = make([]*xmlNode, 0) - } - - node.children = append(node.children, child) -} - -func (node *xmlNode) hasChildren() bool { - return node.children != nil && len(node.children) > 0 -} - -//============================================= - -func unmarshalConfig(reader io.Reader) (*xmlNode, error) { - xmlParser := xml.NewDecoder(reader) - - config, err := unmarshalNode(xmlParser, nil) - if err != nil { - return nil, err - } - if config == nil { - return nil, errors.New("xml has no content") - } - - nextConfigEntry, err := unmarshalNode(xmlParser, nil) - if nextConfigEntry != nil { - return nil, errors.New("xml contains more than one root element") - } - - return config, nil -} - -func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) { - firstLoop := true - for { - var tok xml.Token - if firstLoop && curToken != nil { - tok = curToken - firstLoop = false - } else { - tok, err = getNextToken(xmlParser) - if err != nil || tok == nil { - return - } - } - - switch tt := tok.(type) { - case xml.SyntaxError: - err = errors.New(tt.Error()) - return - case xml.CharData: - value := strings.TrimSpace(string([]byte(tt))) - if node != nil { - node.value += value - } - case xml.StartElement: - if node == nil { - node = newNode() - err := node.unmarshal(tt) - if err != nil { - return nil, err - } - } else { - childNode, childErr := unmarshalNode(xmlParser, tok) - if childErr != nil { - return nil, childErr - } - - if childNode != nil { - node.add(childNode) - } else { - return - } - } - case xml.EndElement: - return - } - } -} - -func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) { - if tok, err = xmlParser.Token(); err != nil { - if err == io.EOF { - err = nil - return - } - return - } - - return -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/log.go b/pkg/c8y/vendor/github.com/cihub/seelog/log.go deleted file mode 100644 index f775e1fd..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/log.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "sync" - "time" -) - -const ( - staticFuncCallDepth = 3 // See 'commonLogger.log' method comments - loggerFuncCallDepth = 3 -) - -// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc. -var Current LoggerInterface - -// Default logger that is created from an empty config: "". It is not closed by a ReplaceLogger call. -var Default LoggerInterface - -// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call. -var Disabled LoggerInterface - -var pkgOperationsMutex *sync.Mutex - -func init() { - pkgOperationsMutex = new(sync.Mutex) - var err error - - if Default == nil { - Default, err = LoggerFromConfigAsBytes([]byte("")) - } - - if Disabled == nil { - Disabled, err = LoggerFromConfigAsBytes([]byte("")) - } - - if err != nil { - panic(fmt.Sprintf("Seelog couldn't start. 
Error: %s", err.Error())) - } - - Current = Default -} - -func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) { - if config.LogType == syncloggerTypeFromString { - return NewSyncLogger(&config.logConfig), nil - } else if config.LogType == asyncLooploggerTypeFromString { - return NewAsyncLoopLogger(&config.logConfig), nil - } else if config.LogType == asyncTimerloggerTypeFromString { - logData := config.LoggerData - if logData == nil { - return nil, errors.New("async timer data not set") - } - - asyncInt, ok := logData.(asyncTimerLoggerData) - if !ok { - return nil, errors.New("invalid async timer data") - } - - logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval)) - if !ok { - return nil, err - } - - return logger, nil - } else if config.LogType == adaptiveLoggerTypeFromString { - logData := config.LoggerData - if logData == nil { - return nil, errors.New("adaptive logger parameters not set") - } - - adaptData, ok := logData.(adaptiveLoggerData) - if !ok { - return nil, errors.New("invalid adaptive logger parameters") - } - - logger, err := NewAsyncAdaptiveLogger( - &config.logConfig, - time.Duration(adaptData.MinInterval), - time.Duration(adaptData.MaxInterval), - adaptData.CriticalMsgCount, - ) - if err != nil { - return nil, err - } - - return logger, nil - } - return nil, errors.New("invalid config log type/data") -} - -// UseLogger sets the 'Current' package level logger variable to the specified value. -// This variable is used in all Trace/Debug/... package level convenience funcs. -// -// Example: -// -// after calling -// seelog.UseLogger(somelogger) -// the following: -// seelog.Debug("abc") -// will be equal to -// somelogger.Debug("abc") -// -// IMPORTANT: UseLogger do NOT close the previous logger (only flushes it). So if -// you constantly use it to replace loggers and don't close them in other code, you'll -// end up having memory leaks. -// -// To safely replace loggers, use ReplaceLogger. -func UseLogger(logger LoggerInterface) error { - if logger == nil { - return errors.New("logger can not be nil") - } - - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - - oldLogger := Current - Current = logger - - if oldLogger != nil { - oldLogger.Flush() - } - - return nil -} - -// ReplaceLogger acts as UseLogger but the logger that was previously -// used is disposed (except Default and Disabled loggers). -// -// Example: -// import log "github.com/cihub/seelog" -// -// func main() { -// logger, err := log.LoggerFromConfigAsFile("seelog.xml") -// -// if err != nil { -// panic(err) -// } -// -// log.ReplaceLogger(logger) -// defer log.Flush() -// -// log.Trace("test") -// log.Debugf("var = %s", "abc") -// } -func ReplaceLogger(logger LoggerInterface) error { - if logger == nil { - return errors.New("logger can not be nil") - } - - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - - defer func() { - if err := recover(); err != nil { - reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err)) - } - }() - - if Current == Default { - Current.Flush() - } else if Current != nil && !Current.Closed() && Current != Disabled { - Current.Flush() - Current.Close() - } - - Current = logger - - return nil -} - -// Tracef formats message according to format specifier -// and writes to default logger with log level = Trace. 
-func Tracef(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Debugf formats message according to format specifier -// and writes to default logger with log level = Debug. -func Debugf(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Infof formats message according to format specifier -// and writes to default logger with log level = Info. -func Infof(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Warnf formats message according to format specifier and writes to default logger with log level = Warn -func Warnf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.warnWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Errorf formats message according to format specifier and writes to default logger with log level = Error -func Errorf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.errorWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Criticalf formats message according to format specifier and writes to default logger with log level = Critical -func Criticalf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.criticalWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace -func Trace(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug -func Debug(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Info formats message using the default formats for its operands and writes to default logger with log level = Info -func Info(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn -func Warn(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.warnWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Error formats message using the default formats for its operands and writes to default logger with log level = Error -func Error(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.errorWithCallDepth(staticFuncCallDepth, message) 
- return errors.New(message.String()) -} - -// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical -func Critical(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.criticalWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Flush immediately processes all currently queued messages and all currently buffered messages. -// It is a blocking call which returns only after the queue is empty and all the buffers are empty. -// -// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '' receivers) -// , because there is no queue. -// -// Call this method when your app is going to shut down not to lose any log messages. -func Flush() { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.Flush() -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/logger.go b/pkg/c8y/vendor/github.com/cihub/seelog/logger.go deleted file mode 100644 index fc96aed4..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/logger.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "os" - "sync" -) - -func reportInternalError(err error) { - fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err) -} - -// LoggerInterface represents structs capable of logging Seelog messages -type LoggerInterface interface { - - // Tracef formats message according to format specifier - // and writes to log with level = Trace. - Tracef(format string, params ...interface{}) - - // Debugf formats message according to format specifier - // and writes to log with level = Debug. - Debugf(format string, params ...interface{}) - - // Infof formats message according to format specifier - // and writes to log with level = Info. - Infof(format string, params ...interface{}) - - // Warnf formats message according to format specifier - // and writes to log with level = Warn. 
- Warnf(format string, params ...interface{}) error - - // Errorf formats message according to format specifier - // and writes to log with level = Error. - Errorf(format string, params ...interface{}) error - - // Criticalf formats message according to format specifier - // and writes to log with level = Critical. - Criticalf(format string, params ...interface{}) error - - // Trace formats message using the default formats for its operands - // and writes to log with level = Trace - Trace(v ...interface{}) - - // Debug formats message using the default formats for its operands - // and writes to log with level = Debug - Debug(v ...interface{}) - - // Info formats message using the default formats for its operands - // and writes to log with level = Info - Info(v ...interface{}) - - // Warn formats message using the default formats for its operands - // and writes to log with level = Warn - Warn(v ...interface{}) error - - // Error formats message using the default formats for its operands - // and writes to log with level = Error - Error(v ...interface{}) error - - // Critical formats message using the default formats for its operands - // and writes to log with level = Critical - Critical(v ...interface{}) error - - traceWithCallDepth(callDepth int, message fmt.Stringer) - debugWithCallDepth(callDepth int, message fmt.Stringer) - infoWithCallDepth(callDepth int, message fmt.Stringer) - warnWithCallDepth(callDepth int, message fmt.Stringer) - errorWithCallDepth(callDepth int, message fmt.Stringer) - criticalWithCallDepth(callDepth int, message fmt.Stringer) - - // Close flushes all the messages in the logger and closes it. It cannot be used after this operation. - Close() - - // Flush flushes all the messages in the logger. - Flush() - - // Closed returns true if the logger was previously closed. - Closed() bool - - // SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller - // when getting function information needed to print seelog format identifiers such as %Func or %File. - // - // This func may be used when you wrap seelog funcs and want to print caller info of you own - // wrappers instead of seelog func callers. In this case you should set depth = 1. If you then - // wrap your wrapper, you should set depth = 2, etc. - // - // NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect - // function/file names in log files. Do not use it if you are not going to wrap seelog funcs. - // You may reset the value to default using a SetAdditionalStackDepth(0) call. - SetAdditionalStackDepth(depth int) error - - // Sets logger context that can be used in formatter funcs and custom receivers - SetContext(context interface{}) -} - -// innerLoggerInterface is an internal logging interface -type innerLoggerInterface interface { - innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer) - Flush() -} - -// [file path][func name][level] -> [allowed] -type allowedContextCache map[string]map[string]map[LogLevel]bool - -// commonLogger contains all common data needed for logging and contains methods used to log messages. -type commonLogger struct { - config *logConfig // Config used for logging - contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets - closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. 
Must be accessed while holding closedM - closedM sync.RWMutex - m sync.Mutex // Mutex for main operations - unusedLevels []bool - innerLogger innerLoggerInterface - addStackDepth int // Additional stack depth needed for correct seelog caller context detection - customContext interface{} -} - -func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger { - cLogger := new(commonLogger) - - cLogger.config = config - cLogger.contextCache = make(allowedContextCache) - cLogger.unusedLevels = make([]bool, Off) - cLogger.fillUnusedLevels() - cLogger.innerLogger = internalLogger - - return cLogger -} - -func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error { - if depth < 0 { - return fmt.Errorf("negative depth: %d", depth) - } - cLogger.m.Lock() - cLogger.addStackDepth = depth - cLogger.m.Unlock() - return nil -} - -func (cLogger *commonLogger) Tracef(format string, params ...interface{}) { - cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Debugf(format string, params ...interface{}) { - cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Infof(format string, params ...interface{}) { - cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - cLogger.warnWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - cLogger.errorWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Trace(v ...interface{}) { - cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Debug(v ...interface{}) { - cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Info(v ...interface{}) { - cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Warn(v ...interface{}) error { - message := newLogMessage(v) - cLogger.warnWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Error(v ...interface{}) error { - message := newLogMessage(v) - cLogger.errorWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Critical(v ...interface{}) error { - message := newLogMessage(v) - cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) SetContext(c interface{}) { - cLogger.customContext = c -} - -func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(TraceLvl, message, callDepth) -} - -func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(DebugLvl, message, callDepth) -} - -func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(InfoLvl, message, 
callDepth) -} - -func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(WarnLvl, message, callDepth) -} - -func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(ErrorLvl, message, callDepth) -} - -func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(CriticalLvl, message, callDepth) - cLogger.innerLogger.Flush() -} - -func (cLogger *commonLogger) Closed() bool { - cLogger.closedM.RLock() - defer cLogger.closedM.RUnlock() - return cLogger.closed -} - -func (cLogger *commonLogger) fillUnusedLevels() { - for i := 0; i < len(cLogger.unusedLevels); i++ { - cLogger.unusedLevels[i] = true - } - - cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints) - - for _, exception := range cLogger.config.Exceptions { - cLogger.fillUnusedLevelsByContraint(exception) - } -} - -func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) { - for i := 0; i < len(cLogger.unusedLevels); i++ { - if constraint.IsAllowed(LogLevel(i)) { - cLogger.unusedLevels[i] = false - } - } -} - -// stackCallDepth is used to indicate the call depth of 'log' func. -// This depth level is used in the runtime.Caller(...) call. See -// common_context.go -> specifyContext, extractCallerInfo for details. -func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) { - if cLogger.unusedLevels[level] { - return - } - cLogger.m.Lock() - defer cLogger.m.Unlock() - - if cLogger.Closed() { - return - } - context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext) - // Context errors are not reported because there are situations - // in which context errors are normal Seelog usage cases. For - // example in executables with stripped symbols. - // Error contexts are returned instead. See common_context.go. 
- /*if err != nil { - reportInternalError(err) - return - }*/ - cLogger.innerLogger.innerLog(level, context, message) -} - -func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) { - defer func() { - if err := recover(); err != nil { - reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err)) - } - }() - if cLogger.config.IsAllowed(level, context) { - cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError) - } -} - -func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool { - funcMap, ok := cLogger.contextCache[context.FullPath()] - if !ok { - funcMap = make(map[string]map[LogLevel]bool, 0) - cLogger.contextCache[context.FullPath()] = funcMap - } - - levelMap, ok := funcMap[context.Func()] - if !ok { - levelMap = make(map[LogLevel]bool, 0) - funcMap[context.Func()] = levelMap - } - - isAllowValue, ok := levelMap[level] - if !ok { - isAllowValue = cLogger.config.IsAllowed(level, context) - levelMap[level] = isAllowValue - } - - return isAllowValue -} - -type logMessage struct { - params []interface{} -} - -type logFormattedMessage struct { - format string - params []interface{} -} - -func newLogMessage(params []interface{}) fmt.Stringer { - message := new(logMessage) - - message.params = params - - return message -} - -func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage { - message := new(logFormattedMessage) - - message.params = params - message.format = format - - return message -} - -func (message *logMessage) String() string { - return fmt.Sprint(message.params...) -} - -func (message *logFormattedMessage) String() string { - return fmt.Sprintf(message.format, message.params...) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_bufferedwriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_bufferedwriter.go deleted file mode 100644 index 37d75c82..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_bufferedwriter.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "bufio" - "errors" - "fmt" - "io" - "sync" - "time" -) - -// bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full -type bufferedWriter struct { - flushPeriod time.Duration // data flushes interval (in microseconds) - bufferMutex *sync.Mutex // mutex for buffer operations syncronization - innerWriter io.Writer // inner writer - buffer *bufio.Writer // buffered wrapper for inner writer - bufferSize int // max size of data chunk in bytes -} - -// NewBufferedWriter creates a new buffered writer struct. -// bufferSize -- size of memory buffer in bytes -// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality -func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) { - - if innerWriter == nil { - return nil, errors.New("argument is nil: innerWriter") - } - if flushPeriod < 0 { - return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod) - } - - if bufferSize <= 0 { - return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize) - } - - buffer := bufio.NewWriterSize(innerWriter, bufferSize) - - /*if err != nil { - return nil, err - }*/ - - newWriter := new(bufferedWriter) - - newWriter.innerWriter = innerWriter - newWriter.buffer = buffer - newWriter.bufferSize = bufferSize - newWriter.flushPeriod = flushPeriod * 1e6 - newWriter.bufferMutex = new(sync.Mutex) - - if flushPeriod != 0 { - go newWriter.flushPeriodically() - } - - return newWriter, nil -} - -func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) { - bufferedLen := bufWriter.buffer.Buffered() - - n, err = bufWriter.flushInner() - if err != nil { - return - } - - written, writeErr := bufWriter.innerWriter.Write(bytes) - return bufferedLen + written, writeErr -} - -// Sends data to buffer manager. Waits until all buffers are full. 
-func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) { - - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bytesLen := len(bytes) - - if bytesLen > bufWriter.bufferSize { - return bufWriter.writeBigChunk(bytes) - } - - if bytesLen > bufWriter.buffer.Available() { - n, err = bufWriter.flushInner() - if err != nil { - return - } - } - - bufWriter.buffer.Write(bytes) - - return len(bytes), nil -} - -func (bufWriter *bufferedWriter) Close() error { - closer, ok := bufWriter.innerWriter.(io.Closer) - if ok { - return closer.Close() - } - - return nil -} - -func (bufWriter *bufferedWriter) Flush() { - - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bufWriter.flushInner() -} - -func (bufWriter *bufferedWriter) flushInner() (n int, err error) { - bufferedLen := bufWriter.buffer.Buffered() - flushErr := bufWriter.buffer.Flush() - - return bufWriter.buffer.Buffered() - bufferedLen, flushErr -} - -func (bufWriter *bufferedWriter) flushBuffer() { - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bufWriter.buffer.Flush() -} - -func (bufWriter *bufferedWriter) flushPeriodically() { - if bufWriter.flushPeriod > 0 { - ticker := time.NewTicker(bufWriter.flushPeriod) - for { - <-ticker.C - bufWriter.flushBuffer() - } - } -} - -func (bufWriter *bufferedWriter) String() string { - return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_connwriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_connwriter.go deleted file mode 100644 index d199894e..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_connwriter.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "crypto/tls" - "fmt" - "io" - "net" -) - -// connWriter is used to write to a stream-oriented network connection. -type connWriter struct { - innerWriter io.WriteCloser - reconnectOnMsg bool - reconnect bool - net string - addr string - useTLS bool - configTLS *tls.Config -} - -// Creates writer to the address addr on the network netName. 
-// Connection will be opened on each write if reconnectOnMsg = true -func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter { - newWriter := new(connWriter) - - newWriter.net = netName - newWriter.addr = addr - newWriter.reconnectOnMsg = reconnectOnMsg - - return newWriter -} - -// Creates a writer that uses SSL/TLS -func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter { - newWriter := new(connWriter) - - newWriter.net = netName - newWriter.addr = addr - newWriter.reconnectOnMsg = reconnectOnMsg - newWriter.useTLS = true - newWriter.configTLS = config - - return newWriter -} - -func (connWriter *connWriter) Close() error { - if connWriter.innerWriter == nil { - return nil - } - - return connWriter.innerWriter.Close() -} - -func (connWriter *connWriter) Write(bytes []byte) (n int, err error) { - if connWriter.neededConnectOnMsg() { - err = connWriter.connect() - if err != nil { - return 0, err - } - } - - if connWriter.reconnectOnMsg { - defer connWriter.innerWriter.Close() - } - - n, err = connWriter.innerWriter.Write(bytes) - if err != nil { - connWriter.reconnect = true - } - - return -} - -func (connWriter *connWriter) String() string { - return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg) -} - -func (connWriter *connWriter) connect() error { - if connWriter.innerWriter != nil { - connWriter.innerWriter.Close() - connWriter.innerWriter = nil - } - - if connWriter.useTLS { - conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS) - if err != nil { - return err - } - connWriter.innerWriter = conn - - return nil - } - - conn, err := net.Dial(connWriter.net, connWriter.addr) - if err != nil { - return err - } - - tcpConn, ok := conn.(*net.TCPConn) - if ok { - tcpConn.SetKeepAlive(true) - } - - connWriter.innerWriter = conn - - return nil -} - -func (connWriter *connWriter) neededConnectOnMsg() bool { - if connWriter.reconnect { - connWriter.reconnect = false - return true - } - - if connWriter.innerWriter == nil { - return true - } - - return connWriter.reconnectOnMsg -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_consolewriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_consolewriter.go deleted file mode 100644 index 3eb79afa..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_consolewriter.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import "fmt" - -// consoleWriter is used to write to console -type consoleWriter struct { -} - -// Creates a new console writer. Returns error, if the console writer couldn't be created. -func NewConsoleWriter() (writer *consoleWriter, err error) { - newWriter := new(consoleWriter) - - return newWriter, nil -} - -// Create folder and file on WriteLog/Write first call -func (console *consoleWriter) Write(bytes []byte) (int, error) { - return fmt.Print(string(bytes)) -} - -func (console *consoleWriter) String() string { - return "Console writer" -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_filewriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_filewriter.go deleted file mode 100644 index 8d3ae270..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_filewriter.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "io" - "os" - "path/filepath" -) - -// fileWriter is used to write to a file. -type fileWriter struct { - innerWriter io.WriteCloser - fileName string -} - -// Creates a new file and a corresponding writer. Returns error, if the file couldn't be created. 
-func NewFileWriter(fileName string) (writer *fileWriter, err error) { - newWriter := new(fileWriter) - newWriter.fileName = fileName - - return newWriter, nil -} - -func (fw *fileWriter) Close() error { - if fw.innerWriter != nil { - err := fw.innerWriter.Close() - if err != nil { - return err - } - fw.innerWriter = nil - } - return nil -} - -// Create folder and file on WriteLog/Write first call -func (fw *fileWriter) Write(bytes []byte) (n int, err error) { - if fw.innerWriter == nil { - if err := fw.createFile(); err != nil { - return 0, err - } - } - return fw.innerWriter.Write(bytes) -} - -func (fw *fileWriter) createFile() error { - folder, _ := filepath.Split(fw.fileName) - var err error - - if 0 != len(folder) { - err = os.MkdirAll(folder, defaultDirectoryPermissions) - if err != nil { - return err - } - } - - // If exists - fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) - - if err != nil { - return err - } - - return nil -} - -func (fw *fileWriter) String() string { - return fmt.Sprintf("File writer: %s", fw.fileName) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_formattedwriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_formattedwriter.go deleted file mode 100644 index bf44a410..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_formattedwriter.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" - "fmt" - "io" -) - -type formattedWriter struct { - writer io.Writer - formatter *formatter -} - -func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) { - if formatter == nil { - return nil, errors.New("formatter can not be nil") - } - - return &formattedWriter{writer, formatter}, nil -} - -func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error { - str := formattedWriter.formatter.Format(message, level, context) - _, err := formattedWriter.writer.Write([]byte(str)) - return err -} - -func (formattedWriter *formattedWriter) String() string { - return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter) -} - -func (formattedWriter *formattedWriter) Writer() io.Writer { - return formattedWriter.writer -} - -func (formattedWriter *formattedWriter) Format() *formatter { - return formattedWriter.formatter -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go deleted file mode 100644 index 728f19d1..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go +++ /dev/null @@ -1,762 +0,0 @@ -// Copyright (c) 2013 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/cihub/seelog/archive" - "github.com/cihub/seelog/archive/gzip" - "github.com/cihub/seelog/archive/tar" - "github.com/cihub/seelog/archive/zip" -) - -// Common constants -const ( - rollingLogHistoryDelimiter = "." -) - -// Types of the rolling writer: roll by date, by time, etc. -type rollingType uint8 - -const ( - rollingTypeSize = iota - rollingTypeTime -) - -// Types of the rolled file naming mode: prefix, postfix, etc. 
-type rollingNameMode uint8 - -const ( - rollingNameModePostfix = iota - rollingNameModePrefix -) - -var rollingNameModesStringRepresentation = map[rollingNameMode]string{ - rollingNameModePostfix: "postfix", - rollingNameModePrefix: "prefix", -} - -func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) { - for tp, tpStr := range rollingNameModesStringRepresentation { - if tpStr == rollingNameStr { - return tp, true - } - } - - return 0, false -} - -var rollingTypesStringRepresentation = map[rollingType]string{ - rollingTypeSize: "size", - rollingTypeTime: "date", -} - -func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) { - for tp, tpStr := range rollingTypesStringRepresentation { - if tpStr == rollingTypeStr { - return tp, true - } - } - - return 0, false -} - -// Old logs archivation type. -type rollingArchiveType uint8 - -const ( - rollingArchiveNone = iota - rollingArchiveZip - rollingArchiveGzip -) - -var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{ - rollingArchiveNone: "none", - rollingArchiveZip: "zip", - rollingArchiveGzip: "gzip", -} - -type archiver func(f *os.File, exploded bool) archive.WriteCloser - -type unarchiver func(f *os.File) (archive.ReadCloser, error) - -type compressionType struct { - extension string - handleMultipleEntries bool - archiver archiver - unarchiver unarchiver -} - -var compressionTypes = map[rollingArchiveType]compressionType{ - rollingArchiveZip: { - extension: ".zip", - handleMultipleEntries: true, - archiver: func(f *os.File, _ bool) archive.WriteCloser { - return zip.NewWriter(f) - }, - unarchiver: func(f *os.File) (archive.ReadCloser, error) { - fi, err := f.Stat() - if err != nil { - return nil, err - } - r, err := zip.NewReader(f, fi.Size()) - if err != nil { - return nil, err - } - return archive.NopCloser(r), nil - }, - }, - rollingArchiveGzip: { - extension: ".gz", - handleMultipleEntries: false, - archiver: func(f *os.File, exploded bool) archive.WriteCloser { - gw := gzip.NewWriter(f) - if exploded { - return gw - } - return tar.NewWriteMultiCloser(gw, gw) - }, - unarchiver: func(f *os.File) (archive.ReadCloser, error) { - gr, err := gzip.NewReader(f, f.Name()) - if err != nil { - return nil, err - } - - // Determine if the gzip is a tar - tr := tar.NewReader(gr) - _, err = tr.Next() - isTar := err == nil - - // Reset to beginning of file - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - return nil, err - } - gr.Reset(f) - - if isTar { - return archive.NopCloser(tar.NewReader(gr)), nil - } - return gr, nil - }, - }, -} - -func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string { - if !compressionType.handleMultipleEntries && !exploded { - return name + ".tar" + compressionType.extension - } else { - return name + compressionType.extension - } - -} - -func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) { - for tp, tpStr := range rollingArchiveTypesStringRepresentation { - if tpStr == rollingArchiveTypeStr { - return tp, true - } - } - - return 0, false -} - -// Default names for different archive types -var rollingArchiveDefaultExplodedName = "old" - -func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) { - compressionType, ok := compressionTypes[archiveType] - if !ok { - return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType) - } - return compressionType.rollingArchiveTypeName("log", exploded), nil -} - -// 
rollerVirtual is an interface that represents all virtual funcs that are -// called in different rolling writer subtypes. -type rollerVirtual interface { - needsToRoll() bool // Returns true if needs to switch to another file. - isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok. - sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger. - - // getNewHistoryRollFileName is called whenever we are about to roll the - // current log file. It returns the name the current log file should be - // rolled to. - getNewHistoryRollFileName(otherHistoryFiles []string) string - - getCurrentFileName() string -} - -// rollingFileWriter writes received messages to a file, until time interval passes -// or file exceeds a specified limit. After that the current log file is renamed -// and writer starts to log into a new file. You can set a limit for such renamed -// files count, if you want, and then the rolling writer would delete older ones when -// the files count exceed the specified limit. -type rollingFileWriter struct { - fileName string // log file name - currentDirPath string - currentFile *os.File - currentName string - currentFileSize int64 - rollingType rollingType // Rolling mode (Files roll by size/date/...) - archiveType rollingArchiveType - archivePath string - archiveExploded bool - fullName bool - maxRolls int - nameMode rollingNameMode - self rollerVirtual // Used for virtual calls - rollLock sync.Mutex -} - -func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode, - archiveExploded bool, fullName bool) (*rollingFileWriter, error) { - rw := new(rollingFileWriter) - rw.currentDirPath, rw.fileName = filepath.Split(fpath) - if len(rw.currentDirPath) == 0 { - rw.currentDirPath = "." 
- } - - rw.rollingType = rtype - rw.archiveType = atype - rw.archivePath = apath - rw.nameMode = namemode - rw.maxRolls = maxr - rw.archiveExploded = archiveExploded - rw.fullName = fullName - return rw, nil -} - -func (rw *rollingFileWriter) hasRollName(file string) bool { - switch rw.nameMode { - case rollingNameModePostfix: - rname := rw.fileName + rollingLogHistoryDelimiter - return strings.HasPrefix(file, rname) - case rollingNameModePrefix: - rname := rollingLogHistoryDelimiter + rw.fileName - return strings.HasSuffix(file, rname) - } - return false -} - -func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string { - switch rw.nameMode { - case rollingNameModePostfix: - return originalName + rollingLogHistoryDelimiter + rollname - case rollingNameModePrefix: - return rollname + rollingLogHistoryDelimiter + originalName - } - return "" -} - -func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) { - files, err := getDirFilePaths(rw.currentDirPath, nil, true) - if err != nil { - return nil, err - } - var validRollNames []string - for _, file := range files { - if rw.hasRollName(file) { - rname := rw.getFileRollName(file) - if rw.self.isFileRollNameValid(rname) { - validRollNames = append(validRollNames, rname) - } - } - } - sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames) - if err != nil { - return nil, err - } - validSortedFiles := make([]string, len(sortedTails)) - for i, v := range sortedTails { - validSortedFiles[i] = rw.createFullFileName(rw.fileName, v) - } - return validSortedFiles, nil -} - -func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error { - var err error - - if len(rw.currentDirPath) != 0 { - err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions) - - if err != nil { - return err - } - } - rw.currentName = rw.self.getCurrentFileName() - filePath := filepath.Join(rw.currentDirPath, rw.currentName) - - // This will either open the existing file (without truncating it) or - // create if necessary. Append mode avoids any race conditions. 
- rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) - if err != nil { - return err - } - - stat, err := rw.currentFile.Stat() - if err != nil { - rw.currentFile.Close() - rw.currentFile = nil - return err - } - - rw.currentFileSize = stat.Size() - return nil -} - -func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) { - closeWithError := func(c io.Closer) { - if cerr := c.Close(); cerr != nil && err == nil { - err = cerr - } - } - - rollPath := filepath.Join(rw.currentDirPath, logFilename) - src, err := os.Open(rollPath) - if err != nil { - return err - } - defer src.Close() // Read-only - - // Buffer to a temporary file on the same partition - // Note: archivePath is a path to a directory when handling exploded logs - dst, err := rw.tempArchiveFile(rw.archivePath) - if err != nil { - return err - } - defer func() { - closeWithError(dst) - if err != nil { - os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file - return - } - - // Finalize archive by swapping the buffered archive into place - err = os.Rename(dst.Name(), filepath.Join(rw.archivePath, - compressionType.rollingArchiveTypeName(logFilename, true))) - }() - - // archive entry - w := compressionType.archiver(dst, true) - defer closeWithError(w) - fi, err := src.Stat() - if err != nil { - return err - } - if err := w.NextFile(logFilename, fi); err != nil { - return err - } - _, err = io.Copy(w, src) - return err -} - -func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) { - closeWithError := func(c io.Closer) { - if cerr := c.Close(); cerr != nil && err == nil { - err = cerr - } - } - - // Buffer to a temporary file on the same partition - // Note: archivePath is a path to a file when handling unexploded logs - dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath)) - if err != nil { - return err - } - defer func() { - closeWithError(dst) - if err != nil { - os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file - return - } - - // Finalize archive by moving the buffered archive into place - err = os.Rename(dst.Name(), rw.archivePath) - }() - - w := compressionType.archiver(dst, false) - defer closeWithError(w) - - src, err := os.Open(rw.archivePath) - switch { - // Archive exists - case err == nil: - defer src.Close() // Read-only - - r, err := compressionType.unarchiver(src) - if err != nil { - return err - } - defer r.Close() // Read-only - - if err := archive.Copy(w, r); err != nil { - return err - } - - // Failed to stat - case !os.IsNotExist(err): - return err - } - - // Add new files to the archive - for i := 0; i < rollsToDelete; i++ { - rollPath := filepath.Join(rw.currentDirPath, history[i]) - src, err := os.Open(rollPath) - if err != nil { - return err - } - defer src.Close() // Read-only - fi, err := src.Stat() - if err != nil { - return err - } - if err := w.NextFile(src.Name(), fi); err != nil { - return err - } - if _, err := io.Copy(w, src); err != nil { - return err - } - } - return nil -} - -func (rw *rollingFileWriter) deleteOldRolls(history []string) error { - if rw.maxRolls <= 0 { - return nil - } - - rollsToDelete := len(history) - rw.maxRolls - if rollsToDelete <= 0 { - return nil - } - - if rw.archiveType != rollingArchiveNone { - if rw.archiveExploded { - os.MkdirAll(rw.archivePath, defaultDirectoryPermissions) - - // Archive logs - for i := 0; i < rollsToDelete; i++ 
{ - rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType]) - } - } else { - os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions) - - rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history) - } - } - - var err error - // In all cases (archive files or not) the files should be deleted. - for i := 0; i < rollsToDelete; i++ { - // Try best to delete files without breaking the loop. - if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil { - reportInternalError(err) - } - } - - return nil -} - -func (rw *rollingFileWriter) getFileRollName(fileName string) string { - switch rw.nameMode { - case rollingNameModePostfix: - return fileName[len(rw.fileName+rollingLogHistoryDelimiter):] - case rollingNameModePrefix: - return fileName[:len(fileName)-len(rw.fileName+rollingLogHistoryDelimiter)] - } - return "" -} - -func (rw *rollingFileWriter) roll() error { - // First, close current file. - err := rw.currentFile.Close() - if err != nil { - return err - } - rw.currentFile = nil - - // Current history of all previous log files. - // For file roller it may be like this: - // * ... - // * file.log.4 - // * file.log.5 - // * file.log.6 - // - // For date roller it may look like this: - // * ... - // * file.log.11.Aug.13 - // * file.log.15.Aug.13 - // * file.log.16.Aug.13 - // Sorted log history does NOT include current file. - history, err := rw.getSortedLogHistory() - if err != nil { - return err - } - // Renames current file to create a new roll history entry - // For file roller it may be like this: - // * ... - // * file.log.4 - // * file.log.5 - // * file.log.6 - // n file.log.7 <---- RENAMED (from file.log) - newHistoryName := rw.createFullFileName(rw.fileName, - rw.self.getNewHistoryRollFileName(history)) - - err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName)) - if err != nil { - return err - } - - // Finally, add the newly added history file to the history archive - // and, if after that the archive exceeds the allowed max limit, older rolls - // must the removed/archived. 
- history = append(history, newHistoryName) - if len(history) > rw.maxRolls { - err = rw.deleteOldRolls(history) - if err != nil { - return err - } - } - - return nil -} - -func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) { - rw.rollLock.Lock() - defer rw.rollLock.Unlock() - - if rw.self.needsToRoll() { - if err := rw.roll(); err != nil { - return 0, err - } - } - - if rw.currentFile == nil { - err := rw.createFileAndFolderIfNeeded(true) - if err != nil { - return 0, err - } - } - - n, err = rw.currentFile.Write(bytes) - rw.currentFileSize += int64(n) - return n, err -} - -func (rw *rollingFileWriter) Close() error { - if rw.currentFile != nil { - e := rw.currentFile.Close() - if e != nil { - return e - } - rw.currentFile = nil - } - return nil -} - -func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) { - tmp := filepath.Join(archiveDir, ".seelog_tmp") - if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil { - return nil, err - } - return os.CreateTemp(tmp, "archived_logs") -} - -// ============================================================================================= -// Different types of rolling writers -// ============================================================================================= - -// -------------------------------------------------- -// Rolling writer by SIZE -// -------------------------------------------------- - -// rollingFileWriterSize performs roll when file exceeds a specified limit. -type rollingFileWriterSize struct { - *rollingFileWriter - maxFileSize int64 -} - -func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) { - rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false) - if err != nil { - return nil, err - } - rws := &rollingFileWriterSize{rw, maxSize} - rws.self = rws - return rws, nil -} - -func (rws *rollingFileWriterSize) needsToRoll() bool { - return rws.currentFileSize >= rws.maxFileSize -} - -func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool { - if len(rname) == 0 { - return false - } - _, err := strconv.Atoi(rname) - return err == nil -} - -type rollSizeFileTailsSlice []string - -func (p rollSizeFileTailsSlice) Len() int { - return len(p) -} -func (p rollSizeFileTailsSlice) Less(i, j int) bool { - v1, _ := strconv.Atoi(p[i]) - v2, _ := strconv.Atoi(p[j]) - return v1 < v2 -} -func (p rollSizeFileTailsSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) { - ss := rollSizeFileTailsSlice(fs) - sort.Sort(ss) - return ss, nil -} - -func (rws *rollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []string) string { - v := 0 - if len(otherLogFiles) != 0 { - latest := otherLogFiles[len(otherLogFiles)-1] - v, _ = strconv.Atoi(rws.getFileRollName(latest)) - } - return fmt.Sprintf("%d", v+1) -} - -func (rws *rollingFileWriterSize) getCurrentFileName() string { - return rws.fileName -} - -func (rws *rollingFileWriterSize) String() string { - return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v", - rws.fileName, - rollingArchiveTypesStringRepresentation[rws.archiveType], - rws.archivePath, - rws.maxFileSize, - rws.maxRolls) -} - -// -------------------------------------------------- -// Rolling writer 
by TIME -// -------------------------------------------------- - -// rollingFileWriterTime performs roll when a specified time interval has passed. -type rollingFileWriterTime struct { - *rollingFileWriter - timePattern string - currentTimeFileName string -} - -func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int, - timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) { - - rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName) - if err != nil { - return nil, err - } - rws := &rollingFileWriterTime{rw, timePattern, ""} - rws.self = rws - return rws, nil -} - -func (rwt *rollingFileWriterTime) needsToRoll() bool { - newName := time.Now().Format(rwt.timePattern) - - if rwt.currentTimeFileName == "" { - // first run; capture the current name - rwt.currentTimeFileName = newName - return false - } - - return newName != rwt.currentTimeFileName -} - -func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool { - if len(rname) == 0 { - return false - } - _, err := time.ParseInLocation(rwt.timePattern, rname, time.Local) - return err == nil -} - -type rollTimeFileTailsSlice struct { - data []string - pattern string -} - -func (p rollTimeFileTailsSlice) Len() int { - return len(p.data) -} - -func (p rollTimeFileTailsSlice) Less(i, j int) bool { - t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local) - t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local) - return t1.Before(t2) -} - -func (p rollTimeFileTailsSlice) Swap(i, j int) { - p.data[i], p.data[j] = p.data[j], p.data[i] -} - -func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) { - ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern} - sort.Sort(ss) - return ss.data, nil -} - -func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(_ []string) string { - newFileName := rwt.currentTimeFileName - rwt.currentTimeFileName = time.Now().Format(rwt.timePattern) - return newFileName -} - -func (rwt *rollingFileWriterTime) getCurrentFileName() string { - if rwt.fullName { - return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern)) - } - return rwt.fileName -} - -func (rwt *rollingFileWriterTime) String() string { - return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v", - rwt.fileName, - rollingArchiveTypesStringRepresentation[rwt.archiveType], - rwt.archivePath, - rwt.timePattern, - rwt.maxRolls) -} diff --git a/pkg/c8y/vendor/github.com/cihub/seelog/writers_smtpwriter.go b/pkg/c8y/vendor/github.com/cihub/seelog/writers_smtpwriter.go deleted file mode 100644 index bbc57a34..00000000 --- a/pkg/c8y/vendor/github.com/cihub/seelog/writers_smtpwriter.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "net/smtp" - "os" - "path/filepath" - "strings" -) - -const ( - // Default subject phrase for sending emails. - DefaultSubjectPhrase = "Diagnostic message from server: " - - // Message subject pattern composed according to RFC 5321. - rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n" -) - -// smtpWriter is used to send emails via given SMTP-server. -type smtpWriter struct { - auth smtp.Auth - hostName string - hostPort string - hostNameWithPort string - senderAddress string - senderName string - recipientAddresses []string - caCertDirPaths []string - mailHeaders []string - subject string -} - -// NewSMTPWriter returns a new SMTP-writer. -func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter { - return &smtpWriter{ - auth: smtp.PlainAuth("", un, pwd, hn), - hostName: hn, - hostPort: hp, - hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp), - senderAddress: sa, - senderName: sn, - recipientAddresses: ras, - caCertDirPaths: cacdps, - subject: subj, - mailHeaders: headers, - } -} - -func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte { - headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject) - // Build header lines if configured. - if headers != nil && len(headers) > 0 { - headerLines += strings.Join(headers, "\n") - headerLines += "\n" - } - return append([]byte(headerLines), body...) -} - -// getTLSConfig gets paths of PEM files with certificates, -// host server name and tries to create an appropriate TLS.Config. -func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) { - if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 { - err = errors.New("invalid PEM file paths") - return - } - pemEncodedContent := []byte{} - var ( - e error - bytes []byte - ) - // Create a file-filter-by-extension, set aside non-pem files. - pemFilePathFilter := func(fp string) bool { - if filepath.Ext(fp) == ".pem" { - return true - } - return false - } - for _, pemFileDirPath := range pemFileDirPaths { - pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false) - if err != nil { - return nil, err - } - - // Put together all the PEM files to decode them as a whole byte slice. - for _, pfp := range pemFilePaths { - if bytes, e = os.ReadFile(pfp); e == nil { - pemEncodedContent = append(pemEncodedContent, bytes...) 
- } else { - return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error()) - } - } - } - config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName} - isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent) - if !isAppended { - // Extract this into a separate error. - err = errors.New("invalid PEM content") - return - } - return -} - -// SendMail accepts TLS configuration, connects to the server at addr, -// switches to TLS if possible, authenticates with mechanism a if possible, -// and then sends an email from address from, to addresses to, with message msg. -func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error { - c, err := smtp.Dial(addr) - if err != nil { - return err - } - // Check if the server supports STARTTLS extension. - if ok, _ := c.Extension("STARTTLS"); ok { - if err = c.StartTLS(config); err != nil { - return err - } - } - // Check if the server supports AUTH extension and use given smtp.Auth. - if a != nil { - if isSupported, _ := c.Extension("AUTH"); isSupported { - if err = c.Auth(a); err != nil { - return err - } - } - } - // Portion of code from the official smtp.SendMail function, - // see http://golang.org/src/pkg/net/smtp/smtp.go. - if err = c.Mail(from); err != nil { - return err - } - for _, addr := range to { - if err = c.Rcpt(addr); err != nil { - return err - } - } - w, err := c.Data() - if err != nil { - return err - } - _, err = w.Write(msg) - if err != nil { - return err - } - err = w.Close() - if err != nil { - return err - } - return c.Quit() -} - -// Write pushes a text message properly composed according to RFC 5321 -// to a post server, which sends it to the recipients. -func (smtpw *smtpWriter) Write(data []byte) (int, error) { - var err error - - if smtpw.caCertDirPaths == nil { - err = smtp.SendMail( - smtpw.hostNameWithPort, - smtpw.auth, - smtpw.senderAddress, - smtpw.recipientAddresses, - prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), - ) - } else { - config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName) - if e != nil { - return 0, e - } - err = sendMailWithTLSConfig( - config, - smtpw.hostNameWithPort, - smtpw.auth, - smtpw.senderAddress, - smtpw.recipientAddresses, - prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), - ) - } - if err != nil { - return 0, err - } - return len(data), nil -} - -// Close closes down SMTP-connection. -func (smtpw *smtpWriter) Close() error { - // Do nothing as Write method opens and closes connection automatically. - return nil -} diff --git a/pkg/c8y/vendor/github.com/google/go-querystring/LICENSE b/pkg/c8y/vendor/github.com/google/go-querystring/LICENSE deleted file mode 100644 index ae121a1e..00000000 --- a/pkg/c8y/vendor/github.com/google/go-querystring/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Google. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/github.com/google/go-querystring/query/encode.go b/pkg/c8y/vendor/github.com/google/go-querystring/query/encode.go deleted file mode 100644 index 37080b19..00000000 --- a/pkg/c8y/vendor/github.com/google/go-querystring/query/encode.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package query implements encoding of structs into URL query parameters. -// -// As a simple example: -// -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } -// -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" -// -// The exact mapping between Go values and url.Values is described in the -// documentation for the Values() function. -package query - -import ( - "bytes" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var timeType = reflect.TypeOf(time.Time{}) - -var encoderType = reflect.TypeOf(new(Encoder)).Elem() - -// Encoder is an interface implemented by any type that wishes to encode -// itself into URL values in a non-standard way. -type Encoder interface { - EncodeValues(key string, v *url.Values) error -} - -// Values returns the url.Values encoding of v. -// -// Values expects to be passed a struct, and traverses it recursively using the -// following encoding rules. -// -// Each exported struct field is encoded as a URL parameter unless -// -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option -// -// The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any time.Time that returns true -// for IsZero(). -// -// The URL parameter name defaults to the struct field name but can be -// specified in the struct field's tag value. The "url" key in the struct -// field's tag value is the key name, followed by an optional comma and -// options. For example: -// -// // Field is ignored by this package. -// Field int `url:"-"` -// -// // Field appears as URL parameter "myName". -// Field int `url:"myName"` -// -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` -// -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. 
-// Field int `url:",omitempty"` -// -// For encoding individual field values, the following type-dependent rules -// apply: -// -// Boolean values default to encoding as the strings "true" or "false". -// Including the "int" option signals that the field should be encoded as the -// strings "1" or "0". -// -// time.Time values default to encoding as RFC3339 timestamps. Including the -// "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()) -// -// Slice and Array values default to encoding as multiple URL values of the -// same name. Including the "comma" option signals that the field should be -// encoded as a single comma-delimited value. Including the "space" option -// similarly encodes the value as a single space-delimited string. Including -// the "semicolon" option will encode the value as a semicolon-delimited string. -// Including the "brackets" option signals that the multiple URL values should -// have "[]" appended to the value name. "numbered" will append a number to -// the end of each incidence of the value name, example: -// name0=value0&name1=value1, etc. -// -// Anonymous struct fields are usually encoded as if their inner exported -// fields were fields in the outer struct, subject to the standard Go -// visibility rules. An anonymous struct field with a name given in its URL -// tag is treated as having that name, rather than being anonymous. -// -// Non-nil pointer values are encoded as the value pointed to. -// -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: -// -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" -// -// All other values are encoded using their default string representation. -// -// Multiple fields that encode to the same URL parameter name will be included -// as multiple URL values of the same name. -func Values(v interface{}) (url.Values, error) { - values := make(url.Values) - val := reflect.ValueOf(v) - for val.Kind() == reflect.Ptr { - if val.IsNil() { - return values, nil - } - val = val.Elem() - } - - if v == nil { - return values, nil - } - - if val.Kind() != reflect.Struct { - return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) - } - - err := reflectValue(values, val, "") - return values, err -} - -// reflectValue populates the values parameter from the struct fields in val. -// Embedded structs are followed recursively (using the rules defined in the -// Values function documentation) breadth-first. 
-func reflectValue(values url.Values, val reflect.Value, scope string) error { - var embedded []reflect.Value - - typ := val.Type() - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - - sv := val.Field(i) - tag := sf.Tag.Get("url") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if name == "" { - if sf.Anonymous && sv.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, sv) - continue - } - - name = sf.Name - } - - if scope != "" { - name = scope + "[" + name + "]" - } - - if opts.Contains("omitempty") && isEmptyValue(sv) { - continue - } - - if sv.Type().Implements(encoderType) { - if !reflect.Indirect(sv).IsValid() { - sv = reflect.New(sv.Type().Elem()) - } - - m := sv.Interface().(Encoder) - if err := m.EncodeValues(name, &values); err != nil { - return err - } - continue - } - - if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del byte - if opts.Contains("comma") { - del = ',' - } else if opts.Contains("space") { - del = ' ' - } else if opts.Contains("semicolon") { - del = ';' - } else if opts.Contains("brackets") { - name = name + "[]" - } - - if del != 0 { - s := new(bytes.Buffer) - first := true - for i := 0; i < sv.Len(); i++ { - if first { - first = false - } else { - s.WriteByte(del) - } - s.WriteString(valueString(sv.Index(i), opts)) - } - values.Add(name, s.String()) - } else { - for i := 0; i < sv.Len(); i++ { - k := name - if opts.Contains("numbered") { - k = fmt.Sprintf("%s%d", name, i) - } - values.Add(k, valueString(sv.Index(i), opts)) - } - } - continue - } - - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts)) - continue - } - - if sv.Kind() == reflect.Struct { - reflectValue(values, sv, name) - continue - } - - values.Add(name, valueString(sv, opts)) - } - - for _, f := range embedded { - if err := reflectValue(values, f, scope); err != nil { - return err - } - } - - return nil -} - -// valueString returns the string representation of a value. -func valueString(v reflect.Value, opts tagOptions) string { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return "" - } - v = v.Elem() - } - - if v.Kind() == reflect.Bool && opts.Contains("int") { - if v.Bool() { - return "1" - } - return "0" - } - - if v.Type() == timeType { - t := v.Interface().(time.Time) - if opts.Contains("unix") { - return strconv.FormatInt(t.Unix(), 10) - } - return t.Format(time.RFC3339) - } - - return fmt.Sprint(v.Interface()) -} - -// isEmptyValue checks if a value should be considered empty for the purposes -// of omitting fields with the "omitempty" option. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - if v.Type() == timeType { - return v.Interface().(time.Time).IsZero() - } - - return false -} - -// tagOptions is the string following a comma in a struct field's "url" tag, or -// the empty string. It does not include the leading comma. 
-type tagOptions []string - -// parseTag splits a struct field's url tag into its name and comma-separated -// options. -func parseTag(tag string) (string, tagOptions) { - s := strings.Split(tag, ",") - return s[0], s[1:] -} - -// Contains checks whether the tagOptions contains the specified option. -func (o tagOptions) Contains(option string) bool { - for _, s := range o { - if s == option { - return true - } - } - return false -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/.gitignore b/pkg/c8y/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index cd3fcd1e..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/.travis.yml b/pkg/c8y/vendor/github.com/gorilla/websocket/.travis.yml deleted file mode 100644 index a49db51c..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/AUTHORS b/pkg/c8y/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/LICENSE b/pkg/c8y/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c972..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/README.md b/pkg/c8y/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 20e391f8..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Gorilla WebSocket - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) - -### Documentation - -* [API Reference](http://godoc.org/github.com/gorilla/websocket) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - -### Gorilla WebSocket compared with other packages - - - - - - - - - - - - - - - - - - -
-|                                          | github.com/gorilla | golang.org/x/net |
-|------------------------------------------|--------------------|------------------|
-| **RFC 6455 Features**                    |                    |                  |
-| Passes Autobahn Test Suite               | Yes                | No               |
-| Receive fragmented message               | Yes                | No, see note 1   |
-| Send close message                       | Yes                | No               |
-| Send pings and receive pongs             | Yes                | No               |
-| Get the type of a received data message  | Yes                | Yes, see note 2  |
-| **Other Features**                       |                    |                  |
-| Compression Extensions                   | Experimental       | No               |
-| Read message using io.Reader             | Yes                | No, see note 3   |
-| Write message using io.WriteCloser       | Yes                | No, see note 3   |
- -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/client.go b/pkg/c8y/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index cc5faef9..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "io" - "net" - "net/http" - "net/http/httptrace" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, net.DialContext is used. - NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. 
If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -// Dial creates a new client connection by calling DialContext with a background context. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - return d.DialContext(context.Background(), urlStr, requestHeader) -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, -} - -// nilDialer is dialer to use when receiver is nil. -var nilDialer = *DefaultDialer - -// DialContext creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// # The context will be used in the request and in the Dialer -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - if d == nil { - d = &nilDialer - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. 
- return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - req = req.WithContext(ctx) - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - case k == "Sec-Websocket-Protocol": - req.Header["Sec-WebSocket-Protocol"] = vs - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} - } - - if d.HandshakeTimeout != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) - defer cancel() - } - - // Get network dial function. - var netDial func(network, add string) (net.Conn, error) - - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } else { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. - if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - c.Close() - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. 
- if d.Proxy != nil { - proxyURL, err := d.Proxy(req) - if err != nil { - return nil, nil, err - } - if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - trace := httptrace.ContextClientTrace(ctx) - if trace != nil && trace.GetConn != nil { - trace.GetConn(hostPort) - } - - netConn, err := netDial("tcp", hostPort) - if trace != nil && trace.GotConn != nil { - trace.GotConn(httptrace.GotConnInfo{ - Conn: netConn, - }) - } - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if u.Scheme == "https" { - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - - var err error - if trace != nil { - err = doHandshakeWithTrace(trace, tlsConn, cfg) - } else { - err = doHandshake(tlsConn, cfg) - } - - if err != nil { - return nil, nil, err - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - if trace != nil && trace.GotFirstResponseByte != nil { - if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { - trace.GotFirstResponseByte() - } - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = io.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone.go b/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone.go deleted file mode 100644 index 4f0d9437..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.8 - -package websocket - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone_legacy.go deleted file mode 100644 index babb007f..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/client_clone_legacy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package websocket - -import "crypto/tls" - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/compression.go b/pkg/c8y/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 813ffb1e..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "compress/flate" - "errors" - "io" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. 
-type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. - if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - r.Close() - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/conn.go b/pkg/c8y/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 9f95ed27..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1164 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "math/rand" - "net" - "strconv" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. 
- BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a pong control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents a close message. -type CloseError struct { - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. -func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. 
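CloseError together with the IsCloseError / IsUnexpectedCloseError helpers (the latter continues just below) is how a read loop distinguishes an orderly shutdown from a failure. A small client-side sketch of that pattern, assuming the dial URL is a placeholder and DefaultDialer comes from the client half of this package:

package main

import (
    "log"

    "github.com/gorilla/websocket"
)

// readLoop drains the connection until it fails or the peer closes it.
// Normal and going-away closes are treated as routine; anything else is logged.
func readLoop(conn *websocket.Conn) {
    for {
        _, msg, err := conn.ReadMessage()
        if err != nil {
            if websocket.IsUnexpectedCloseError(err,
                websocket.CloseNormalClosure,
                websocket.CloseGoingAway) {
                log.Printf("unexpected close: %v", err)
            }
            return
        }
        log.Printf("received %d bytes", len(msg))
    }
}

func main() {
    conn, _, err := websocket.DefaultDialer.Dial("ws://127.0.0.1:8080/echo", nil)
    if err != nil {
        log.Fatal("dial:", err)
    }
    defer conn.Close()
    readLoop(conn)
}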
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this -// interface. The type of the value stored in a pool is not specified. -type BufferPool interface { - // Get gets a value from the pool or returns nil if the pool is empty. - Get() interface{} - // Put adds a value to the pool. - Put(interface{}) -} - -// writePoolData is the type added to the write buffer pool. This wrapper is -// used to prevent applications from peeking at and depending on the values -// added to the pool. -type writePoolData struct{ buf []byte } - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan bool // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writePool BufferPool - writeBufSize int - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { - - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } else if readBufferSize < maxControlFramePayloadSize { - // must be large enough for control frame - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - if writeBufferSize <= 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBufferSize += maxFrameHeaderSize - - if writeBuf == nil && writeBufferPool == nil { - writeBuf = make([]byte, writeBufferSize) - } - - mu := make(chan bool, 1) - mu <- true - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - writePool: writeBufferPool, - writeBufSize: writeBufferSize, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting -// for a close message. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} - -func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { - <-c.mu - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - if len(buf1) == 0 { - _, err = c.conn.Write(buf0) - } else { - err = c.writeBufs(buf0, buf1) - } - if err != nil { - return c.writeFatal(err) - } - if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return nil -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) 
- maskBytes(key, 0, buf[6:]) - } - - d := time.Hour * 1000 - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return err -} - -func (c *Conn) prepWrite(messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. - if c.writer != nil { - c.writer.Close() - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - if c.writeBuf == nil { - wpd, ok := c.writePool.Get().(writePoolData) - if ok { - c.writeBuf = wpd.buf - } else { - c.writeBuf = make([]byte, c.writeBufSize) - } - } - return nil -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and -// PongMessage) are supported. -func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if err := c.prepWrite(messageType); err != nil { - return nil, err - } - - mw := &messageWriter{ - c: c, - frameType: messageType, - pos: maxFrameHeaderSize, - } - c.writer = mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. - err error -} - -func (w *messageWriter) fatal(err error) error { - if w.err != nil { - w.err = err - w.c.writer = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. -func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.fatal(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. 
- framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.fatal(err) - } - - if final { - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. - err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - if err := w.flushFrame(true, nil); err != nil { - return err - } - w.err = errWriteClosed - return nil -} - -// WritePreparedMessage writes prepared message into connection. 
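NextWriter together with messageWriter.ReadFrom (above) lets a large payload be framed incrementally rather than buffered whole; io.Copy picks ReadFrom up automatically as long as no compression wrapper sits in front of the writer. A hedged sketch that streams an open file as one binary message, under a hypothetical helper package name:

package wsexample

import (
    "io"
    "os"

    "github.com/gorilla/websocket"
)

// sendFile streams f as a single binary message. With write compression
// disabled, io.Copy sees that the returned writer implements io.ReaderFrom,
// so the payload is sent in write-buffer-sized frames instead of being read
// into memory whole.
func sendFile(conn *websocket.Conn, f *os.File) error {
    w, err := conn.NextWriter(websocket.BinaryMessage)
    if err != nil {
        return err
    }
    if _, err := io.Copy(w, f); err != nil {
        w.Close()
        return err
    }
    // Close flushes the final frame of the message to the network.
    return w.Close()
}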
-func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. - - if err := c.prepWrite(messageType); err != nil { - return err - } - mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. - - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - final := p[0]&finalBit != 0 - frameType := int(p[0] & 0xf) - mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) - - c.readDecompress = false - if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { - c.readDecompress = true - p[0] &^= rsv1Bit - } - - if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") - } - if !final { - return noFrame, c.handleProtocolError("control frame not final") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") - } - c.readFinal = final - default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) - } - - // 3. Read and parse frame length. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) - } - - // 4. Handle frame masking. 
- - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. - - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("invalid close code") - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. - if c.reader != nil { - c.reader.Close() - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. 
- c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - c.readRemaining -= int64(n) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = io.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size for a message read from the peer. If a -// message exceeds the limit, the connection sends a close message to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close -// message back to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// close messages as described in the section on Control Messages above. -// -// The connection read methods return a CloseError when a close message is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close message back to -// the peer. 
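SetCloseHandler (below) is only needed when the application wants to do work before the reply close frame goes out; the default handler already echoes a close back to the peer. A sketch of a handler that logs the peer's code first and then replies the way the default would; purely illustrative:

package wsexample

import (
    "log"
    "time"

    "github.com/gorilla/websocket"
)

// installCloseHandler logs the peer's close code before sending the reply
// close frame. ErrCloseSent is ignored because a close may already be on
// the wire.
func installCloseHandler(conn *websocket.Conn) {
    conn.SetCloseHandler(func(code int, text string) error {
        log.Printf("peer closed: code=%d text=%q", code, text)
        msg := websocket.FormatCloseMessage(code, "")
        err := conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
        if err != nil && err != websocket.ErrCloseSent {
            return err
        }
        return nil
    })
}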
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING message application data. The default -// ping handler sends a pong to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// ping messages as described in the section on Control Messages above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG message application data. The default -// pong handler does nothing. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// pong messages as described in the section on Control Messages above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. See the compress/flate package for a description of -// compression levels. -func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -// An empty message is returned for code CloseNoStatusReceived. -func FormatCloseMessage(closeCode int, text string) []byte { - if closeCode == CloseNoStatusReceived { - // Return empty message because it's illegal to send - // CloseNoStatusReceived. Return non-nil value in case application - // checks for nil. 
- return []byte{} - } - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write.go b/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write.go deleted file mode 100644 index a509a21f..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package websocket - -import "net" - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write_legacy.go deleted file mode 100644 index 37edaff5..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/conn_write_legacy.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package websocket - -func (c *Conn) writeBufs(bufs ...[]byte) error { - for _, buf := range bufs { - if len(buf) > 0 { - if _, err := c.conn.Write(buf); err != nil { - return err - } - } - } - return nil -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/doc.go b/pkg/c8y/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index dcce1a63..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application calls -// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// log.Println(err) -// return -// } -// if err := conn.WriteMessage(messageType, p); err != nil { -// log.Println(err) -// return -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. 
This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by calling the handler function -// set with the SetCloseHandler method and by returning a *CloseError from the -// NextReader, ReadMessage or the message Read method. The default close -// handler sends a close message to the peer. -// -// Connections handle received ping messages by calling the handler function -// set with the SetPingHandler method. The default ping handler sends a pong -// message to the peer. -// -// Connections handle received pong messages by calling the handler function -// set with the SetPongHandler method. The default pong handler does nothing. -// If an application sends ping messages, then the application should set a -// pong handler to receive the corresponding pong. -// -// The control message handler functions are called from the NextReader, -// ReadMessage and message reader Read methods. The default close and ping -// handlers can block these methods for a short time when the handler writes to -// the connection. -// -// The application must read the connection to process close, ping and pong -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. 
It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and the Origin host is -// not equal to the Host request header. -// -// The deprecated package-level Upgrade function does not perform origin -// checking. The application is responsible for checking the Origin header -// before calling the Upgrade function. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". -// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. -package websocket diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/json.go b/pkg/c8y/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index dc2c1f64..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON writes the JSON encoding of v as a message. -// -// Deprecated: Use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v as a message. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// Deprecated: Use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. 
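WriteJSON and ReadJSON (the method form of ReadJSON is completed just below) wrap NextWriter / NextReader with encoding/json. A minimal request/acknowledge sketch; the Event type and package name are hypothetical:

package wsexample

import (
    "log"

    "github.com/gorilla/websocket"
)

// Event is a hypothetical wire format; any JSON-marshalable type works.
type Event struct {
    Kind string `json:"kind"`
    Body string `json:"body"`
}

// relay reads one JSON message and answers it with an acknowledgement.
func relay(conn *websocket.Conn) error {
    var in Event
    if err := conn.ReadJSON(&in); err != nil {
        return err
    }
    log.Printf("got %q event", in.Kind)
    return conn.WriteJSON(Event{Kind: "ack", Body: in.Body})
}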
-func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/mask.go b/pkg/c8y/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index 577fce9e..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build !appengine - -package websocket - -import "unsafe" - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. - var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. - b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/mask_safe.go b/pkg/c8y/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 2aac060e..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/prepared.go b/pkg/c8y/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index 74ec565d..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. -type PreparedMessage struct { - messageType int - data []byte - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. 
-type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. -type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. -func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. - mu := make(chan bool, 1) - mu <- true - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/proxy.go b/pkg/c8y/vendor/github.com/gorilla/websocket/proxy.go deleted file mode 100644 index bf2478e4..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/proxy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
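NewPreparedMessage (above) caches the wire representation per set of connection options, and Conn.WritePreparedMessage (in conn.go earlier in this patch) reuses it, so the expensive framing and compression work happens once per broadcast rather than once per connection. A sketch of a fan-out write; error handling is deliberately minimal and the package name is hypothetical:

package wsexample

import (
    "log"

    "github.com/gorilla/websocket"
)

// broadcast frames payload once and reuses the prepared frames for every
// connection in conns.
func broadcast(conns []*websocket.Conn, payload []byte) {
    pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
    if err != nil {
        log.Println("prepare:", err)
        return
    }
    for _, c := range conns {
        if err := c.WritePreparedMessage(pm); err != nil {
            log.Println("write:", err)
        }
    }
}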
- -package websocket - -import ( - "bufio" - "encoding/base64" - "errors" - "net" - "net/http" - "net/url" - "strings" -) - -type netDialerFunc func(network, addr string) (net.Conn, error) - -func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) -} - -func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil - }) -} - -type httpProxyDialer struct { - proxyURL *url.URL - fowardDial func(network, addr string) (net.Conn, error) -} - -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { - hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.fowardDial(network, hostPort) - if err != nil { - return nil, err - } - - connectHeader := make(http.Header) - if user := hpd.proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - - connectReq := &http.Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: addr}, - Host: addr, - Header: connectHeader, - } - - if err := connectReq.Write(conn); err != nil { - conn.Close() - return nil, err - } - - // Read response. It's OK to use and discard buffered reader here becaue - // the remote server does not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - - if resp.StatusCode != 200 { - conn.Close() - f := strings.SplitN(resp.Status, " ", 2) - return nil, errors.New(f[1]) - } - return conn, nil -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/server.go b/pkg/c8y/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index a761824b..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. 
- WriteBufferPool BufferPool - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is not nil, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. If there's no match, then no protocol is - // negotiated (the Sec-Websocket-Protocol header is not included in the - // handshake response). - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, then a safe default is used: return false if the - // Origin request header is present and the origin host is not equal to - // request Host header. - // - // A CheckOrigin function should carefully validate the request origin to - // prevent cross-site request forgery. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. -func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return equalASCIIFold(u.Host, r.Host) -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-WebSocket-Protocol). -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. 
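The Upgrader fields defined above (buffer sizes, Subprotocols, CheckOrigin, EnableCompression) are all set at construction time; Upgrade itself follows below. A sketch of a handler that pins the origin to a single hypothetical host and offers two made-up subprotocols:

package main

import (
    "log"
    "net/http"

    "github.com/gorilla/websocket"
)

// upgrader accepts only one hypothetical origin and offers two made-up
// subprotocols in preference order.
var upgrader = websocket.Upgrader{
    ReadBufferSize:  1024,
    WriteBufferSize: 1024,
    Subprotocols:    []string{"chat.v2", "chat.v1"},
    CheckOrigin: func(r *http.Request) bool {
        return r.Header.Get("Origin") == "https://app.example.com"
    },
}

func serveWS(w http.ResponseWriter, r *http.Request) {
    conn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        // Upgrade has already written an HTTP error response.
        log.Println("upgrade:", err)
        return
    }
    defer conn.Close()
    log.Println("negotiated subprotocol:", conn.Subprotocol())
}

func main() {
    http.HandleFunc("/ws", serveWS)
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}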
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - const badHandshake = "websocket: the client is not using the websocket protocol: " - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") - } - - if r.Method != "GET" { - return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered reader as connection reader. - br = brw.Reader - } - - buf := bufioWriterBuffer(netConn, brw.Writer) - - var writeBuf []byte - if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { - // Reuse hijacked write buffer as connection buffer. - writeBuf = buf - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - // Use larger of hijacked buffer and connection write buffer for header. - p := buf - if len(c.writeBuf) > len(p) { - p = c.writeBuf - } - p = p[:0] - - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-WebSocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) 
- } - if compress { - p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// Deprecated: Use websocket.Upgrader instead. -// -// Upgrade does not perform origin checking. The application is responsible for -// checking the Origin header before calling Upgrade. An example implementation -// of the same origin policy check is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", http.StatusForbidden) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. -func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} - -// bufioReaderSize size returns the size of a bufio.Reader. 
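Subprotocols and IsWebSocketUpgrade (both above) are convenient when one route has to serve both plain HTTP and WebSocket traffic. A sketch of that split, with the route and response text made up:

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{}

// handler answers plain HTTP unless the request asks for a WebSocket
// upgrade, in which case the offered subprotocols are logged first.
func handler(w http.ResponseWriter, r *http.Request) {
    if !websocket.IsWebSocketUpgrade(r) {
        fmt.Fprintln(w, "hello over plain HTTP")
        return
    }
    log.Println("client offered subprotocols:", websocket.Subprotocols(r))
    conn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        return
    }
    conn.Close()
}

func main() {
    http.HandleFunc("/", handler)
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}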
-func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 -} - -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -// bufioWriterBuffer grabs the buffer from a bufio.Writer. -func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. - var wh writeHook - bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/trace.go b/pkg/c8y/vendor/github.com/gorilla/websocket/trace.go deleted file mode 100644 index 834f122a..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/trace.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - if trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(tlsConn, cfg) - if trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - return err -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/trace_17.go b/pkg/c8y/vendor/github.com/gorilla/websocket/trace_17.go deleted file mode 100644 index 77d05a0b..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/trace_17.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - return doHandshake(tlsConn, cfg) -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/util.go b/pkg/c8y/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 354001e1..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" - "unicode/utf8" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Octet types from RFC 2616. -var octetTypes [256]byte - -const ( - isTokenOctet = 1 << iota - isSpaceOctet -) - -func init() { - // From RFC 2616 - // - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t byte - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpaceOctet - } - if isChar && !isCtl && !isSeparator { - t |= isTokenOctet - } - octetTypes[c] = t - } -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpaceOctet == 0 { - break - } - } - return s[i:] -} - -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isTokenOctet == 0 { - break - } - } - return s[:i], s[i:] -} - -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} - -// equalASCIIFold returns true if s is equal to t with ASCII case folding. -func equalASCIIFold(s, t string) bool { - for s != "" && t != "" { - sr, size := utf8.DecodeRuneInString(s) - s = s[size:] - tr, size := utf8.DecodeRuneInString(t) - t = t[size:] - if sr == tr { - continue - } - if 'A' <= sr && sr <= 'Z' { - sr = sr + 'a' - 'A' - } - if 'A' <= tr && tr <= 'Z' { - tr = tr + 'a' - 'A' - } - if sr != tr { - return false - } - } - return s == t -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains a token equal to value with ASCII case folding. -func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if equalASCIIFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensions parses WebSocket extensions from a header. -func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. 
- - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} diff --git a/pkg/c8y/vendor/github.com/gorilla/websocket/x_net_proxy.go b/pkg/c8y/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b..00000000 --- a/pkg/c8y/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. 
-func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. -func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. 
-func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. -func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. 
-func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) 
- } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/.travis.yml b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/.travis.yml deleted file mode 100644 index 122ceac0..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - -install: - - export PATH=${PATH}:${HOME}/gopath/bin - - go get -v -t ./... - - go get -v github.com/golang/lint/golint - - go get -v golang.org/x/tools/cmd/vet - -script: - - go vet ./... - - go test -v ./... - - golint . - -sudo: false diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/LICENSE b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/LICENSE deleted file mode 100644 index 262eb715..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Oliver Beattie - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/README.md b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/README.md deleted file mode 100644 index d068056d..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# ohmyglob - -A minimal glob matching utility for Go. - -![ohmyglob!](http://i.imgur.com/7vjO2mF.gif) - -[![GoDoc](https://godoc.org/github.com/obeattie/ohmyglob?status.svg)](https://godoc.org/github.com/obeattie/ohmyglob) [![Build Status](https://travis-ci.org/obeattie/ohmyglob.svg?branch=master)](https://travis-ci.org/obeattie/ohmyglob) - -Works internally by converting glob expressions into [`Regexp`](http://golang.org/pkg/regexp/#Regexp)s, which are then -used to match strings. - -## Features - -* Customisable separators -* `*` matches any number of characters, but not the separator -* `?` matches any *single* character, but not the separator -* `!` at the beginning of a pattern will negate the match -* `\` escapes the next character – `\\` is a literal backslash -* ["Globstar"](http://www.linuxjournal.com/content/globstar-new-bash-globbing-option) (`**`) matching -* Glob sets allow matching against a set of ordered globs, with precedence to later matches - -## Usage - - import glob "github.com/obeattie/ohmyglob" - - var g glob.Glob - var err error - var doesMatch bool - - // Standard, with a wildcard - g, err = glob.Compile("foo/*/baz", glob.DefaultOptions) - doesMatch = g.MatchString("foo/bar/baz") // true! - doesMatch = g.MatchString("nope") // false! - - // Globstar - g, err = glob.Compile("foo/**/baz", glob.DefaultOptions) - doesMatch = g.MatchString("foo/bar/bar/baz") // true! - doesMatch = g.MatchString("foo/baz") // true! diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/glob.go b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/glob.go deleted file mode 100644 index 501f5480..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/glob.go +++ /dev/null @@ -1,267 +0,0 @@ -// Package ohmyglob provides a minimal glob matching utility. -package ohmyglob - -import ( - "bytes" - "fmt" - "io" - "os" - "regexp" - "strings" - - log "github.com/cihub/seelog" -) - -var ( - // Logger is used to log trace-level info; logging is completely disabled by default but can be changed by replacing - // this with a configured logger - Logger log.LoggerInterface - // Escaper is the character used to escape a meaningful character - Escaper = '\\' - // Runes that, in addition to the separator, mean something when they appear in the glob (includes Escaper) - expanders = []rune{'?', '*', '!', Escaper} -) - -func init() { - if Logger == nil { - var err error - Logger, err = log.LoggerFromWriterWithMinLevel(os.Stderr, log.CriticalLvl) // seelog bug means we can't use log.Off - if err != nil { - panic(err) - } - } -} - -type processedToken struct { - contents *bytes.Buffer - tokenType tc -} - -type parserState struct { - options *Options - // The regex-escaped separator character - escapedSeparator string - processedTokens []processedToken -} - -// GlobMatcher is the basic interface of a Glob or GlobSet. It provides a Regexp-style interface for checking matches. 
-type GlobMatcher interface { - // Match reports whether the Glob matches the byte slice b - Match(b []byte) bool - // MatchReader reports whether the Glob matches the text read by the RuneReader - MatchReader(r io.RuneReader) bool - // MatchString reports whether the Glob matches the string s - MatchString(s string) bool -} - -// Glob is a single glob pattern; implements GlobMatcher. A Glob is immutable. -type Glob interface { - GlobMatcher - // String returns the pattern that was used to create the Glob - String() string - // IsNegative returns whether the pattern was negated (prefixed with !) - IsNegative() bool -} - -// Glob is a glob pattern that has been compiled into a regular expression. -type globImpl struct { - *regexp.Regexp - // The separator from options, escaped for appending to the regexBuffer (only available during parsing) - // The input pattern - globPattern string - // State only available during parsing - parserState *parserState - // Set to true if the pattern was negated - negated bool -} - -// Options modify the behaviour of Glob parsing -type Options struct { - // The character used to split path components - Separator rune - // Set to false to allow any prefix before the glob match - MatchAtStart bool - // Set to false to allow any suffix after the glob match - MatchAtEnd bool -} - -// DefaultOptions are a default set of Options that uses a forward slash as a separator, and require a full match -var DefaultOptions = &Options{ - Separator: '/', - MatchAtStart: true, - MatchAtEnd: true, -} - -func (g *globImpl) String() string { - return g.globPattern -} - -func (g *globImpl) IsNegative() bool { - return g.negated -} - -func popLastToken(state *parserState) *processedToken { - state.processedTokens = state.processedTokens[:len(state.processedTokens)-1] - if len(state.processedTokens) > 0 { - return &state.processedTokens[len(state.processedTokens)-1] - } - return &processedToken{} -} - -// Compile parses the given glob pattern and convertes it to a Glob. If no options are given, the DefaultOptions are -// used. -func Compile(pattern string, options *Options) (Glob, error) { - pattern = strings.TrimSpace(pattern) - reader := strings.NewReader(pattern) - - if options == nil { - options = DefaultOptions - } else { - // Check that the separator is not an expander - for _, expander := range expanders { - if options.Separator == expander { - return nil, fmt.Errorf("'%s' is not allowed as a separator", string(options.Separator)) - } - } - } - - state := &parserState{ - options: options, - escapedSeparator: escapeRegexComponent(string(options.Separator)), - processedTokens: make([]processedToken, 0, 10), - } - glob := &globImpl{ - Regexp: nil, - globPattern: pattern, - negated: false, - parserState: state, - } - - regexBuf := new(bytes.Buffer) - if options.MatchAtStart { - regexBuf.WriteRune('^') - } - - var err error - // Transform into a regular expression pattern - // 1. Parse negation prefixes - err = parseNegation(reader, glob) - if err != nil { - return nil, err - } - - // 2. Tokenise and convert! 
- tokeniser := newGlobTokeniser(reader, options) - lastProcessedToken := &processedToken{} - for tokeniser.Scan() { - if err = tokeniser.Err(); err != nil { - return nil, err - } - - token, tokenType := tokeniser.Token() - t := processedToken{ - contents: nil, - tokenType: tokenType, - } - - // Special cases - if tokenType == tcGlobStar && tokeniser.Peek() { - // If this is a globstar and the next token is a separator, consume it (the globstar pattern itself includes - // a separator) - if err = tokeniser.PeekErr(); err != nil { - return nil, err - } - _, peekedType := tokeniser.PeekToken() - if peekedType == tcSeparator { - tokeniser.Scan() - } - } - if tokenType == tcGlobStar && lastProcessedToken.tokenType == tcGlobStar { - // If the last token was a globstar and this is too, remove the last. We don't remove this globstar because - // it may now be the last in the pattern, which is special - lastProcessedToken = popLastToken(state) - } - if tokenType == tcGlobStar && lastProcessedToken.tokenType == tcSeparator && !tokeniser.Peek() { - // If this is the last token, and it's a globstar, remove a preceeding separator - lastProcessedToken = popLastToken(state) - } - - t.contents, err = processToken(token, tokenType, glob, tokeniser) - if err != nil { - return nil, err - } - - lastProcessedToken = &t - state.processedTokens = append(state.processedTokens, t) - } - - for _, t := range state.processedTokens { - t.contents.WriteTo(regexBuf) - } - - if options.MatchAtEnd { - regexBuf.WriteRune('$') - } - - regexString := regexBuf.String() - Logger.Tracef("[ohmyglob:Glob] Compiled \"%s\" to regex `%s` (negated: %v)", pattern, regexString, glob.negated) - re, err := regexp.Compile(regexString) - if err != nil { - return nil, err - } - - glob.parserState = nil - glob.Regexp = re - - return glob, nil -} - -func parseNegation(r io.RuneScanner, glob *globImpl) error { - for { - char, _, err := r.ReadRune() - - if err != nil { - return err - } else if char == '!' { - glob.negated = !glob.negated - } else { - r.UnreadRune() - return nil - } - } -} - -func processToken(token string, tokenType tc, glob *globImpl, tokeniser *globTokeniser) (*bytes.Buffer, error) { - state := glob.parserState - buf := new(bytes.Buffer) - - switch tokenType { - case tcGlobStar: - // Globstars also take care of surrounding separators; separator components before and after a globstar are - // suppressed - isLast := !tokeniser.Peek() - buf.WriteString("(?:") - if isLast && len(glob.parserState.processedTokens) > 0 { - buf.WriteString(state.escapedSeparator) - } - buf.WriteString(".+") - if !isLast { - buf.WriteString(state.escapedSeparator) - } - buf.WriteString(")?") - case tcStar: - buf.WriteString("[^") - buf.WriteString(state.escapedSeparator) - buf.WriteString("]*") - case tcAny: - buf.WriteString("[^") - buf.WriteString(state.escapedSeparator) - buf.WriteString("]") - case tcSeparator: - buf.WriteString(state.escapedSeparator) - case tcLiteral: - buf.WriteString(escapeRegexComponent(token)) - } - - return buf, nil -} diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/globset.go b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/globset.go deleted file mode 100644 index c4ea744b..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/globset.go +++ /dev/null @@ -1,112 +0,0 @@ -package ohmyglob - -import ( - "io" - "strings" -) - -// GlobSet represents an ordered set of Globs, and has the same matching capabilities as a Glob. 
Globbing is done -// in order, with later globs taking precedence over earlier globs in the set. A GlobSet is immutable. -type GlobSet interface { - GlobMatcher - // Globs returns the ordered Glob objects contained within the set - Globs() []Glob - // String returns the patterns used to create the GlobSet - String() string - // MatchingGlob returns the Glob that matches the specified pattern (or does not match, in the case of a negative - // glob) - MatchingGlob(b []byte) Glob - // AllMatchingGlobs returns all Globs that match the specified pattern (or do not match, in the case of a negative - // glob) - AllMatchingGlobs(b []byte) []Glob -} - -type globSetImpl []Glob - -func (g globSetImpl) String() string { - strs := make([]string, len(g)) - for i, glob := range g { - strs[i] = glob.String() - } - return strings.Join(strs, ", ") -} - -func (g globSetImpl) Globs() []Glob { - globs := make([]Glob, 0, len(g)) - globs = append(globs, g...) - return globs -} - -func (g globSetImpl) MatchingGlob(b []byte) Glob { - // By iterating in reverse order, we can bail early if we get a match further down. If we iterated in normal order, - // we would HAVE to check every glob - for i := len(g) - 1; i >= 0; i-- { - glob := g[i] - matches := glob.Match(b) - if matches { - Logger.Tracef("[ohmyglob:GlobSet] %s matched to %s", string(b), glob.String()) - return glob - } - } - - return nil -} - -func (g globSetImpl) AllMatchingGlobs(b []byte) []Glob { - result := []Glob(nil) - for _, glob := range g { - if glob.Match(b) { - Logger.Tracef("[ohmyglob:GlobSet] %s matched to %s", string(b), glob.String()) - result = append(result, glob) - } - } - return result -} - -func (g globSetImpl) Match(b []byte) bool { - glob := g.MatchingGlob(b) - return glob != nil && !glob.IsNegative() -} - -func (g globSetImpl) MatchReader(r io.RuneReader) bool { - // Drain the reader and convert to a byte array - b := make([]byte, 0, 10) - for { - rn, _, err := r.ReadRune() - if err == io.EOF { - break - } else if err != nil { - return false - } - b = append(b, byte(rn)) - } - - return g.Match(b) -} - -func (g globSetImpl) MatchString(s string) bool { - return g.Match([]byte(s)) -} - -// NewGlobSet constructs a GlobSet from a slice of Globs. -func NewGlobSet(globs []Glob) (GlobSet, error) { - set := make(globSetImpl, len(globs)) - for i, glob := range globs { - set[i] = glob - } - return set, nil -} - -// CompileGlobSet constructs a GlobSet from a slice of strings, which will be compiled individually to Globs. 
-func CompileGlobSet(patterns []string, options *Options) (GlobSet, error) { - globs := make(globSetImpl, len(patterns)) - for i, pattern := range patterns { - glob, err := Compile(pattern, options) - if err != nil { - return nil, err - } - globs[i] = glob - } - - return globs, nil -} diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/tokeniser.go b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/tokeniser.go deleted file mode 100644 index d263406a..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/tokeniser.go +++ /dev/null @@ -1,176 +0,0 @@ -package ohmyglob - -import ( - "bytes" - "io" -) - -type tc uint8 - -const ( - // An unknown component; returned if there is an error scanning or there are no more tokens - tcUnknown = tc(0x0) - // A string literal - tcLiteral = tc(0x1) - // An Escaper - tcEscaper = tc(0x2) - // Any characters, aside from the separator - tcStar = tc(0x3) - // A globstar component (tc = type component) - tcGlobStar = tc(0x4) - // Any single character, aside from the separator - tcAny = tc(0x5) - // A separator - tcSeparator = tc(0x6) -) - -// Tokenises a glob input; implements an API very similar to that of bufio.Scanner (though is not identical) -type globTokeniser struct { - input io.RuneScanner - globOptions *Options - token string - tokenType tc - err error - hasPeek bool - peekToken string - peekTokenType tc - peekErr error -} - -func newGlobTokeniser(input io.RuneScanner, globOptions *Options) *globTokeniser { - return &globTokeniser{ - input: input, - globOptions: globOptions, - } -} - -// Advances by a single token -func (g *globTokeniser) parse(lastTokenType tc) (string, tc, error) { - var err error - - tokenBuf := new(bytes.Buffer) - tokenType := tcUnknown - escaped := lastTokenType == tcEscaper - - for { - var r rune - r, _, err = g.input.ReadRune() - if err != nil { - break - } - - runeType := tcUnknown - switch r { - case Escaper: - runeType = tcEscaper - case '*': - if tokenType == tcStar { - runeType = tcGlobStar - tokenType = tcGlobStar - } else { - runeType = tcStar - } - case '?': - runeType = tcAny - case g.globOptions.Separator: - runeType = tcSeparator - default: - runeType = tcLiteral - } - - if escaped { - // If the last token was an Escaper, this MUST be a literal - runeType = tcLiteral - escaped = false - } - - if (tokenType != tcUnknown) && (tokenType != runeType) { - // We've stumbled into the next token; backtrack - g.input.UnreadRune() - break - } - - tokenType = runeType - tokenBuf.WriteRune(r) - - if tokenType == tcEscaper || - tokenType == tcGlobStar || - tokenType == tcAny || - tokenType == tcSeparator { - // These tokens are standalone; continued consumption must be a separate token - break - } - } - - if err == io.EOF && tokenType != tcUnknown { - // If we have a token, we can't have an EOF: we want the EOF on the next pass - err = nil - } - - if err != nil { - return "", tcUnknown, err - } - - if tokenType == tcEscaper { - // Escapers should never be yielded; recurse to find the next token - return g.parse(tokenType) - } - - return tokenBuf.String(), tokenType, err -} - -// Scan advances the tokeniser to the next token, which will then be available through the Token method. It returns -// false when the tokenisation stops, either by reaching the end of the input or an error. After Scan returns false, -// the Err method will return any error that occurred during scanning, except that if it was io.EOF, Err will return -// nil. 
-func (g *globTokeniser) Scan() bool { - if g.hasPeek { - g.token, g.tokenType, g.err = g.peekToken, g.peekTokenType, g.peekErr - } else { - g.token, g.tokenType, g.err = g.parse(g.tokenType) - } - - g.peekErr = nil - g.peekToken = "" - g.peekTokenType = tcUnknown - g.hasPeek = false - return g.err == nil -} - -// Peek peeks to the next token, making it available as PeekToken(). Next time Scan() is called it will advance the -// tokeniser to the peeked token. If there is already a peaked token, it will not advance. -func (g *globTokeniser) Peek() bool { - if !g.hasPeek { - g.peekToken, g.peekTokenType, g.peekErr = g.parse(g.tokenType) - g.hasPeek = true - } - - return g.peekErr == nil -} - -// Err returns the first non-EOF error that was encountered by the tokeniser -func (g *globTokeniser) Err() error { - if g.err == io.EOF { - return nil - } - - return g.err -} - -func (g *globTokeniser) Token() (token string, tokenType tc) { - return g.token, g.tokenType -} - -// PeekToken returns the peeked token -func (g *globTokeniser) PeekToken() (token string, tokenType tc) { - return g.peekToken, g.peekTokenType -} - -// PeekErr returns the error that will be returned by Err() next time Scan() is called. Peek() must be called first. -func (g *globTokeniser) PeekErr() error { - if g.peekErr == io.EOF { - return nil - } - - return g.peekErr -} diff --git a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/utils.go b/pkg/c8y/vendor/github.com/obeattie/ohmyglob/utils.go deleted file mode 100644 index 0586c8f9..00000000 --- a/pkg/c8y/vendor/github.com/obeattie/ohmyglob/utils.go +++ /dev/null @@ -1,116 +0,0 @@ -package ohmyglob - -import ( - "bufio" - "bytes" - "regexp" - "strings" - "unicode/utf8" -) - -var escapeNeededCharRegex = regexp.MustCompile(`[-\/\\^$*+?.()|[\]{}]`) -var runesToEscape []rune - -func init() { - runesToEscape = make([]rune, len(expanders)) -} - -// Escapes any characters that would have special meaning in a regular expression, returning the escaped string -func escapeRegexComponent(str string) string { - return escapeNeededCharRegex.ReplaceAllString(str, "\\$0") -} - -// separatorsScanner returns a split function for a scanner that returns tokens delimited any of the specified runes. -// Note that the delimiters themselves are counted as tokens, so callers who want to discard the separators must do this -// themselves. -func separatorsScanner(separators []rune) func(data []byte, atEOF bool) (int, []byte, error) { - return func(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - - // Transform the separators into a map (for efficient lookup) - seps := make(map[rune]bool) - for _, r := range separators { - seps[r] = true - } - - // Scan until separator, marking the end of a token - for width, i := 0, 0; i < len(data); i += width { - var r rune - r, width = utf8.DecodeRune(data[i:]) - if seps[r] { - if i == 0 { - // Separator token - return i + width, data[0 : i+width], nil - } - - // Normal token - return i, data[0:i], nil - } - } - - // If we're at EOF, we have a final, non-empty, non-terminated token: return it - if atEOF && len(data) > 0 { - return len(data), data[0:], nil - } - - // Request more data - return 0, nil, nil - } -} - -// EscapeGlobComponent returns an escaped version of the passed string, ensuring a literal match when used in a pattern. 
-func EscapeGlobComponent(component string, options *Options) string { - if options == nil { - options = DefaultOptions - } - - runesToEscape := make([]rune, 0, len(expanders)+1) - runesToEscape = append(runesToEscape, expanders...) - runesToEscape = append(runesToEscape, options.Separator) - - runesToEscapeMap := make(map[string]bool, len(runesToEscape)) - for _, r := range runesToEscape { - runesToEscapeMap[string(r)] = true - } - - scanner := bufio.NewScanner(strings.NewReader(component)) - scanner.Split(separatorsScanner(runesToEscape)) - buf := new(bytes.Buffer) - for scanner.Scan() { - component := scanner.Text() - if runesToEscapeMap[component] { - buf.WriteRune(Escaper) - } - buf.WriteString(component) - } - - return buf.String() -} - -// EscapeGlobString returns an escaped version of the passed string, ensuring a literal match of its components. -// As distinct to EscapeGlobComponent, it will not escape the separator -func EscapeGlobString(gs string, options *Options) string { - if options == nil { - options = DefaultOptions - } - - runesToEscapeMap := make(map[string]bool, len(expanders)) - for _, r := range expanders { - runesToEscapeMap[string(r)] = true - } - - scanner := bufio.NewScanner(strings.NewReader(gs)) - scanner.Split(separatorsScanner(expanders)) - buf := new(bytes.Buffer) - for scanner.Scan() { - part := scanner.Text() - if runesToEscapeMap[part] { - buf.WriteRune(Escaper) - } - buf.WriteString(part) - } - - return buf.String() -} diff --git a/pkg/c8y/vendor/github.com/pkg/errors/.gitignore b/pkg/c8y/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/pkg/c8y/vendor/github.com/pkg/errors/.travis.yml b/pkg/c8y/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index d4b92663..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -script: - - go test -v ./... diff --git a/pkg/c8y/vendor/github.com/pkg/errors/LICENSE b/pkg/c8y/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e7..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/github.com/pkg/errors/README.md b/pkg/c8y/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 9cc14902..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := io.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. 
- -## License - -BSD-2-Clause diff --git a/pkg/c8y/vendor/github.com/pkg/errors/appveyor.yml b/pkg/c8y/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/pkg/c8y/vendor/github.com/pkg/errors/errors.go b/pkg/c8y/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 36b9857b..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// # Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := io.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// # Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// # Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. 
-// -// # Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. 
-func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/pkg/c8y/vendor/github.com/pkg/errors/stack.go b/pkg/c8y/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 2874a048..00000000 --- a/pkg/c8y/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
-type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/.travis.yml b/pkg/c8y/vendor/github.com/tidwall/gjson/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/LICENSE b/pkg/c8y/vendor/github.com/tidwall/gjson/LICENSE deleted file mode 100644 index 58f5819a..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Josh Baker - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/README.md b/pkg/c8y/vendor/github.com/tidwall/gjson/README.md deleted file mode 100644 index 70240d99..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/README.md +++ /dev/null @@ -1,401 +0,0 @@ -

-GJSON
-Badges: Build Status | GoDoc | GJSON Playground
-
-get json values quickly

- -GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. -It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). - -Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. - -Getting Started -=============== - -## Installing - -To start using GJSON, install Go and run `go get`: - -```sh -$ go get -u github.com/tidwall/gjson -``` - -This will retrieve the library. - -## Get a value -Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. - -```go -package main - -import "github.com/tidwall/gjson" - -const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` - -func main() { - value := gjson.Get(json, "name.last") - println(value.String()) -} -``` - -This will print: - -``` -Prichard -``` -*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* - -## Path Syntax - -A path is a series of keys separated by a dot. -A key may contain special wildcard characters '\*' and '?'. -To access an array value use the index as the key. -To get the number of elements in an array or to access a child path, use the '#' character. -The dot and wildcard characters can be escaped with '\\'. - -```json -{ - "name": {"first": "Tom", "last": "Anderson"}, - "age":37, - "children": ["Sara","Alex","Jack"], - "fav.movie": "Deer Hunter", - "friends": [ - {"first": "Dale", "last": "Murphy", "age": 44}, - {"first": "Roger", "last": "Craig", "age": 68}, - {"first": "Jane", "last": "Murphy", "age": 47} - ] -} -``` -``` -"name.last" >> "Anderson" -"age" >> 37 -"children" >> ["Sara","Alex","Jack"] -"children.#" >> 3 -"children.1" >> "Alex" -"child*.2" >> "Jack" -"c?ildren.0" >> "Sara" -"fav\.movie" >> "Deer Hunter" -"friends.#.first" >> ["Dale","Roger","Jane"] -"friends.1.last" >> "Craig" -``` - -You can also query an array for the first match by using `#[...]`, or find all matches with `#[...]#`. -Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators and the simple pattern matching `%` operator. - -``` -friends.#[last=="Murphy"].first >> "Dale" -friends.#[last=="Murphy"]#.first >> ["Dale","Jane"] -friends.#[age>45]#.last >> ["Craig","Murphy"] -friends.#[first%"D*"].last >> "Murphy" -``` - -## JSON Lines - -There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array. - -For example: - -``` -{"name": "Gilbert", "age": 61} -{"name": "Alexa", "age": 34} -{"name": "May", "age": 57} -{"name": "Deloise", "age": 44} -``` - -``` -..# >> 4 -..1 >> {"name": "Alexa", "age": 34} -..3 >> {"name": "Deloise", "age": 44} -..#.name >> ["Gilbert","Alexa","May","Deloise"] -..#[name="May"].age >> 57 -``` - -The `ForEachLines` function will iterate through JSON lines. - -```go -gjson.ForEachLine(json, func(line gjson.Result) bool{ - println(line.String()) - return true -}) -``` - -## Result Type - -GJSON supports the json types `string`, `number`, `bool`, and `null`. -Arrays and Objects are returned as their raw json types. 
- -The `Result` type holds one of these: - -``` -bool, for JSON booleans -float64, for JSON numbers -string, for JSON string literals -nil, for JSON null -``` - -To directly access the value: - -```go -result.Type // can be String, Number, True, False, Null, or JSON -result.Str // holds the string -result.Num // holds the float64 number -result.Raw // holds the raw json -result.Index // index of raw value in original json, zero means index unknown -``` - -There are a variety of handy functions that work on a result: - -```go -result.Exists() bool -result.Value() interface{} -result.Int() int64 -result.Uint() uint64 -result.Float() float64 -result.String() string -result.Bool() bool -result.Time() time.Time -result.Array() []gjson.Result -result.Map() map[string]gjson.Result -result.Get(path string) Result -result.ForEach(iterator func(key, value Result) bool) -result.Less(token Result, caseSensitive bool) bool -``` - -The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: - -The `result.Array()` function returns back an array of values. -If the result represents a non-existent value, then an empty array will be returned. -If the result is not a JSON array, the return value will be an array containing one result. - -```go -boolean >> bool -number >> float64 -string >> string -null >> nil -array >> []interface{} -object >> map[string]interface{} -``` - -### 64-bit integers - -The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers. - -```go -result.Int() int64 // -9223372036854775808 to 9223372036854775807 -result.Uint() int64 // 0 to 18446744073709551615 -``` - -## Get nested array values - -Suppose you want all the last names from the following json: - -```json -{ - "programmers": [ - { - "firstName": "Janet", - "lastName": "McLaughlin", - }, { - "firstName": "Elliotte", - "lastName": "Hunter", - }, { - "firstName": "Jason", - "lastName": "Harold", - } - ] -} -``` - -You would use the path "programmers.#.lastName" like such: - -```go -result := gjson.Get(json, "programmers.#.lastName") -for _, name := range result.Array() { - println(name.String()) -} -``` - -You can also query an object inside an array: - -```go -name := gjson.Get(json, `programmers.#[lastName="Hunter"].firstName`) -println(name.String()) // prints "Elliotte" -``` - -## Iterate through an object or array - -The `ForEach` function allows for quickly iterating through an object or array. -The key and value are passed to the iterator function for objects. -Only the value is passed for arrays. -Returning `false` from an iterator will stop iteration. - -```go -result := gjson.Get(json, "programmers") -result.ForEach(func(key, value gjson.Result) bool { - println(value.String()) - return true // keep iterating -}) -``` - -## Simple Parse and Get - -There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. - -For example, all of these will return the same result: - -```go -gjson.Parse(json).Get("name").Get("last") -gjson.Get(json, "name").Get("last") -gjson.Get(json, "name.last") -``` - -## Check for the existence of a value - -Sometimes you just want to know if a value exists. 
- -```go -value := gjson.Get(json, "name.last") -if !value.Exists() { - println("no last name") -} else { - println(value.String()) -} - -// Or as one step -if gjson.Get(json, "name.last").Exists() { - println("has a last name") -} -``` - -## Validate JSON - -The `Get*` and `Parse*` functions expects that the json is well-formed. Bad json will not panic, but it may return back unexpected results. - -If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON. - -```go -if !gjson.Valid(json) { - return errors.New("invalid json") -} -value := gjson.Get(json, "name.last") -``` - -## Unmarshal to a map - -To unmarshal to a `map[string]interface{}`: - -```go -m, ok := gjson.Parse(json).Value().(map[string]interface{}) -if !ok { - // not a map -} -``` - -## Working with Bytes - -If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. - -```go -var json []byte = ... -result := gjson.GetBytes(json, path) -``` - -If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: - -```go -var json []byte = ... -result := gjson.GetBytes(json, path) -var raw []byte -if result.Index > 0 { - raw = json[result.Index:result.Index+len(result.Raw)] -} else { - raw = []byte(result.Raw) -} -``` - -This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. - -## Get multiple values at once - -The `GetMany` function can be used to get multiple values at the same time. - -```go -results := gjson.GetMany(json, "name.first", "name.last", "age") -``` - -The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. 
- -## Performance - -Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), -[ffjson](https://github.com/pquerna/ffjson), -[EasyJSON](https://github.com/mailru/easyjson), -[jsonparser](https://github.com/buger/jsonparser), -and [json-iterator](https://github.com/json-iterator/go) - -``` -BenchmarkGJSONGet-8 3000000 372 ns/op 0 B/op 0 allocs/op -BenchmarkGJSONUnmarshalMap-8 900000 4154 ns/op 1920 B/op 26 allocs/op -BenchmarkJSONUnmarshalMap-8 600000 9019 ns/op 3048 B/op 69 allocs/op -BenchmarkJSONDecoder-8 300000 14120 ns/op 4224 B/op 184 allocs/op -BenchmarkFFJSONLexer-8 1500000 3111 ns/op 896 B/op 8 allocs/op -BenchmarkEasyJSONLexer-8 3000000 887 ns/op 613 B/op 6 allocs/op -BenchmarkJSONParserGet-8 3000000 499 ns/op 21 B/op 0 allocs/op -BenchmarkJSONIterator-8 3000000 812 ns/op 544 B/op 9 allocs/op -``` - -JSON document used: - -```json -{ - "widget": { - "debug": "on", - "window": { - "title": "Sample Konfabulator Widget", - "name": "main_window", - "width": 500, - "height": 500 - }, - "image": { - "src": "Images/Sun.png", - "hOffset": 250, - "vOffset": 250, - "alignment": "center" - }, - "text": { - "data": "Click Here", - "size": 36, - "style": "bold", - "vOffset": 100, - "alignment": "center", - "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" - } - } -} -``` - -Each operation was rotated though one of the following search paths: - -``` -widget.window.name -widget.image.hOffset -widget.text.onMouseUp -``` - -*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.8 and can be be found [here](https://github.com/tidwall/gjson-benchmarks).* - - -## Contact -Josh Baker [@tidwall](http://twitter.com/tidwall) - -## License - -GJSON source code is available under the MIT [License](/LICENSE). diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson.go b/pkg/c8y/vendor/github.com/tidwall/gjson/gjson.go deleted file mode 100644 index 74515ed5..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson.go +++ /dev/null @@ -1,2110 +0,0 @@ -// Package gjson provides searching for json strings. -package gjson - -import ( - "encoding/base64" - "encoding/json" - "errors" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf16" - "unicode/utf8" - - "github.com/tidwall/match" -) - -// Type is Result type -type Type int - -const ( - // Null is a null json value - Null Type = iota - // False is a json false boolean - False - // Number is json number - Number - // String is a json string - String - // True is a json true boolean - True - // JSON is a raw block of JSON - JSON -) - -// String returns a string representation of the type. -func (t Type) String() string { - switch t { - default: - return "" - case Null: - return "Null" - case False: - return "False" - case Number: - return "Number" - case String: - return "String" - case True: - return "True" - case JSON: - return "JSON" - } -} - -// Result represents a json value that is returned from Get(). -type Result struct { - // Type is the json type - Type Type - // Raw is the raw json - Raw string - // Str is the json string - Str string - // Num is the json number - Num float64 - // Index of raw value in original json, zero means index unknown - Index int -} - -// String returns a string representation of the value. 
-func (t Result) String() string { - switch t.Type { - default: - return "" - case False: - return "false" - case Number: - if len(t.Raw) == 0 { - // calculated result - return strconv.FormatFloat(t.Num, 'f', -1, 64) - } - var i int - if t.Raw[0] == '-' { - i++ - } - for ; i < len(t.Raw); i++ { - if t.Raw[i] < '0' || t.Raw[i] > '9' { - return strconv.FormatFloat(t.Num, 'f', -1, 64) - } - } - return t.Raw - case String: - return t.Str - case JSON: - return t.Raw - case True: - return "true" - } -} - -// Bool returns an boolean representation. -func (t Result) Bool() bool { - switch t.Type { - default: - return false - case True: - return true - case String: - return t.Str != "" && t.Str != "0" && t.Str != "false" - case Number: - return t.Num != 0 - } -} - -// Int returns an integer representation. -func (t Result) Int() int64 { - switch t.Type { - default: - return 0 - case True: - return 1 - case String: - n, _ := parseInt(t.Str) - return n - case Number: - // try to directly convert the float64 to int64 - n, ok := floatToInt(t.Num) - if !ok { - // now try to parse the raw string - n, ok = parseInt(t.Raw) - if !ok { - // fallback to a standard conversion - return int64(t.Num) - } - } - return n - } -} - -// Uint returns an unsigned integer representation. -func (t Result) Uint() uint64 { - switch t.Type { - default: - return 0 - case True: - return 1 - case String: - n, _ := parseUint(t.Str) - return n - case Number: - // try to directly convert the float64 to uint64 - n, ok := floatToUint(t.Num) - if !ok { - // now try to parse the raw string - n, ok = parseUint(t.Raw) - if !ok { - // fallback to a standard conversion - return uint64(t.Num) - } - } - return n - } -} - -// Float returns an float64 representation. -func (t Result) Float() float64 { - switch t.Type { - default: - return 0 - case True: - return 1 - case String: - n, _ := strconv.ParseFloat(t.Str, 64) - return n - case Number: - return t.Num - } -} - -// Time returns a time.Time representation. -func (t Result) Time() time.Time { - res, _ := time.Parse(time.RFC3339, t.String()) - return res -} - -// Array returns back an array of values. -// If the result represents a non-existent value, then an empty array will be returned. -// If the result is not a JSON array, the return value will be an array containing one result. -func (t Result) Array() []Result { - if t.Type == Null { - return []Result{} - } - if t.Type != JSON { - return []Result{t} - } - r := t.arrayOrMap('[', false) - return r.a -} - -// IsObject returns true if the result value is a JSON object. -func (t Result) IsObject() bool { - return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' -} - -// IsArray returns true if the result value is a JSON array. -func (t Result) IsArray() bool { - return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' -} - -// ForEach iterates through values. -// If the result represents a non-existent value, then no values will be iterated. -// If the result is an Object, the iterator will pass the key and value of each item. -// If the result is an Array, the iterator will only pass the value of each item. -// If the result is not a JSON array or object, the iterator will pass back one value equal to the result. 
-func (t Result) ForEach(iterator func(key, value Result) bool) { - if !t.Exists() { - return - } - if t.Type != JSON { - iterator(Result{}, t) - return - } - json := t.Raw - var keys bool - var i int - var key, value Result - for ; i < len(json); i++ { - if json[i] == '{' { - i++ - key.Type = String - keys = true - break - } else if json[i] == '[' { - i++ - break - } - if json[i] > ' ' { - return - } - } - var str string - var vesc bool - var ok bool - for ; i < len(json); i++ { - if keys { - if json[i] != '"' { - continue - } - s := i - i, str, vesc, ok = parseString(json, i+1) - if !ok { - return - } - if vesc { - key.Str = unescape(str[1 : len(str)-1]) - } else { - key.Str = str[1 : len(str)-1] - } - key.Raw = str - key.Index = s - } - for ; i < len(json); i++ { - if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { - continue - } - break - } - s := i - i, value, ok = parseAny(json, i, true) - if !ok { - return - } - value.Index = s - if !iterator(key, value) { - return - } - } -} - -// Map returns back an map of values. The result should be a JSON array. -func (t Result) Map() map[string]Result { - if t.Type != JSON { - return map[string]Result{} - } - r := t.arrayOrMap('{', false) - return r.o -} - -// Get searches result for the specified path. -// The result should be a JSON array or object. -func (t Result) Get(path string) Result { - return Get(t.Raw, path) -} - -type arrayOrMapResult struct { - a []Result - ai []interface{} - o map[string]Result - oi map[string]interface{} - vc byte -} - -func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { - var json = t.Raw - var i int - var value Result - var count int - var key Result - if vc == 0 { - for ; i < len(json); i++ { - if json[i] == '{' || json[i] == '[' { - r.vc = json[i] - i++ - break - } - if json[i] > ' ' { - goto end - } - } - } else { - for ; i < len(json); i++ { - if json[i] == vc { - i++ - break - } - if json[i] > ' ' { - goto end - } - } - r.vc = vc - } - if r.vc == '{' { - if valueize { - r.oi = make(map[string]interface{}) - } else { - r.o = make(map[string]Result) - } - } else { - if valueize { - r.ai = make([]interface{}, 0) - } else { - r.a = make([]Result, 0) - } - } - for ; i < len(json); i++ { - if json[i] <= ' ' { - continue - } - // get next value - if json[i] == ']' || json[i] == '}' { - break - } - switch json[i] { - default: - if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { - value.Type = Number - value.Raw, value.Num = tonum(json[i:]) - value.Str = "" - } else { - continue - } - case '{', '[': - value.Type = JSON - value.Raw = squash(json[i:]) - value.Str, value.Num = "", 0 - case 'n': - value.Type = Null - value.Raw = tolit(json[i:]) - value.Str, value.Num = "", 0 - case 't': - value.Type = True - value.Raw = tolit(json[i:]) - value.Str, value.Num = "", 0 - case 'f': - value.Type = False - value.Raw = tolit(json[i:]) - value.Str, value.Num = "", 0 - case '"': - value.Type = String - value.Raw, value.Str = tostr(json[i:]) - value.Num = 0 - } - i += len(value.Raw) - 1 - - if r.vc == '{' { - if count%2 == 0 { - key = value - } else { - if valueize { - if _, ok := r.oi[key.Str]; !ok { - r.oi[key.Str] = value.Value() - } - } else { - if _, ok := r.o[key.Str]; !ok { - r.o[key.Str] = value - } - } - } - count++ - } else { - if valueize { - r.ai = append(r.ai, value.Value()) - } else { - r.a = append(r.a, value) - } - } - } -end: - return -} - -// Parse parses the json and returns a result. -// -// This function expects that the json is well-formed, and does not validate. 
-// Invalid json will not panic, but it may return back unexpected results. -// If you are consuming JSON from an unpredictable source then you may want to -// use the Valid function first. -func Parse(json string) Result { - var value Result - for i := 0; i < len(json); i++ { - if json[i] == '{' || json[i] == '[' { - value.Type = JSON - value.Raw = json[i:] // just take the entire raw - break - } - if json[i] <= ' ' { - continue - } - switch json[i] { - default: - if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { - value.Type = Number - value.Raw, value.Num = tonum(json[i:]) - } else { - return Result{} - } - case 'n': - value.Type = Null - value.Raw = tolit(json[i:]) - case 't': - value.Type = True - value.Raw = tolit(json[i:]) - case 'f': - value.Type = False - value.Raw = tolit(json[i:]) - case '"': - value.Type = String - value.Raw, value.Str = tostr(json[i:]) - } - break - } - return value -} - -// ParseBytes parses the json and returns a result. -// If working with bytes, this method preferred over Parse(string(data)) -func ParseBytes(json []byte) Result { - return Parse(string(json)) -} - -func squash(json string) string { - // expects that the lead character is a '[' or '{' - // squash the value, ignoring all nested arrays and objects. - // the first '[' or '{' has already been read - depth := 1 - for i := 1; i < len(json); i++ { - if json[i] >= '"' && json[i] <= '}' { - switch json[i] { - case '"': - i++ - s2 := i - for ; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - // look for an escaped slash - if json[i-1] == '\\' { - n := 0 - for j := i - 2; j > s2-1; j-- { - if json[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - break - } - } - case '{', '[': - depth++ - case '}', ']': - depth-- - if depth == 0 { - return json[:i+1] - } - } - } - } - return json -} - -func tonum(json string) (raw string, num float64) { - for i := 1; i < len(json); i++ { - // less than dash might have valid characters - if json[i] <= '-' { - if json[i] <= ' ' || json[i] == ',' { - // break on whitespace and comma - raw = json[:i] - num, _ = strconv.ParseFloat(raw, 64) - return - } - // could be a '+' or '-'. let's assume so. - continue - } - if json[i] < ']' { - // probably a valid number - continue - } - if json[i] == 'e' || json[i] == 'E' { - // allow for exponential numbers - continue - } - // likely a ']' or '}' - raw = json[:i] - num, _ = strconv.ParseFloat(raw, 64) - return - } - raw = json - num, _ = strconv.ParseFloat(raw, 64) - return -} - -func tolit(json string) (raw string) { - for i := 1; i < len(json); i++ { - if json[i] < 'a' || json[i] > 'z' { - return json[:i] - } - } - return json -} - -func tostr(json string) (raw string, str string) { - // expects that the lead character is a '"' - for i := 1; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - return json[:i+1], json[1:i] - } - if json[i] == '\\' { - i++ - for ; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - // look for an escaped slash - if json[i-1] == '\\' { - n := 0 - for j := i - 2; j > 0; j-- { - if json[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - break - } - } - var ret string - if i+1 < len(json) { - ret = json[:i+1] - } else { - ret = json[:i] - } - return ret, unescape(json[1:i]) - } - } - return json, json[1:] -} - -// Exists returns true if value exists. 
-// -// if gjson.Get(json, "name.last").Exists(){ -// println("value exists") -// } -func (t Result) Exists() bool { - return t.Type != Null || len(t.Raw) != 0 -} - -// Value returns one of these types: -// -// bool, for JSON booleans -// float64, for JSON numbers -// Number, for JSON numbers -// string, for JSON string literals -// nil, for JSON null -// map[string]interface{}, for JSON objects -// []interface{}, for JSON arrays -// -func (t Result) Value() interface{} { - if t.Type == String { - return t.Str - } - switch t.Type { - default: - return nil - case False: - return false - case Number: - return t.Num - case JSON: - r := t.arrayOrMap(0, true) - if r.vc == '{' { - return r.oi - } else if r.vc == '[' { - return r.ai - } - return nil - case True: - return true - } -} - -func parseString(json string, i int) (int, string, bool, bool) { - var s = i - for ; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - return i + 1, json[s-1 : i+1], false, true - } - if json[i] == '\\' { - i++ - for ; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - // look for an escaped slash - if json[i-1] == '\\' { - n := 0 - for j := i - 2; j > 0; j-- { - if json[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - return i + 1, json[s-1 : i+1], true, true - } - } - break - } - } - return i, json[s-1:], false, false -} - -func parseNumber(json string, i int) (int, string) { - var s = i - i++ - for ; i < len(json); i++ { - if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || json[i] == '}' { - return i, json[s:i] - } - } - return i, json[s:] -} - -func parseLiteral(json string, i int) (int, string) { - var s = i - i++ - for ; i < len(json); i++ { - if json[i] < 'a' || json[i] > 'z' { - return i, json[s:i] - } - } - return i, json[s:] -} - -type arrayPathResult struct { - part string - path string - more bool - alogok bool - arrch bool - alogkey string - query struct { - on bool - path string - op string - value string - all bool - } -} - -func parseArrayPath(path string) (r arrayPathResult) { - for i := 0; i < len(path); i++ { - if path[i] == '.' { - r.part = path[:i] - r.path = path[i+1:] - r.more = true - return - } - if path[i] == '#' { - r.arrch = true - if i == 0 && len(path) > 1 { - if path[1] == '.' { - r.alogok = true - r.alogkey = path[2:] - r.path = path[:1] - } else if path[1] == '[' { - r.query.on = true - // query - i += 2 - // whitespace - for ; i < len(path); i++ { - if path[i] > ' ' { - break - } - } - s := i - for ; i < len(path); i++ { - if path[i] <= ' ' || - path[i] == '!' || - path[i] == '=' || - path[i] == '<' || - path[i] == '>' || - path[i] == '%' || - path[i] == ']' { - break - } - } - r.query.path = path[s:i] - // whitespace - for ; i < len(path); i++ { - if path[i] > ' ' { - break - } - } - if i < len(path) { - s = i - if path[i] == '!' 
{ - if i < len(path)-1 && path[i+1] == '=' { - i++ - } - } else if path[i] == '<' || path[i] == '>' { - if i < len(path)-1 && path[i+1] == '=' { - i++ - } - } else if path[i] == '=' { - if i < len(path)-1 && path[i+1] == '=' { - s++ - i++ - } - } - i++ - r.query.op = path[s:i] - // whitespace - for ; i < len(path); i++ { - if path[i] > ' ' { - break - } - } - s = i - for ; i < len(path); i++ { - if path[i] == '"' { - i++ - s2 := i - for ; i < len(path); i++ { - if path[i] > '\\' { - continue - } - if path[i] == '"' { - // look for an escaped slash - if path[i-1] == '\\' { - n := 0 - for j := i - 2; j > s2-1; j-- { - if path[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - break - } - } - } else if path[i] == ']' { - if i+1 < len(path) && path[i+1] == '#' { - r.query.all = true - } - break - } - } - if i > len(path) { - i = len(path) - } - v := path[s:i] - for len(v) > 0 && v[len(v)-1] <= ' ' { - v = v[:len(v)-1] - } - r.query.value = v - } - } - } - continue - } - } - r.part = path - r.path = "" - return -} - -type objectPathResult struct { - part string - path string - wild bool - more bool -} - -func parseObjectPath(path string) (r objectPathResult) { - for i := 0; i < len(path); i++ { - if path[i] == '.' { - r.part = path[:i] - r.path = path[i+1:] - r.more = true - return - } - if path[i] == '*' || path[i] == '?' { - r.wild = true - continue - } - if path[i] == '\\' { - // go into escape mode. this is a slower path that - // strips off the escape character from the part. - epart := []byte(path[:i]) - i++ - if i < len(path) { - epart = append(epart, path[i]) - i++ - for ; i < len(path); i++ { - if path[i] == '\\' { - i++ - if i < len(path) { - epart = append(epart, path[i]) - } - continue - } else if path[i] == '.' { - r.part = string(epart) - r.path = path[i+1:] - r.more = true - return - } else if path[i] == '*' || path[i] == '?' { - r.wild = true - } - epart = append(epart, path[i]) - } - } - // append the last part - r.part = string(epart) - return - } - } - r.part = path - return -} - -func parseSquash(json string, i int) (int, string) { - // expects that the lead character is a '[' or '{' - // squash the value, ignoring all nested arrays and objects. - // the first '[' or '{' has already been read - s := i - i++ - depth := 1 - for ; i < len(json); i++ { - if json[i] >= '"' && json[i] <= '}' { - switch json[i] { - case '"': - i++ - s2 := i - for ; i < len(json); i++ { - if json[i] > '\\' { - continue - } - if json[i] == '"' { - // look for an escaped slash - if json[i-1] == '\\' { - n := 0 - for j := i - 2; j > s2-1; j-- { - if json[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - break - } - } - case '{', '[': - depth++ - case '}', ']': - depth-- - if depth == 0 { - i++ - return i, json[s:i] - } - } - } - } - return i, json[s:] -} - -func parseObject(c *parseContext, i int, path string) (int, bool) { - var pmatch, kesc, vesc, ok, hit bool - var key, val string - rp := parseObjectPath(path) - for i < len(c.json) { - for ; i < len(c.json); i++ { - if c.json[i] == '"' { - // parse_key_string - // this is slightly different from getting s string value - // because we don't need the outer quotes. 
- i++ - var s = i - for ; i < len(c.json); i++ { - if c.json[i] > '\\' { - continue - } - if c.json[i] == '"' { - i, key, kesc, ok = i+1, c.json[s:i], false, true - goto parse_key_string_done - } - if c.json[i] == '\\' { - i++ - for ; i < len(c.json); i++ { - if c.json[i] > '\\' { - continue - } - if c.json[i] == '"' { - // look for an escaped slash - if c.json[i-1] == '\\' { - n := 0 - for j := i - 2; j > 0; j-- { - if c.json[j] != '\\' { - break - } - n++ - } - if n%2 == 0 { - continue - } - } - i, key, kesc, ok = i+1, c.json[s:i], true, true - goto parse_key_string_done - } - } - break - } - } - key, kesc, ok = c.json[s:], false, false - parse_key_string_done: - break - } - if c.json[i] == '}' { - return i + 1, false - } - } - if !ok { - return i, false - } - if rp.wild { - if kesc { - pmatch = match.Match(unescape(key), rp.part) - } else { - pmatch = match.Match(key, rp.part) - } - } else { - if kesc { - pmatch = rp.part == unescape(key) - } else { - pmatch = rp.part == key - } - } - hit = pmatch && !rp.more - for ; i < len(c.json); i++ { - switch c.json[i] { - default: - continue - case '"': - i++ - i, val, vesc, ok = parseString(c.json, i) - if !ok { - return i, false - } - if hit { - if vesc { - c.value.Str = unescape(val[1 : len(val)-1]) - } else { - c.value.Str = val[1 : len(val)-1] - } - c.value.Raw = val - c.value.Type = String - return i, true - } - case '{': - if pmatch && !hit { - i, hit = parseObject(c, i+1, rp.path) - if hit { - return i, true - } - } else { - i, val = parseSquash(c.json, i) - if hit { - c.value.Raw = val - c.value.Type = JSON - return i, true - } - } - case '[': - if pmatch && !hit { - i, hit = parseArray(c, i+1, rp.path) - if hit { - return i, true - } - } else { - i, val = parseSquash(c.json, i) - if hit { - c.value.Raw = val - c.value.Type = JSON - return i, true - } - } - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i, val = parseNumber(c.json, i) - if hit { - c.value.Raw = val - c.value.Type = Number - c.value.Num, _ = strconv.ParseFloat(val, 64) - return i, true - } - case 't', 'f', 'n': - vc := c.json[i] - i, val = parseLiteral(c.json, i) - if hit { - c.value.Raw = val - switch vc { - case 't': - c.value.Type = True - case 'f': - c.value.Type = False - } - return i, true - } - } - break - } - } - return i, false -} -func queryMatches(rp *arrayPathResult, value Result) bool { - rpv := rp.query.value - if len(rpv) > 2 && rpv[0] == '"' && rpv[len(rpv)-1] == '"' { - rpv = rpv[1 : len(rpv)-1] - } - switch value.Type { - case String: - switch rp.query.op { - case "=": - return value.Str == rpv - case "!=": - return value.Str != rpv - case "<": - return value.Str < rpv - case "<=": - return value.Str <= rpv - case ">": - return value.Str > rpv - case ">=": - return value.Str >= rpv - case "%": - return match.Match(value.Str, rpv) - } - case Number: - rpvn, _ := strconv.ParseFloat(rpv, 64) - switch rp.query.op { - case "=": - return value.Num == rpvn - case "!=": - return value.Num != rpvn - case "<": - return value.Num < rpvn - case "<=": - return value.Num <= rpvn - case ">": - return value.Num > rpvn - case ">=": - return value.Num >= rpvn - } - case True: - switch rp.query.op { - case "=": - return rpv == "true" - case "!=": - return rpv != "true" - case ">": - return rpv == "false" - case ">=": - return true - } - case False: - switch rp.query.op { - case "=": - return rpv == "false" - case "!=": - return rpv != "false" - case "<": - return rpv == "true" - case "<=": - return true - } - } - return false -} -func parseArray(c 
*parseContext, i int, path string) (int, bool) { - var pmatch, vesc, ok, hit bool - var val string - var h int - var alog []int - var partidx int - var multires []byte - rp := parseArrayPath(path) - if !rp.arrch { - n, ok := parseUint(rp.part) - if !ok { - partidx = -1 - } else { - partidx = int(n) - } - } - for i < len(c.json)+1 { - if !rp.arrch { - pmatch = partidx == h - hit = pmatch && !rp.more - } - h++ - if rp.alogok { - alog = append(alog, i) - } - for ; ; i++ { - var ch byte - if i > len(c.json) { - break - } else if i == len(c.json) { - ch = ']' - } else { - ch = c.json[i] - } - switch ch { - default: - continue - case '"': - i++ - i, val, vesc, ok = parseString(c.json, i) - if !ok { - return i, false - } - if hit { - if rp.alogok { - break - } - if vesc { - c.value.Str = unescape(val[1 : len(val)-1]) - } else { - c.value.Str = val[1 : len(val)-1] - } - c.value.Raw = val - c.value.Type = String - return i, true - } - case '{': - if pmatch && !hit { - i, hit = parseObject(c, i+1, rp.path) - if hit { - if rp.alogok { - break - } - return i, true - } - } else { - i, val = parseSquash(c.json, i) - if rp.query.on { - res := Get(val, rp.query.path) - if queryMatches(&rp, res) { - if rp.more { - res = Get(val, rp.path) - } else { - res = Result{Raw: val, Type: JSON} - } - if rp.query.all { - if len(multires) == 0 { - multires = append(multires, '[') - } else { - multires = append(multires, ',') - } - multires = append(multires, res.Raw...) - } else { - c.value = res - return i, true - } - } - } else if hit { - if rp.alogok { - break - } - c.value.Raw = val - c.value.Type = JSON - return i, true - } - } - case '[': - if pmatch && !hit { - i, hit = parseArray(c, i+1, rp.path) - if hit { - if rp.alogok { - break - } - return i, true - } - } else { - i, val = parseSquash(c.json, i) - if hit { - if rp.alogok { - break - } - c.value.Raw = val - c.value.Type = JSON - return i, true - } - } - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i, val = parseNumber(c.json, i) - if hit { - if rp.alogok { - break - } - c.value.Raw = val - c.value.Type = Number - c.value.Num, _ = strconv.ParseFloat(val, 64) - return i, true - } - case 't', 'f', 'n': - vc := c.json[i] - i, val = parseLiteral(c.json, i) - if hit { - if rp.alogok { - break - } - c.value.Raw = val - switch vc { - case 't': - c.value.Type = True - case 'f': - c.value.Type = False - } - return i, true - } - case ']': - if rp.arrch && rp.part == "#" { - if rp.alogok { - var jsons = make([]byte, 0, 64) - jsons = append(jsons, '[') - - for j, k := 0, 0; j < len(alog); j++ { - _, res, ok := parseAny(c.json, alog[j], true) - if ok { - res := res.Get(rp.alogkey) - if res.Exists() { - if k > 0 { - jsons = append(jsons, ',') - } - jsons = append(jsons, []byte(res.Raw)...) - k++ - } - } - } - jsons = append(jsons, ']') - c.value.Type = JSON - c.value.Raw = string(jsons) - return i + 1, true - } - if rp.alogok { - break - } - c.value.Raw = "" - c.value.Type = Number - c.value.Num = float64(h - 1) - c.calcd = true - return i + 1, true - } - if len(multires) > 0 && !c.value.Exists() { - c.value = Result{ - Raw: string(append(multires, ']')), - Type: JSON, - } - } - return i + 1, false - } - break - } - } - return i, false -} - -// ForEachLine iterates through lines of JSON as specified by the JSON Lines -// format (http://jsonlines.org/). -// Each line is returned as a GJSON Result. 
-func ForEachLine(json string, iterator func(line Result) bool) { - var res Result - var i int - for { - i, res, _ = parseAny(json, i, true) - if !res.Exists() { - break - } - if !iterator(res) { - return - } - } -} - -type parseContext struct { - json string - value Result - calcd bool - lines bool -} - -// Get searches json for the specified path. -// A path is in dot syntax, such as "name.last" or "age". -// When the value is found it's returned immediately. -// -// A path is a series of keys searated by a dot. -// A key may contain special wildcard characters '*' and '?'. -// To access an array value use the index as the key. -// To get the number of elements in an array or to access a child path, use the '#' character. -// The dot and wildcard character can be escaped with '\'. -// -// { -// "name": {"first": "Tom", "last": "Anderson"}, -// "age":37, -// "children": ["Sara","Alex","Jack"], -// "friends": [ -// {"first": "James", "last": "Murphy"}, -// {"first": "Roger", "last": "Craig"} -// ] -// } -// "name.last" >> "Anderson" -// "age" >> 37 -// "children" >> ["Sara","Alex","Jack"] -// "children.#" >> 3 -// "children.1" >> "Alex" -// "child*.2" >> "Jack" -// "c?ildren.0" >> "Sara" -// "friends.#.first" >> ["James","Roger"] -// -// This function expects that the json is well-formed, and does not validate. -// Invalid json will not panic, but it may return back unexpected results. -// If you are consuming JSON from an unpredictable source then you may want to -// use the Valid function first. -func Get(json, path string) Result { - var i int - var c = &parseContext{json: json} - if len(path) >= 2 && path[0] == '.' && path[1] == '.' { - c.lines = true - parseArray(c, 0, path[2:]) - } else { - for ; i < len(c.json); i++ { - if c.json[i] == '{' { - i++ - parseObject(c, i, path) - break - } - if c.json[i] == '[' { - i++ - parseArray(c, i, path) - break - } - } - } - fillIndex(json, c) - return c.value -} - -// GetBytes searches json for the specified path. 
-// If working with bytes, this method preferred over Get(string(data), path) -func GetBytes(json []byte, path string) Result { - return getBytes(json, path) -} - -// runeit returns the rune from the the \uXXXX -func runeit(json string) rune { - n, _ := strconv.ParseUint(json[:4], 16, 64) - return rune(n) -} - -// unescape unescapes a string -func unescape(json string) string { //, error) { - var str = make([]byte, 0, len(json)) - for i := 0; i < len(json); i++ { - switch { - default: - str = append(str, json[i]) - case json[i] < ' ': - return string(str) - case json[i] == '\\': - i++ - if i >= len(json) { - return string(str) - } - switch json[i] { - default: - return string(str) - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, '\t') - case '"': - str = append(str, '"') - case 'u': - if i+5 > len(json) { - return string(str) - } - r := runeit(json[i+1:]) - i += 5 - if utf16.IsSurrogate(r) { - // need another code - if len(json[i:]) >= 6 && json[i] == '\\' && json[i+1] == 'u' { - // we expect it to be correct so just consume it - r = utf16.DecodeRune(r, runeit(json[i+2:])) - i += 6 - } - } - // provide enough space to encode the largest utf8 possible - str = append(str, 0, 0, 0, 0, 0, 0, 0, 0) - n := utf8.EncodeRune(str[len(str)-8:], r) - str = str[:len(str)-8+n] - i-- // backtrack index by one - } - } - } - return string(str) -} - -// Less return true if a token is less than another token. -// The caseSensitive paramater is used when the tokens are Strings. -// The order when comparing two different type is: -// -// Null < False < Number < String < True < JSON -// -func (t Result) Less(token Result, caseSensitive bool) bool { - if t.Type < token.Type { - return true - } - if t.Type > token.Type { - return false - } - if t.Type == String { - if caseSensitive { - return t.Str < token.Str - } - return stringLessInsensitive(t.Str, token.Str) - } - if t.Type == Number { - return t.Num < token.Num - } - return t.Raw < token.Raw -} - -func stringLessInsensitive(a, b string) bool { - for i := 0; i < len(a) && i < len(b); i++ { - if a[i] >= 'A' && a[i] <= 'Z' { - if b[i] >= 'A' && b[i] <= 'Z' { - // both are uppercase, do nothing - if a[i] < b[i] { - return true - } else if a[i] > b[i] { - return false - } - } else { - // a is uppercase, convert a to lowercase - if a[i]+32 < b[i] { - return true - } else if a[i]+32 > b[i] { - return false - } - } - } else if b[i] >= 'A' && b[i] <= 'Z' { - // b is uppercase, convert b to lowercase - if a[i] < b[i]+32 { - return true - } else if a[i] > b[i]+32 { - return false - } - } else { - // neither are uppercase - if a[i] < b[i] { - return true - } else if a[i] > b[i] { - return false - } - } - } - return len(a) < len(b) -} - -// parseAny parses the next value from a json string. -// A Result is returned when the hit param is set. 
-// The return values are (i int, res Result, ok bool) -func parseAny(json string, i int, hit bool) (int, Result, bool) { - var res Result - var val string - for ; i < len(json); i++ { - if json[i] == '{' || json[i] == '[' { - i, val = parseSquash(json, i) - if hit { - res.Raw = val - res.Type = JSON - } - return i, res, true - } - if json[i] <= ' ' { - continue - } - switch json[i] { - case '"': - i++ - var vesc bool - var ok bool - i, val, vesc, ok = parseString(json, i) - if !ok { - return i, res, false - } - if hit { - res.Type = String - res.Raw = val - if vesc { - res.Str = unescape(val[1 : len(val)-1]) - } else { - res.Str = val[1 : len(val)-1] - } - } - return i, res, true - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i, val = parseNumber(json, i) - if hit { - res.Raw = val - res.Type = Number - res.Num, _ = strconv.ParseFloat(val, 64) - } - return i, res, true - case 't', 'f', 'n': - vc := json[i] - i, val = parseLiteral(json, i) - if hit { - res.Raw = val - switch vc { - case 't': - res.Type = True - case 'f': - res.Type = False - } - return i, res, true - } - } - } - return i, res, false -} - -var ( // used for testing - testWatchForFallback bool - testLastWasFallback bool -) - -// GetMany searches json for the multiple paths. -// The return value is a Result array where the number of items -// will be equal to the number of input paths. -func GetMany(json string, path ...string) []Result { - res := make([]Result, len(path)) - for i, path := range path { - res[i] = Get(json, path) - } - return res -} - -// GetManyBytes searches json for the multiple paths. -// The return value is a Result array where the number of items -// will be equal to the number of input paths. -func GetManyBytes(json []byte, path ...string) []Result { - return GetMany(string(json), path...) 
-} - -var fieldsmu sync.RWMutex -var fields = make(map[string]map[string]int) - -func assign(jsval Result, goval reflect.Value) { - if jsval.Type == Null { - return - } - switch goval.Kind() { - default: - case reflect.Ptr: - if !goval.IsNil() { - newval := reflect.New(goval.Elem().Type()) - assign(jsval, newval.Elem()) - goval.Elem().Set(newval.Elem()) - } else { - newval := reflect.New(goval.Type().Elem()) - assign(jsval, newval.Elem()) - goval.Set(newval) - } - case reflect.Struct: - fieldsmu.RLock() - sf := fields[goval.Type().String()] - fieldsmu.RUnlock() - if sf == nil { - fieldsmu.Lock() - sf = make(map[string]int) - for i := 0; i < goval.Type().NumField(); i++ { - f := goval.Type().Field(i) - tag := strings.Split(f.Tag.Get("json"), ",")[0] - if tag != "-" { - if tag != "" { - sf[tag] = i - sf[f.Name] = i - } else { - sf[f.Name] = i - } - } - } - fields[goval.Type().String()] = sf - fieldsmu.Unlock() - } - jsval.ForEach(func(key, value Result) bool { - if idx, ok := sf[key.Str]; ok { - f := goval.Field(idx) - if f.CanSet() { - assign(value, f) - } - } - return true - }) - case reflect.Slice: - if goval.Type().Elem().Kind() == reflect.Uint8 && jsval.Type == String { - data, _ := base64.StdEncoding.DecodeString(jsval.String()) - goval.Set(reflect.ValueOf(data)) - } else { - jsvals := jsval.Array() - slice := reflect.MakeSlice(goval.Type(), len(jsvals), len(jsvals)) - for i := 0; i < len(jsvals); i++ { - assign(jsvals[i], slice.Index(i)) - } - goval.Set(slice) - } - case reflect.Array: - i, n := 0, goval.Len() - jsval.ForEach(func(_, value Result) bool { - if i == n { - return false - } - assign(value, goval.Index(i)) - i++ - return true - }) - case reflect.Map: - if goval.Type().Key().Kind() == reflect.String && goval.Type().Elem().Kind() == reflect.Interface { - goval.Set(reflect.ValueOf(jsval.Value())) - } - case reflect.Interface: - goval.Set(reflect.ValueOf(jsval.Value())) - case reflect.Bool: - goval.SetBool(jsval.Bool()) - case reflect.Float32, reflect.Float64: - goval.SetFloat(jsval.Float()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - goval.SetInt(jsval.Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - goval.SetUint(jsval.Uint()) - case reflect.String: - goval.SetString(jsval.String()) - } - if len(goval.Type().PkgPath()) > 0 { - v := goval.Addr() - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - u.UnmarshalJSON([]byte(jsval.Raw)) - } - } - } -} - -var validate uintptr = 1 - -// UnmarshalValidationEnabled provides the option to disable JSON validation -// during the Unmarshal routine. Validation is enabled by default. -// -// Deprecated: Use encoder/json.Unmarshal instead -func UnmarshalValidationEnabled(enabled bool) { - if enabled { - atomic.StoreUintptr(&validate, 1) - } else { - atomic.StoreUintptr(&validate, 0) - } -} - -// Unmarshal loads the JSON data into the value pointed to by v. -// -// This function works almost identically to json.Unmarshal except that -// gjson.Unmarshal will automatically attempt to convert JSON values to any Go -// type. For example, the JSON string "100" or the JSON number 100 can be equally -// assigned to Go string, int, byte, uint64, etc. This rule applies to all types. 
-// -// Deprecated: Use encoder/json.Unmarshal instead -func Unmarshal(data []byte, v interface{}) error { - if atomic.LoadUintptr(&validate) == 1 { - _, ok := validpayload(data, 0) - if !ok { - return errors.New("invalid json") - } - } - if v := reflect.ValueOf(v); v.Kind() == reflect.Ptr { - assign(ParseBytes(data), v) - } - return nil -} - -func validpayload(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - i, ok = validany(data, i) - if !ok { - return i, false - } - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - } - } - return i, true - case ' ', '\t', '\n', '\r': - continue - } - } - return i, false -} -func validany(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - case '{': - return validobject(data, i+1) - case '[': - return validarray(data, i+1) - case '"': - return validstring(data, i+1) - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return validnumber(data, i+1) - case 't': - return validtrue(data, i+1) - case 'f': - return validfalse(data, i+1) - case 'n': - return validnull(data, i+1) - } - } - return i, false -} -func validobject(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - case '}': - return i + 1, true - case '"': - key: - if i, ok = validstring(data, i+1); !ok { - return i, false - } - if i, ok = validcolon(data, i); !ok { - return i, false - } - if i, ok = validany(data, i); !ok { - return i, false - } - if i, ok = validcomma(data, i, '}'); !ok { - return i, false - } - if data[i] == '}' { - return i + 1, true - } - i++ - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - case '"': - goto key - } - } - return i, false - } - } - return i, false -} -func validcolon(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - case ':': - return i + 1, true - } - } - return i, false -} -func validcomma(data []byte, i int, end byte) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - return i, false - case ' ', '\t', '\n', '\r': - continue - case ',': - return i, true - case end: - return i, true - } - } - return i, false -} -func validarray(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - switch data[i] { - default: - for ; i < len(data); i++ { - if i, ok = validany(data, i); !ok { - return i, false - } - if i, ok = validcomma(data, i, ']'); !ok { - return i, false - } - if data[i] == ']' { - return i + 1, true - } - } - case ' ', '\t', '\n', '\r': - continue - case ']': - return i + 1, true - } - } - return i, false -} -func validstring(data []byte, i int) (outi int, ok bool) { - for ; i < len(data); i++ { - if data[i] < ' ' { - return i, false - } else if data[i] == '\\' { - i++ - if i == len(data) { - return i, false - } - switch data[i] { - default: - return i, false - case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': - case 'u': - for j := 0; j < 4; j++ { - i++ - if i >= len(data) { - return i, false - } - if !((data[i] >= '0' && data[i] <= '9') || - (data[i] >= 'a' && data[i] <= 'f') || - (data[i] >= 'A' && data[i] <= 'F')) { - return i, false - } - } - } - } else if data[i] == '"' { - return 
i + 1, true - } - } - return i, false -} -func validnumber(data []byte, i int) (outi int, ok bool) { - i-- - // sign - if data[i] == '-' { - i++ - } - // int - if i == len(data) { - return i, false - } - if data[i] == '0' { - i++ - } else { - for ; i < len(data); i++ { - if data[i] >= '0' && data[i] <= '9' { - continue - } - break - } - } - // frac - if i == len(data) { - return i, true - } - if data[i] == '.' { - i++ - if i == len(data) { - return i, false - } - if data[i] < '0' || data[i] > '9' { - return i, false - } - i++ - for ; i < len(data); i++ { - if data[i] >= '0' && data[i] <= '9' { - continue - } - break - } - } - // exp - if i == len(data) { - return i, true - } - if data[i] == 'e' || data[i] == 'E' { - i++ - if i == len(data) { - return i, false - } - if data[i] == '+' || data[i] == '-' { - i++ - } - if i == len(data) { - return i, false - } - if data[i] < '0' || data[i] > '9' { - return i, false - } - i++ - for ; i < len(data); i++ { - if data[i] >= '0' && data[i] <= '9' { - continue - } - break - } - } - return i, true -} - -func validtrue(data []byte, i int) (outi int, ok bool) { - if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && data[i+2] == 'e' { - return i + 3, true - } - return i, false -} -func validfalse(data []byte, i int) (outi int, ok bool) { - if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && data[i+2] == 's' && data[i+3] == 'e' { - return i + 4, true - } - return i, false -} -func validnull(data []byte, i int) (outi int, ok bool) { - if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && data[i+2] == 'l' { - return i + 3, true - } - return i, false -} - -// Valid returns true if the input is valid json. -// -// if !gjson.Valid(json) { -// return errors.New("invalid json") -// } -// value := gjson.Get(json, "name.last") -// -func Valid(json string) bool { - _, ok := validpayload([]byte(json), 0) - return ok -} - -// ValidBytes returns true if the input is valid json. 
-// -// if !gjson.Valid(json) { -// return errors.New("invalid json") -// } -// value := gjson.Get(json, "name.last") -// -// If working with bytes, this method preferred over Valid(string(data)) -// -func ValidBytes(json []byte) bool { - _, ok := validpayload(json, 0) - return ok -} - -func parseUint(s string) (n uint64, ok bool) { - var i int - if i == len(s) { - return 0, false - } - for ; i < len(s); i++ { - if s[i] >= '0' && s[i] <= '9' { - n = n*10 + uint64(s[i]-'0') - } else { - return 0, false - } - } - return n, true -} - -func parseInt(s string) (n int64, ok bool) { - var i int - var sign bool - if len(s) > 0 && s[0] == '-' { - sign = true - i++ - } - if i == len(s) { - return 0, false - } - for ; i < len(s); i++ { - if s[i] >= '0' && s[i] <= '9' { - n = n*10 + int64(s[i]-'0') - } else { - return 0, false - } - } - if sign { - return n * -1, true - } - return n, true -} - -const minUint53 = 0 -const maxUint53 = 4503599627370495 -const minInt53 = -2251799813685248 -const maxInt53 = 2251799813685247 - -func floatToUint(f float64) (n uint64, ok bool) { - n = uint64(f) - if float64(n) == f && n >= minUint53 && n <= maxUint53 { - return n, true - } - return 0, false -} - -func floatToInt(f float64) (n int64, ok bool) { - n = int64(f) - if float64(n) == f && n >= minInt53 && n <= maxInt53 { - return n, true - } - return 0, false -} diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_gae.go b/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_gae.go deleted file mode 100644 index cbe2ab42..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_gae.go +++ /dev/null @@ -1,10 +0,0 @@ -//+build appengine - -package gjson - -func getBytes(json []byte, path string) Result { - return Get(string(json), path) -} -func fillIndex(json string, c *parseContext) { - // noop. Use zero for the Index value. -} diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_ngae.go b/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_ngae.go deleted file mode 100644 index ff313a78..00000000 --- a/pkg/c8y/vendor/github.com/tidwall/gjson/gjson_ngae.go +++ /dev/null @@ -1,73 +0,0 @@ -//+build !appengine - -package gjson - -import ( - "reflect" - "unsafe" -) - -// getBytes casts the input json bytes to a string and safely returns the -// results as uniquely allocated data. This operation is intended to minimize -// copies and allocations for the large json string->[]byte. -func getBytes(json []byte, path string) Result { - var result Result - if json != nil { - // unsafe cast to string - result = Get(*(*string)(unsafe.Pointer(&json)), path) - result = fromBytesGet(result) - } - return result -} - -func fromBytesGet(result Result) Result { - // safely get the string headers - rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw)) - strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str)) - // create byte slice headers - rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len} - strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len} - if strh.Data == 0 { - // str is nil - if rawh.Data == 0 { - // raw is nil - result.Raw = "" - } else { - // raw has data, safely copy the slice header to a string - result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) - } - result.Str = "" - } else if rawh.Data == 0 { - // raw is nil - result.Raw = "" - // str has data, safely copy the slice header to a string - result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) - } else if strh.Data >= rawh.Data && - int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len { - // Str is a substring of Raw. 
- start := int(strh.Data - rawh.Data)
- // safely copy the raw slice header
- result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
- // substring the raw
- result.Str = result.Raw[start : start+strh.Len]
- } else {
- // safely copy both the raw and str slice headers to strings
- result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
- result.Str = string(*(*[]byte)(unsafe.Pointer(&strh)))
- }
- return result
-}
-
-// fillIndex finds the position of Raw data and assigns it to the Index field
-// of the resulting value. If the position cannot be found then Index zero is
-// used instead.
-func fillIndex(json string, c *parseContext) {
- if len(c.value.Raw) > 0 && !c.calcd {
- jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json))
- rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw)))
- c.value.Index = int(rhdr.Data - jhdr.Data)
- if c.value.Index < 0 || c.value.Index >= len(json) {
- c.value.Index = 0
- }
- }
-}
diff --git a/pkg/c8y/vendor/github.com/tidwall/gjson/logo.png b/pkg/c8y/vendor/github.com/tidwall/gjson/logo.png
deleted file mode 100644
index 17a8bbe9d651e5d79cbd59a3645a152443440ad6..0000000000000000000000000000000000000000
GIT binary patch
[15,936 bytes of binary image data omitted]
Build Status
-GoDoc
-
-Match is a very simple pattern matcher where '*' matches on any
-number characters and '?' matches on any one character.
-
-Installing
----------
-
-```
-go get -u github.com/tidwall/match
-```
-
-Example
--------
-
-```go
-match.Match("hello", "*llo")
-match.Match("jello", "?ello")
-match.Match("hello", "h*o")
-```
-
-
-Contact
--------
-Josh Baker [@tidwall](http://twitter.com/tidwall)
-
-License
--------
-Redcon source code is available under the MIT [License](/LICENSE).
diff --git a/pkg/c8y/vendor/github.com/tidwall/match/match.go b/pkg/c8y/vendor/github.com/tidwall/match/match.go
deleted file mode 100644
index 8885add6..00000000
--- a/pkg/c8y/vendor/github.com/tidwall/match/match.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Match provides a simple pattern matcher with unicode support.
-package match
-
-import "unicode/utf8"
-
-// Match returns true if str matches pattern. This is a very
-// simple wildcard match where '*' matches on any number characters
-// and '?' matches on any one character.
-
-// pattern:
-// { term }
-// term:
-// '*' matches any sequence of non-Separator characters
-// '?' 
matches any single non-Separator character -// c matches character c (c != '*', '?', '\\') -// '\\' c matches character c -// -func Match(str, pattern string) bool { - if pattern == "*" { - return true - } - return deepMatch(str, pattern) -} -func deepMatch(str, pattern string) bool { - for len(pattern) > 0 { - if pattern[0] > 0x7f { - return deepMatchRune(str, pattern) - } - switch pattern[0] { - default: - if len(str) == 0 { - return false - } - if str[0] > 0x7f { - return deepMatchRune(str, pattern) - } - if str[0] != pattern[0] { - return false - } - case '?': - if len(str) == 0 { - return false - } - case '*': - return deepMatch(str, pattern[1:]) || - (len(str) > 0 && deepMatch(str[1:], pattern)) - } - str = str[1:] - pattern = pattern[1:] - } - return len(str) == 0 && len(pattern) == 0 -} - -func deepMatchRune(str, pattern string) bool { - var sr, pr rune - var srsz, prsz int - - // read the first rune ahead of time - if len(str) > 0 { - if str[0] > 0x7f { - sr, srsz = utf8.DecodeRuneInString(str) - } else { - sr, srsz = rune(str[0]), 1 - } - } else { - sr, srsz = utf8.RuneError, 0 - } - if len(pattern) > 0 { - if pattern[0] > 0x7f { - pr, prsz = utf8.DecodeRuneInString(pattern) - } else { - pr, prsz = rune(pattern[0]), 1 - } - } else { - pr, prsz = utf8.RuneError, 0 - } - // done reading - for pr != utf8.RuneError { - switch pr { - default: - if srsz == utf8.RuneError { - return false - } - if sr != pr { - return false - } - case '?': - if srsz == utf8.RuneError { - return false - } - case '*': - return deepMatchRune(str, pattern[prsz:]) || - (srsz > 0 && deepMatchRune(str[srsz:], pattern)) - } - str = str[srsz:] - pattern = pattern[prsz:] - // read the next runes - if len(str) > 0 { - if str[0] > 0x7f { - sr, srsz = utf8.DecodeRuneInString(str) - } else { - sr, srsz = rune(str[0]), 1 - } - } else { - sr, srsz = utf8.RuneError, 0 - } - if len(pattern) > 0 { - if pattern[0] > 0x7f { - pr, prsz = utf8.DecodeRuneInString(pattern) - } else { - pr, prsz = rune(pattern[0]), 1 - } - } else { - pr, prsz = utf8.RuneError, 0 - } - // done reading - } - - return srsz == 0 && prsz == 0 -} - -var maxRuneBytes = func() []byte { - b := make([]byte, 4) - if utf8.EncodeRune(b, '\U0010FFFF') != 4 { - panic("invalid rune encoding") - } - return b -}() - -// Allowable parses the pattern and determines the minimum and maximum allowable -// values that the pattern can represent. -// When the max cannot be determined, 'true' will be returned -// for infinite. -func Allowable(pattern string) (min, max string) { - if pattern == "" || pattern[0] == '*' { - return "", "" - } - - minb := make([]byte, 0, len(pattern)) - maxb := make([]byte, 0, len(pattern)) - var wild bool - for i := 0; i < len(pattern); i++ { - if pattern[i] == '*' { - wild = true - break - } - if pattern[i] == '?' { - minb = append(minb, 0) - maxb = append(maxb, maxRuneBytes...) - } else { - minb = append(minb, pattern[i]) - maxb = append(maxb, pattern[i]) - } - } - if wild { - r, n := utf8.DecodeLastRune(maxb) - if r != utf8.RuneError { - if r < utf8.MaxRune { - r++ - if r > 0x7f { - b := make([]byte, 4) - nn := utf8.EncodeRune(b, r) - maxb = append(maxb[:len(maxb)-n], b[:nn]...) 
- } else { - maxb = append(maxb[:len(maxb)-n], byte(r)) - } - } - } - } - return string(minb), string(maxb) - /* - return - if wild { - r, n := utf8.DecodeLastRune(maxb) - if r != utf8.RuneError { - if r < utf8.MaxRune { - infinite = true - } else { - r++ - if r > 0x7f { - b := make([]byte, 4) - nn := utf8.EncodeRune(b, r) - maxb = append(maxb[:len(maxb)-n], b[:nn]...) - } else { - maxb = append(maxb[:len(maxb)-n], byte(r)) - } - } - } - } - return string(minb), string(maxb), infinite - */ -} diff --git a/pkg/c8y/vendor/go.uber.org/atomic/.codecov.yml b/pkg/c8y/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 6d4d1be7..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/pkg/c8y/vendor/go.uber.org/atomic/.gitignore b/pkg/c8y/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 0a4504f1..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -.DS_Store -/vendor -/cover -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof diff --git a/pkg/c8y/vendor/go.uber.org/atomic/.travis.yml b/pkg/c8y/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 58957222..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -go: - - 1.7 - - 1.8 - - 1.9 - -cache: - directories: - - vendor - -install: - - make install_ci - -script: - - make test_ci - - scripts/test-ubergo.sh - - make lint - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/pkg/c8y/vendor/go.uber.org/atomic/LICENSE.txt b/pkg/c8y/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fb..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
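The `Allowable` helper removed above never gets a usage example in the deleted sources, so here is a minimal, hedged sketch of the pattern it is typically paired with: pruning an ordered key scan before running the full wildcard match. It assumes the package is consumed from its upstream module path `github.com/tidwall/match` now that the vendored copy is gone; the key names are purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/tidwall/match"
)

func main() {
	pattern := "user:1??"

	// Allowable reports the smallest and largest strings the pattern could
	// match, so an ordered index can skip keys outside that window before
	// paying for the wildcard comparison.
	min, max := match.Allowable(pattern)

	for _, key := range []string{"user:100", "user:19z", "user:2ab"} {
		if key >= min && key <= max && match.Match(key, pattern) {
			fmt.Println("matched:", key) // user:100, user:19z
		}
	}
}
```

Keys that fall outside the `[min, max]` window never reach `Match` at all, which is the point of computing the bounds up front.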
diff --git a/pkg/c8y/vendor/go.uber.org/atomic/Makefile b/pkg/c8y/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index dfc63d9d..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -PACKAGES := $(shell glide nv) -# Many Go tools take file globs or directories as arguments instead of packages. -PACKAGE_FILES ?= *.go - - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 7 8 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - -export GO15VENDOREXPERIMENT=1 - - -.PHONY: build -build: - go build -i $(PACKAGES) - - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - - -.PHONY: test -test: - go test -cover -race $(PACKAGES) - - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover -ifdef SHOULD_LINT - go get github.com/golang/lint/golint -endif - -.PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif - - -.PHONY: test_ci -test_ci: install_ci build - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/pkg/c8y/vendor/go.uber.org/atomic/README.md b/pkg/c8y/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 6505abf6..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation -`go get -u go.uber.org/atomic` - -## Usage -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status -Stable. - -


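The `Uint32.Sub` and `Uint64.Sub` methods in the `atomic.go` file deleted below subtract via a two's-complement identity, because `sync/atomic` only exposes `Add` for unsigned types. A small standalone sketch of that identity (the counter values here are assumed, not taken from this patch):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v uint32 = 10
	n := uint32(3)

	// There is no atomic "SubUint32", so the wrapper adds the
	// two's-complement of n instead: ^(n-1) == -n (mod 2^32).
	atomic.AddUint32(&v, ^(n - 1))

	fmt.Println(v) // 7
}
```

Adding `^(n-1)` is the same as adding `-n` modulo 2^32, so the wrapped counter decreases by `n` without any extra primitive.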
-Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.org/uber-go/atomic -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/pkg/c8y/vendor/go.uber.org/atomic/atomic.go b/pkg/c8y/vendor/go.uber.org/atomic/atomic.go deleted file mode 100644 index 1db6849f..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/atomic.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic - -import ( - "math" - "sync/atomic" - "time" -) - -// Int32 is an atomic wrapper around an int32. -type Int32 struct{ v int32 } - -// NewInt32 creates an Int32. -func NewInt32(i int32) *Int32 { - return &Int32{i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// Int64 is an atomic wrapper around an int64. -type Int64 struct{ v int64 } - -// NewInt64 creates an Int64. -func NewInt64(i int64) *Int64 { - return &Int64{i} -} - -// Load atomically loads the wrapped value. 
-func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// Uint32 is an atomic wrapper around an uint32. -type Uint32 struct{ v uint32 } - -// NewUint32 creates a Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// Uint64 is an atomic wrapper around a uint64. -type Uint64 struct{ v uint64 } - -// NewUint64 creates a Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. 
-func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// Bool is an atomic Boolean. -type Bool struct{ v uint32 } - -// NewBool creates a Bool. -func NewBool(initial bool) *Bool { - return &Bool{boolToInt(initial)} -} - -// Load atomically loads the Boolean. -func (b *Bool) Load() bool { - return truthy(atomic.LoadUint32(&b.v)) -} - -// CAS is an atomic compare-and-swap. -func (b *Bool) CAS(old, new bool) bool { - return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) -} - -// Store atomically stores the passed value. -func (b *Bool) Store(new bool) { - atomic.StoreUint32(&b.v, boolToInt(new)) -} - -// Swap sets the given value and returns the previous value. -func (b *Bool) Swap(new bool) bool { - return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - return truthy(atomic.AddUint32(&b.v, 1) - 1) -} - -func truthy(n uint32) bool { - return n&1 == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Float64 is an atomic wrapper around float64. -type Float64 struct { - v uint64 -} - -// NewFloat64 creates a Float64. -func NewFloat64(f float64) *Float64 { - return &Float64{math.Float64bits(f)} -} - -// Load atomically loads the wrapped value. -func (f *Float64) Load() float64 { - return math.Float64frombits(atomic.LoadUint64(&f.v)) -} - -// Store atomically stores the passed value. -func (f *Float64) Store(s float64) { - atomic.StoreUint64(&f.v, math.Float64bits(s)) -} - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// CAS is an atomic compare-and-swap. -func (f *Float64) CAS(old, new float64) bool { - return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) -} - -// Duration is an atomic wrapper around time.Duration -// https://godoc.org/time#Duration -type Duration struct { - v Int64 -} - -// NewDuration creates a Duration. -func NewDuration(d time.Duration) *Duration { - return &Duration{v: *NewInt64(int64(d))} -} - -// Load atomically loads the wrapped value. -func (d *Duration) Load() time.Duration { - return time.Duration(d.v.Load()) -} - -// Store atomically stores the passed value. -func (d *Duration) Store(n time.Duration) { - d.v.Store(int64(n)) -} - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// Swap atomically swaps the wrapped time.Duration and returns the old value. -func (d *Duration) Swap(n time.Duration) time.Duration { - return time.Duration(d.v.Swap(int64(n))) -} - -// CAS is an atomic compare-and-swap. 
-func (d *Duration) CAS(old, new time.Duration) bool { - return d.v.CAS(int64(old), int64(new)) -} - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct{ atomic.Value } diff --git a/pkg/c8y/vendor/go.uber.org/atomic/glide.lock b/pkg/c8y/vendor/go.uber.org/atomic/glide.lock deleted file mode 100644 index 3c72c599..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/glide.lock +++ /dev/null @@ -1,17 +0,0 @@ -hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 -updated: 2016-10-27T00:10:51.16960137-07:00 -imports: [] -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require diff --git a/pkg/c8y/vendor/go.uber.org/atomic/glide.yaml b/pkg/c8y/vendor/go.uber.org/atomic/glide.yaml deleted file mode 100644 index 4cf608ec..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/glide.yaml +++ /dev/null @@ -1,6 +0,0 @@ -package: go.uber.org/atomic -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require diff --git a/pkg/c8y/vendor/go.uber.org/atomic/string.go b/pkg/c8y/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index ede8136f..00000000 --- a/pkg/c8y/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper around Value for strings. -type String struct{ v Value } - -// NewString creates a String. -func NewString(str string) *String { - s := &String{} - if str != "" { - s.Store(str) - } - return s -} - -// Load atomically loads the wrapped string. -func (s *String) Load() string { - v := s.v.Load() - if v == nil { - return "" - } - return v.(string) -} - -// Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the Value -// requires an allocation. 
-func (s *String) Store(str string) { - s.v.Store(str) -} diff --git a/pkg/c8y/vendor/go.uber.org/multierr/.codecov.yml b/pkg/c8y/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/pkg/c8y/vendor/go.uber.org/multierr/.gitignore b/pkg/c8y/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index 61ead866..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/vendor diff --git a/pkg/c8y/vendor/go.uber.org/multierr/.travis.yml b/pkg/c8y/vendor/go.uber.org/multierr/.travis.yml deleted file mode 100644 index 5ffa8fed..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/multierr - -env: - global: - - GO15VENDOREXPERIMENT=1 - -go: - - 1.7 - - 1.8 - - tip - -cache: - directories: - - vendor - -before_install: -- go version - -install: -- | - set -e - make install_ci - -script: -- | - set -e - make lint - make test_ci - -after_success: -- bash <(curl -s https://codecov.io/bash) diff --git a/pkg/c8y/vendor/go.uber.org/multierr/CHANGELOG.md b/pkg/c8y/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index 898445d0..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,28 +0,0 @@ -Releases -======== - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. - - -v0.1.0 (2017-31-03) -=================== - -- Initial release diff --git a/pkg/c8y/vendor/go.uber.org/multierr/LICENSE.txt b/pkg/c8y/vendor/go.uber.org/multierr/LICENSE.txt deleted file mode 100644 index 858e0247..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/pkg/c8y/vendor/go.uber.org/multierr/Makefile b/pkg/c8y/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index a7437d06..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,74 +0,0 @@ -export GO15VENDOREXPERIMENT=1 - -PACKAGES := $(shell glide nv) - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - -.PHONY: build -build: - go build -i $(PACKAGES) - -.PHONY: test -test: - go test -cover -race $(PACKAGES) - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: govet -govet: - $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) - @go vet $(PACKAGES) 2>&1 \ - | grep -v '^exit status' > $(VET_LOG) || true - @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) - -.PHONY: golint -golint: - @go get github.com/golang/lint/golint - $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) - -.PHONY: staticcheck -staticcheck: - @go get honnef.co/go/tools/cmd/staticcheck - $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) - @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true - @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) - -.PHONY: lint -lint: gofmt govet golint staticcheck - -.PHONY: cover -cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) - go tool cover -html=cover.out -o cover.html - -update-license: - @go get go.uber.org/tools/update-license - @update-license \ - $(shell go list -json $(PACKAGES) | \ - jq -r '.Dir + "/" + (.GoFiles | .[])') - -############################################################################## - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: test_ci -test_ci: install_ci - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/pkg/c8y/vendor/go.uber.org/multierr/README.md b/pkg/c8y/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 065088f6..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Installation - - go get -u go.uber.org/multierr - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License]. 
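The deleted `multierr` README stops short of a runnable example, so the following sketch (the function and error messages are illustrative, not part of this change) shows the two calls that the package documentation further down centres on: `Append` for accumulating failures and `Errors` for unpacking them.

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func closeAll(closers ...func() error) (err error) {
	// Append flattens as it goes, so the result stays a single error
	// value no matter how many of the close calls fail.
	for _, c := range closers {
		err = multierr.Append(err, c())
	}
	return err
}

func main() {
	err := closeAll(
		func() error { return errors.New("conn: close failed") },
		func() error { return nil },
		func() error { return errors.New("file: close failed") },
	)
	fmt.Println(err)                       // conn: close failed; file: close failed
	fmt.Println(len(multierr.Errors(err))) // 2
}
```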
- -[MIT License]: LICENSE.txt -[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg -[doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.org/uber-go/multierr -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/pkg/c8y/vendor/go.uber.org/multierr/error.go b/pkg/c8y/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index de6ce473..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Combine(reader.Close(), writer.Close()) -// -// This makes it possible to record resource cleanup failures from deferred -// blocks with the help of named return values. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:") -// } -// -// Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. -// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface. 
-// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "fmt" - "io" - "strings" - "sync" - - "go.uber.org/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - _newline = []byte("\n") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, the returned slice is empty. -// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -var _ errorGroup = (*multiError)(nil) - -// Errors returns the list of underlying errors. -// -// This slice MUST NOT be modified. 
-func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) - } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. 
-// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) -} diff --git a/pkg/c8y/vendor/go.uber.org/multierr/glide.lock b/pkg/c8y/vendor/go.uber.org/multierr/glide.lock deleted file mode 100644 index f9ea94c3..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/glide.lock +++ /dev/null @@ -1,19 +0,0 @@ -hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 -updated: 2017-04-10T13:34:45.671678062-07:00 -imports: -- name: go.uber.org/atomic - version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 - subpackages: - - assert - - require diff --git a/pkg/c8y/vendor/go.uber.org/multierr/glide.yaml b/pkg/c8y/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec..00000000 --- a/pkg/c8y/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/pkg/c8y/vendor/go.uber.org/zap/.codecov.yml b/pkg/c8y/vendor/go.uber.org/zap/.codecov.yml deleted file mode 100644 index 8e5ca7d3..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/.codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 95% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - 
if_ci_failed: error # if ci fails report status as success, error, or failure -ignore: - - internal/readme/readme.go - diff --git a/pkg/c8y/vendor/go.uber.org/zap/.gitignore b/pkg/c8y/vendor/go.uber.org/zap/.gitignore deleted file mode 100644 index 08fbde6c..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log diff --git a/pkg/c8y/vendor/go.uber.org/zap/.readme.tmpl b/pkg/c8y/vendor/go.uber.org/zap/.readme.tmpl deleted file mode 100644 index c6440db8..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/.readme.tmpl +++ /dev/null @@ -1,108 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -{{.BenchmarkAddingFields}} - -Log a message with a logger that already has 10 fields of context: - -{{.BenchmarkAccumulatedContext}} - -Log a static string, without any context or `printf`-style templating: - -{{.BenchmarkWithoutFields}} - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/pkg/c8y/vendor/go.uber.org/zap/.travis.yml b/pkg/c8y/vendor/go.uber.org/zap/.travis.yml deleted file mode 100644 index a3321fa2..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: false -go: - - 1.9.x - - 1.10.x -go_import_path: go.uber.org/zap -env: - global: - - TEST_TIMEOUT_SCALE=10 -cache: - directories: - - vendor -install: - - make dependencies -script: - - make lint - - make test - - make bench -after_success: - - make cover - - bash <(curl -s https://codecov.io/bash) diff --git a/pkg/c8y/vendor/go.uber.org/zap/CHANGELOG.md b/pkg/c8y/vendor/go.uber.org/zap/CHANGELOG.md deleted file mode 100644 index 17d5b49f..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/CHANGELOG.md +++ /dev/null @@ -1,305 +0,0 @@ -# Changelog - -## v1.9.1 (06 Aug 2018) - -Bugfixes: - -* [#614][]: MapObjectEncoder should not ignore empty slices. - -## v1.9.0 (19 Jul 2018) - -Enhancements: -* [#602][]: Reduce number of allocations when logging with reflection. -* [#572][], [#606][]: Expose a registry for third-party logging sinks. - -Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and -@dimroc for their contributions to this release. - -## v1.8.0 (13 Apr 2018) - -Enhancements: -* [#508][]: Make log level configurable when redirecting the standard - library's logger. -* [#518][]: Add a logger that writes to a `*testing.TB`. -* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. - -Bugfixes: -* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. - -Thanks to @DiSiqueira and @djui for their contributions to this release. - -## v1.7.1 (25 Sep 2017) - -Bugfixes: -* [#504][]: Store strings when using AddByteString with the map encoder. - -## v1.7.0 (21 Sep 2017) - -Enhancements: - -* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user - to specify the level of the logged messages. - -## v1.6.0 (30 Aug 2017) - -Enhancements: - -* [#491][]: Omit zap stack frames from stacktraces. -* [#490][]: Add a `ContextMap` method to observer logs for simpler - field validation in tests. - -## v1.5.0 (22 Jul 2017) - -Enhancements: - -* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. -* [#465][]: Support user-supplied encoders for logger names. - -Bugfixes: - -* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. - -Thanks to @richard-tunein and @pavius for their contributions to this release. - -## v1.4.1 (08 Jun 2017) - -This release fixes two bugs. - -Bugfixes: - -* [#435][]: Support a variety of case conventions when unmarshaling levels. -* [#444][]: Fix a panic in the observer. - -## v1.4.0 (12 May 2017) - -This release adds a few small features and is fully backward-compatible. 
- -Enhancements: - -* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to - override the Unix-style default. -* [#425][]: Preserve time zones when logging times. -* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a - variety of operations a bit simpler. - -## v1.3.0 (25 Apr 2017) - -This release adds an enhancement to zap's testing helpers as well as the -ability to marshal an AtomicLevel. It is fully backward-compatible. - -Enhancements: - -* [#415][]: Add a substring-filtering helper to zap's observer. This is - particularly useful when testing the `SugaredLogger`. -* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. - -## v1.2.0 (13 Apr 2017) - -This release adds a gRPC compatibility wrapper. It is fully backward-compatible. - -Enhancements: - -* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements - `grpclog.Logger`. - -## v1.1.0 (31 Mar 2017) - -This release fixes two bugs and adds some enhancements to zap's testing helpers. -It is fully backward-compatible. - -Bugfixes: - -* [#385][]: Fix caller path trimming on Windows. -* [#396][]: Fix a panic when attempting to use non-existent directories with - zap's configuration struct. - -Enhancements: - -* [#386][]: Add filtering helpers to zaptest's observing logger. - -Thanks to @moitias for contributing to this release. - -## v1.0.0 (14 Mar 2017) - -This is zap's first stable release. All exported APIs are now final, and no -further breaking changes will be made in the 1.x release series. Anyone using a -semver-aware dependency manager should now pin to `^1`. - -Breaking changes: - -* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without - casting from `[]byte` to `string`. -* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, - `zap.Logger`, and `zap.SugaredLogger`. -* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to - clash with other testing helpers. - -Bugfixes: - -* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier - for tab-separated console output. -* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to - work with concurrency-safe `WriteSyncer` implementations. -* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux - systems. -* [#373][]: Report the correct caller from zap's standard library - interoperability wrappers. - -Enhancements: - -* [#348][]: Add a registry allowing third-party encodings to work with zap's - built-in `Config`. -* [#327][]: Make the representation of logger callers configurable (like times, - levels, and durations). -* [#376][]: Allow third-party encoders to use their own buffer pools, which - removes the last performance advantage that zap's encoders have over plugins. -* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple - `WriteSyncer`s and lock the result. -* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in - Go 1.9). -* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it - easier for particularly punctilious users to unit test their application's - logging. - -Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their -contributions to this release. - -## v1.0.0-rc.3 (7 Mar 2017) - -This is the third release candidate for zap's stable release. There are no -breaking changes. 
- -Bugfixes: - -* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs - rather than `[]uint8`. - -Enhancements: - -* [#307][]: Users can opt into colored output for log levels. -* [#353][]: In addition to hijacking the output of the standard library's - package-global logging functions, users can now construct a zap-backed - `log.Logger` instance. -* [#311][]: Frames from common runtime functions and some of zap's internal - machinery are now omitted from stacktraces. - -Thanks to @ansel1 and @suyash for their contributions to this release. - -## v1.0.0-rc.2 (21 Feb 2017) - -This is the second release candidate for zap's stable release. It includes two -breaking changes. - -Breaking changes: - -* [#316][]: Zap's global loggers are now fully concurrency-safe - (previously, users had to ensure that `ReplaceGlobals` was called before the - loggers were in use). However, they must now be accessed via the `L()` and - `S()` functions. Users can update their projects with - - ``` - gofmt -r "zap.L -> zap.L()" -w . - gofmt -r "zap.S -> zap.S()" -w . - ``` -* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid - JSON and YAML struct tags on all config structs. This release fixes the tags - and adds static analysis to prevent similar bugs in the future. - -Bugfixes: - -* [#321][]: Redirecting the standard library's `log` output now - correctly reports the logger's caller. - -Enhancements: - -* [#325][] and [#333][]: Zap now transparently supports non-standard, rich - errors like those produced by `github.com/pkg/errors`. -* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is - now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> - zap.NewNop()' -w .`. -* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a - more informative error. - -Thanks to @skipor and @chapsuk for their contributions to this release. - -## v1.0.0-rc.1 (14 Feb 2017) - -This is the first release candidate for zap's stable release. There are multiple -breaking changes and improvements from the pre-release version. Most notably: - -* **Zap's import path is now "go.uber.org/zap"** — all users will - need to update their code. -* User-facing types and functions remain in the `zap` package. Code relevant - largely to extension authors is now in the `zapcore` package. -* The `zapcore.Core` type makes it easy for third-party packages to use zap's - internals but provide a different user-facing API. -* `Logger` is now a concrete type instead of an interface. -* A less verbose (though slower) logging API is included by default. -* Package-global loggers `L` and `S` are included. -* A human-friendly console encoder is included. -* A declarative config struct allows common logger configurations to be managed - as configuration instead of code. -* Sampling is more accurate, and doesn't depend on the standard library's shared - timer heap. - -## v0.1.0-beta.1 (6 Feb 2017) - -This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and -upgrade at their leisure. Since this is the first tagged release, there are no -backward compatibility concerns and all functionality is new. - -Early zap adopters should pin to the 0.1.x minor version until they're ready to -upgrade to the upcoming stable release. 
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 diff --git a/pkg/c8y/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/pkg/c8y/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/pkg/c8y/vendor/go.uber.org/zap/CONTRIBUTING.md b/pkg/c8y/vendor/go.uber.org/zap/CONTRIBUTING.md deleted file mode 100644 index 9454bbaf..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing - -We'd love your help making zap the very best structured logging library in Go! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. 
- -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/zap.git -cd zap -git remote add upstream https://github.com/uber-go/zap.git -git fetch upstream -``` - -Install zap's dependencies: - -``` -make dependencies -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/zap -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. - -[fork]: https://github.com/uber-go/zap/fork -[open-issue]: https://github.com/uber-go/zap/issues/new -[cla]: https://cla-assistant.io/uber-go/zap -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/pkg/c8y/vendor/go.uber.org/zap/FAQ.md b/pkg/c8y/vendor/go.uber.org/zap/FAQ.md deleted file mode 100644 index 4256d35c..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/FAQ.md +++ /dev/null @@ -1,155 +0,0 @@ -# Frequently Asked Questions - -## Design - -### Why spend so much effort on logger performance? - -Of course, most applications won't notice the impact of a slow logger: they -already take tens or hundreds of milliseconds for each operation, so an extra -millisecond doesn't matter. - -On the other hand, why *not* make structured logging fast? The `SugaredLogger` -isn't any harder to use than other logging packages, and the `Logger` makes -structured logging possible in performance-sensitive contexts. Across a fleet -of Go microservices, making each application even slightly more efficient adds -up quickly. - -### Why aren't `Logger` and `SugaredLogger` interfaces? - -Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and -`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points -out][go-proverbs], "The bigger the interface, the weaker the abstraction." -Interfaces are also rigid — *any* change requires releasing a new major -version, since it breaks all third-party implementations. - -Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much -abstraction, and it lets us add methods without introducing breaking changes. -Your applications should define and depend upon an interface that includes -just the methods you use. 
- -### Why sample application logs? - -Applications often experience runs of errors, either because of a bug or -because of a misbehaving user. Logging errors is usually a good idea, but it -can easily make this bad situation worse: not only is your application coping -with a flood of errors, it's also spending extra CPU cycles and I/O logging -those errors. Since writes are typically serialized, logging limits throughput -when you need it most. - -Sampling fixes this problem by dropping repetitive log entries. Under normal -conditions, your application writes out every entry. When similar entries are -logged hundreds or thousands of times each second, though, zap begins dropping -duplicates to preserve throughput. - -### Why do the structured logging APIs take a message in addition to fields? - -Subjectively, we find it helpful to accompany structured context with a brief -description. This isn't critical during development, but it makes debugging -and operating unfamiliar systems much easier. - -More concretely, zap's sampling algorithm uses the message to identify -duplicate entries. In our experience, this is a practical middle ground -between random sampling (which often drops the exact entry that you need while -debugging) and hashing the complete entry (which is prohibitively expensive). - -### Why include package-global loggers? - -Since so many other logging packages include a global logger, many -applications aren't designed to accept loggers as explicit parameters. -Changing function signatures is often a breaking change, so zap includes -global loggers to simplify migration. - -Avoid them where possible. - -### Why include dedicated Panic and Fatal log levels? - -In general, application code should handle errors gracefully instead of using -`panic` or `os.Exit`. However, every rule has exceptions, and it's common to -crash when an error is truly unrecoverable. To avoid losing any information -— especially the reason for the crash — the logger must flush any -buffered entries before the process exits. - -Zap makes this easy by offering `Panic` and `Fatal` logging methods that -automatically flush before exiting. Of course, this doesn't guarantee that -logs will never be lost, but it eliminates a common error. - -See the discussion in uber-go/zap#207 for more details. - -### What's `DPanic`? - -`DPanic` stands for "panic in development." In development, it logs at -`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to -catch errors that are theoretically possible, but shouldn't actually happen, -*without* crashing in production. - -If you've ever written code like this, you need `DPanic`: - -```go -if err != nil { - panic(fmt.Sprintf("shouldn't ever get here: %v", err)) -} -``` - -## Installation - -### What does the error `expects import "go.uber.org/zap"` mean? - -Either zap was installed incorrectly or you're referencing the wrong package -name in your code. - -Zap's source code happens to be hosted on GitHub, but the [import -path][import-path] is `go.uber.org/zap`. This gives us, the project -maintainers, the freedom to move the source code if necessary. However, it -means that you need to take a little care when installing and using the -package. - -If you follow two simple rules, everything should work: install zap with `go -get -u go.uber.org/zap`, and always import it in your code with `import -"go.uber.org/zap"`. Your code shouldn't contain *any* references to -`github.com/uber-go/zap`. - -## Usage - -### Does zap support log rotation? 
- -Zap doesn't natively support rotating log files, since we prefer to leave this -to an external program like `logrotate`. - -However, it's easy to integrate a log rotation package like -[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. - -```go -// lumberjack.Logger is already safe for concurrent use, so we don't need to -// lock it. -w := zapcore.AddSync(&lumberjack.Logger{ - Filename: "/var/log/myapp/foo.log", - MaxSize: 500, // megabytes - MaxBackups: 3, - MaxAge: 28, // days -}) -core := zapcore.NewCore( - zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), - w, - zap.InfoLevel, -) -logger := zap.New(core) -``` - -## Extensions - -We'd love to support every logging need within zap itself, but we're only -familiar with a handful of log ingestion systems, flag-parsing packages, and -the like. Rather than merging code that we can't effectively debug and -support, we'd rather grow an ecosystem of zap extensions. - -We're aware of the following extensions, but haven't used them ourselves: - -| Package | Integration | -| --- | --- | -| `github.com/tchap/zapext` | Sentry, syslog | -| `github.com/fgrosse/zaptest` | Ginkgo | -| `github.com/blendle/zapdriver` | Stackdriver | - -[go-proverbs]: https://go-proverbs.github.io/ -[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths -[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/pkg/c8y/vendor/go.uber.org/zap/LICENSE.txt b/pkg/c8y/vendor/go.uber.org/zap/LICENSE.txt deleted file mode 100644 index 6652bed4..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/pkg/c8y/vendor/go.uber.org/zap/Makefile b/pkg/c8y/vendor/go.uber.org/zap/Makefile deleted file mode 100644 index ef7893b3..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -export GO15VENDOREXPERIMENT=1 - -BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem -PKGS ?= $(shell glide novendor) -# Many Go tools take file globs or directories as arguments instead of packages. -PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. 
-GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 10 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - -.PHONY: all -all: lint test - -.PHONY: dependencies -dependencies: - @echo "Installing Glide and locked dependencies..." - glide --version || go get -u -f github.com/Masterminds/glide - glide install - @echo "Installing test dependencies..." - go install ./vendor/github.com/axw/gocov/gocov - go install ./vendor/github.com/mattn/goveralls -ifdef SHOULD_LINT - @echo "Installing golint..." - go install ./vendor/github.com/golang/lint/golint -else - @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) -endif - -# Disable printf-like invocation checking due to testify.assert.Error() -VET_RULES := -printf=false - -.PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log - @echo "Installing test dependencies for vet..." - @go test -i $(PKGS) - @echo "Checking vet..." - @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./check_license.sh | tee -a lint.log - @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif - -.PHONY: test -test: - go test -race $(PKGS) - -.PHONY: cover -cover: - ./scripts/cover.sh $(PKGS) - -.PHONY: bench -BENCH ?= . -bench: - @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) - -.PHONY: updatereadme -updatereadme: - rm -f README.md - cat .readme.tmpl | go run internal/readme/readme.go > README.md diff --git a/pkg/c8y/vendor/go.uber.org/zap/README.md b/pkg/c8y/vendor/go.uber.org/zap/README.md deleted file mode 100644 index f4fd1cb4..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/README.md +++ /dev/null @@ -1,136 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. 
- -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 3131 ns/op | 5 allocs/op | -| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | -| zerolog | 16154 ns/op | 90 allocs/op | -| lion | 16341 ns/op | 111 allocs/op | -| go-kit | 17049 ns/op | 126 allocs/op | -| logrus | 23662 ns/op | 142 allocs/op | -| log15 | 36351 ns/op | 149 allocs/op | -| apex/log | 42530 ns/op | 126 allocs/op | - -Log a message with a logger that already has 10 fields of context: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 380 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | -| zerolog | 321 ns/op | 0 allocs/op | -| lion | 7092 ns/op | 39 allocs/op | -| go-kit | 20226 ns/op | 115 allocs/op | -| logrus | 22312 ns/op | 130 allocs/op | -| log15 | 28788 ns/op | 79 allocs/op | -| apex/log | 42063 ns/op | 115 allocs/op | - -Log a static string, without any context or `printf`-style templating: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 361 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | -| zerolog | 323 ns/op | 0 allocs/op | -| standard library | 575 ns/op | 2 allocs/op | -| go-kit | 922 ns/op | 13 allocs/op | -| lion | 1413 ns/op | 10 allocs/op | -| logrus | 2291 ns/op | 27 allocs/op | -| apex/log | 3690 ns/op | 11 allocs/op | -| log15 | 5954 ns/op | 26 allocs/op | - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/pkg/c8y/vendor/go.uber.org/zap/array.go b/pkg/c8y/vendor/go.uber.org/zap/array.go deleted file mode 100644 index 5be3704a..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/array.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "time" - - "go.uber.org/zap/zapcore" -) - -// Array constructs a field with the given key and ArrayMarshaler. It provides -// a flexible, but still type-safe and efficient, way to add array-like types -// to the logging context. The struct's MarshalLogArray method is called lazily. -func Array(key string, val zapcore.ArrayMarshaler) Field { - return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} -} - -// Bools constructs a field that carries a slice of bools. -func Bools(key string, bs []bool) Field { - return Array(key, bools(bs)) -} - -// ByteStrings constructs a field that carries a slice of []byte, each of which -// must be UTF-8 encoded text. -func ByteStrings(key string, bss [][]byte) Field { - return Array(key, byteStringsArray(bss)) -} - -// Complex128s constructs a field that carries a slice of complex numbers. -func Complex128s(key string, nums []complex128) Field { - return Array(key, complex128s(nums)) -} - -// Complex64s constructs a field that carries a slice of complex numbers. -func Complex64s(key string, nums []complex64) Field { - return Array(key, complex64s(nums)) -} - -// Durations constructs a field that carries a slice of time.Durations. -func Durations(key string, ds []time.Duration) Field { - return Array(key, durations(ds)) -} - -// Float64s constructs a field that carries a slice of floats. 
-func Float64s(key string, nums []float64) Field { - return Array(key, float64s(nums)) -} - -// Float32s constructs a field that carries a slice of floats. -func Float32s(key string, nums []float32) Field { - return Array(key, float32s(nums)) -} - -// Ints constructs a field that carries a slice of integers. -func Ints(key string, nums []int) Field { - return Array(key, ints(nums)) -} - -// Int64s constructs a field that carries a slice of integers. -func Int64s(key string, nums []int64) Field { - return Array(key, int64s(nums)) -} - -// Int32s constructs a field that carries a slice of integers. -func Int32s(key string, nums []int32) Field { - return Array(key, int32s(nums)) -} - -// Int16s constructs a field that carries a slice of integers. -func Int16s(key string, nums []int16) Field { - return Array(key, int16s(nums)) -} - -// Int8s constructs a field that carries a slice of integers. -func Int8s(key string, nums []int8) Field { - return Array(key, int8s(nums)) -} - -// Strings constructs a field that carries a slice of strings. -func Strings(key string, ss []string) Field { - return Array(key, stringArray(ss)) -} - -// Times constructs a field that carries a slice of time.Times. -func Times(key string, ts []time.Time) Field { - return Array(key, times(ts)) -} - -// Uints constructs a field that carries a slice of unsigned integers. -func Uints(key string, nums []uint) Field { - return Array(key, uints(nums)) -} - -// Uint64s constructs a field that carries a slice of unsigned integers. -func Uint64s(key string, nums []uint64) Field { - return Array(key, uint64s(nums)) -} - -// Uint32s constructs a field that carries a slice of unsigned integers. -func Uint32s(key string, nums []uint32) Field { - return Array(key, uint32s(nums)) -} - -// Uint16s constructs a field that carries a slice of unsigned integers. -func Uint16s(key string, nums []uint16) Field { - return Array(key, uint16s(nums)) -} - -// Uint8s constructs a field that carries a slice of unsigned integers. -func Uint8s(key string, nums []uint8) Field { - return Array(key, uint8s(nums)) -} - -// Uintptrs constructs a field that carries a slice of pointer addresses. -func Uintptrs(key string, us []uintptr) Field { - return Array(key, uintptrs(us)) -} - -// Errors constructs a field that carries a slice of errors. 
-func Errors(key string, errs []error) Field { - return Array(key, errArray(errs)) -} - -type bools []bool - -func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bs { - arr.AppendBool(bs[i]) - } - return nil -} - -type byteStringsArray [][]byte - -func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bss { - arr.AppendByteString(bss[i]) - } - return nil -} - -type complex128s []complex128 - -func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex128(nums[i]) - } - return nil -} - -type complex64s []complex64 - -func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex64(nums[i]) - } - return nil -} - -type durations []time.Duration - -func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ds { - arr.AppendDuration(ds[i]) - } - return nil -} - -type float64s []float64 - -func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat64(nums[i]) - } - return nil -} - -type float32s []float32 - -func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat32(nums[i]) - } - return nil -} - -type ints []int - -func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt(nums[i]) - } - return nil -} - -type int64s []int64 - -func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt64(nums[i]) - } - return nil -} - -type int32s []int32 - -func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt32(nums[i]) - } - return nil -} - -type int16s []int16 - -func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt16(nums[i]) - } - return nil -} - -type int8s []int8 - -func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt8(nums[i]) - } - return nil -} - -type stringArray []string - -func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ss { - arr.AppendString(ss[i]) - } - return nil -} - -type times []time.Time - -func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ts { - arr.AppendTime(ts[i]) - } - return nil -} - -type uints []uint - -func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint(nums[i]) - } - return nil -} - -type uint64s []uint64 - -func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint64(nums[i]) - } - return nil -} - -type uint32s []uint32 - -func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint32(nums[i]) - } - return nil -} - -type uint16s []uint16 - -func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint16(nums[i]) - } - return nil -} - -type uint8s []uint8 - -func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint8(nums[i]) - } - return nil -} - -type uintptrs []uintptr - -func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUintptr(nums[i]) - } - return nil -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/buffer/buffer.go 
b/pkg/c8y/vendor/go.uber.org/zap/buffer/buffer.go deleted file mode 100644 index 7592e8c6..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/buffer/buffer.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package buffer provides a thin wrapper around a byte slice. Unlike the -// standard library's bytes.Buffer, it supports a portion of the strconv -// package's zero-allocation formatters. -package buffer // import "go.uber.org/zap/buffer" - -import "strconv" - -const _size = 1024 // by default, create 1 KiB buffers - -// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so -// the only way to construct one is via a Pool. -type Buffer struct { - bs []byte - pool Pool -} - -// AppendByte writes a single byte to the Buffer. -func (b *Buffer) AppendByte(v byte) { - b.bs = append(b.bs, v) -} - -// AppendString writes a string to the Buffer. -func (b *Buffer) AppendString(s string) { - b.bs = append(b.bs, s...) -} - -// AppendInt appends an integer to the underlying buffer (assuming base 10). -func (b *Buffer) AppendInt(i int64) { - b.bs = strconv.AppendInt(b.bs, i, 10) -} - -// AppendUint appends an unsigned integer to the underlying buffer (assuming -// base 10). -func (b *Buffer) AppendUint(i uint64) { - b.bs = strconv.AppendUint(b.bs, i, 10) -} - -// AppendBool appends a bool to the underlying buffer. -func (b *Buffer) AppendBool(v bool) { - b.bs = strconv.AppendBool(b.bs, v) -} - -// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN -// or +/- Inf. -func (b *Buffer) AppendFloat(f float64, bitSize int) { - b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) -} - -// Len returns the length of the underlying byte slice. -func (b *Buffer) Len() int { - return len(b.bs) -} - -// Cap returns the capacity of the underlying byte slice. -func (b *Buffer) Cap() int { - return cap(b.bs) -} - -// Bytes returns a mutable reference to the underlying byte slice. -func (b *Buffer) Bytes() []byte { - return b.bs -} - -// String returns a string copy of the underlying byte slice. -func (b *Buffer) String() string { - return string(b.bs) -} - -// Reset resets the underlying byte slice. Subsequent writes re-use the slice's -// backing array. -func (b *Buffer) Reset() { - b.bs = b.bs[:0] -} - -// Write implements io.Writer. -func (b *Buffer) Write(bs []byte) (int, error) { - b.bs = append(b.bs, bs...) 
- return len(bs), nil -} - -// TrimNewline trims any final "\n" byte from the end of the buffer. -func (b *Buffer) TrimNewline() { - if i := len(b.bs) - 1; i >= 0 { - if b.bs[i] == '\n' { - b.bs = b.bs[:i] - } - } -} - -// Free returns the Buffer to its Pool. -// -// Callers must not retain references to the Buffer after calling Free. -func (b *Buffer) Free() { - b.pool.put(b) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/buffer/pool.go b/pkg/c8y/vendor/go.uber.org/zap/buffer/pool.go deleted file mode 100644 index 8fb3e202..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/buffer/pool.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package buffer - -import "sync" - -// A Pool is a type-safe wrapper around a sync.Pool. -type Pool struct { - p *sync.Pool -} - -// NewPool constructs a new Pool. -func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} -} - -// Get retrieves a Buffer from the pool, creating one if necessary. -func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) - buf.Reset() - buf.pool = p - return buf -} - -func (p Pool) put(buf *Buffer) { - p.p.Put(buf) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/check_license.sh b/pkg/c8y/vendor/go.uber.org/zap/check_license.sh deleted file mode 100644 index 345ac8b8..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/check_license.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -e - -ERROR_COUNT=0 -while read -r file -do - case "$(head -1 "${file}")" in - *"Copyright (c) "*" Uber Technologies, Inc.") - # everything's cool - ;; - *) - echo "$file is missing license header." - (( ERROR_COUNT++ )) - ;; - esac -done < <(git ls-files "*\.go") - -exit $ERROR_COUNT diff --git a/pkg/c8y/vendor/go.uber.org/zap/config.go b/pkg/c8y/vendor/go.uber.org/zap/config.go deleted file mode 100644 index 6fe17d9e..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/config.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sort" - "time" - - "go.uber.org/zap/zapcore" -) - -// SamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -// -// Values configured here are per-second. See zapcore.NewSampler for details. -type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` -} - -// Config offers a declarative way to construct a logger. It doesn't do -// anything that can't be done with New, Options, and the various -// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to -// toggle common options. -// -// Note that Config intentionally supports only the most common options. More -// unusual logging setups (logging to network connections or message queues, -// splitting output between multiple files, etc.) are possible, but require -// direct use of the zapcore package. For sample code, see the package-level -// BasicConfiguration and AdvancedConfiguration examples. -// -// For an example showing runtime log level changes, see the documentation for -// AtomicLevel. -type Config struct { - // Level is the minimum enabled logging level. Note that this is a dynamic - // level, so calling Config.Level.SetLevel will atomically change the log - // level of all loggers descended from this config. - Level AtomicLevel `json:"level" yaml:"level"` - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - Development bool `json:"development" yaml:"development"` - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. - DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` - // DisableStacktrace completely disables automatic stacktrace capturing. By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` - // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. - Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` - // Encoding sets the logger's encoding. 
Valid values are "json" and - // "console", as well as any third-party encodings registered via - // RegisterEncoder. - Encoding string `json:"encoding" yaml:"encoding"` - // EncoderConfig sets options for the chosen encoder. See - // zapcore.EncoderConfig for details. - EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of URLs or file paths to write logging output to. - // See Open for details. - OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of URLs to write internal logger errors to. - // The default is standard error. - // - // Note that this setting only affects internal errors; for sample code that - // sends error-level logs to a different location from info- and debug-level - // logs, see the package-level AdvancedConfiguration example. - ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` - // InitialFields is a collection of fields to add to the root logger. - InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` -} - -// NewProductionEncoderConfig returns an opinionated EncoderConfig for -// production environments. -func NewProductionEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.EpochTimeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. -// -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. -func NewProductionConfig() Config { - return Config{ - Level: NewAtomicLevelAt(InfoLevel), - Development: false, - Sampling: &SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: NewProductionEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for -// development environments. -func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - // Keys can be anything except the empty string. - TimeKey: "T", - LevelKey: "L", - NameKey: "N", - CallerKey: "C", - MessageKey: "M", - StacktraceKey: "S", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. -// -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. -func NewDevelopmentConfig() Config { - return Config{ - Level: NewAtomicLevelAt(DebugLevel), - Development: true, - Encoding: "console", - EncoderConfig: NewDevelopmentEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// Build constructs a logger from the Config and Options. 
-func (cfg Config) Build(opts ...Option) (*Logger, error) { - enc, err := cfg.buildEncoder() - if err != nil { - return nil, err - } - - sink, errSink, err := cfg.openSinks() - if err != nil { - return nil, err - } - - log := New( - zapcore.NewCore(enc, sink, cfg.Level), - cfg.buildOptions(errSink)..., - ) - if len(opts) > 0 { - log = log.WithOptions(opts...) - } - return log, nil -} - -func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { - opts := []Option{ErrorOutput(errSink)} - - if cfg.Development { - opts = append(opts, Development()) - } - - if !cfg.DisableCaller { - opts = append(opts, AddCaller()) - } - - stackLevel := ErrorLevel - if cfg.Development { - stackLevel = WarnLevel - } - if !cfg.DisableStacktrace { - opts = append(opts, AddStacktrace(stackLevel)) - } - - if cfg.Sampling != nil { - opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) - })) - } - - if len(cfg.InitialFields) > 0 { - fs := make([]Field, 0, len(cfg.InitialFields)) - keys := make([]string, 0, len(cfg.InitialFields)) - for k := range cfg.InitialFields { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fs = append(fs, Any(k, cfg.InitialFields[k])) - } - opts = append(opts, Fields(fs...)) - } - - return opts -} - -func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { - sink, closeOut, err := Open(cfg.OutputPaths...) - if err != nil { - return nil, nil, err - } - errSink, _, err := Open(cfg.ErrorOutputPaths...) - if err != nil { - closeOut() - return nil, nil, err - } - return sink, errSink, nil -} - -func (cfg Config) buildEncoder() (zapcore.Encoder, error) { - return newEncoder(cfg.Encoding, cfg.EncoderConfig) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/doc.go b/pkg/c8y/vendor/go.uber.org/zap/doc.go deleted file mode 100644 index 8638dd1b..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/doc.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zap provides fast, structured, leveled logging. -// -// For applications that log in the hot path, reflection-based serialization -// and string formatting are prohibitively expensive - they're CPU-intensive -// and make many small allocations. Put differently, using json.Marshal and -// fmt.Fprintf to log tons of interface{} makes your application slow. 
-// -// Zap takes a different approach. It includes a reflection-free, -// zero-allocation JSON encoder, and the base Logger strives to avoid -// serialization overhead and allocations wherever possible. By building the -// high-level SugaredLogger on that foundation, zap lets users choose when -// they need to count every allocation and when they'd prefer a more familiar, -// loosely typed API. -// -// Choosing a Logger -// -// In contexts where performance is nice, but not critical, use the -// SugaredLogger. It's 4-10x faster than other structured logging packages and -// supports both structured and printf-style logging. Like log15 and go-kit, -// the SugaredLogger's structured logging APIs are loosely typed and accept a -// variadic number of key-value pairs. (For more advanced use cases, they also -// accept strongly typed fields - see the SugaredLogger.With documentation for -// details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") -// -// By default, loggers are unbuffered. However, since zap's low-level APIs -// allow buffering, calling Sync before letting your process exit is a good -// habit. -// -// In the rare contexts where every microsecond and every allocation matter, -// use the Logger. It's even faster than the SugaredLogger and allocates far -// less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) -// -// Choosing between the Logger and SugaredLogger doesn't need to be an -// application-wide decision: converting between the two is simple and -// inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() -// -// Configuring Zap -// -// The simplest way to build a Logger is to use zap's opinionated presets: -// NewExample, NewProduction, and NewDevelopment. These presets build a logger -// with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() -// -// Presets are fine for small projects, but larger projects and organizations -// naturally require a bit more customization. For most users, zap's Config -// struct strikes the right balance between flexibility and convenience. See -// the package-level BasicConfiguration example for sample code. -// -// More unusual configurations (splitting output between files, sending logs -// to a message queue, etc.) are possible, but require direct use of -// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration -// example for sample code. -// -// Extending Zap -// -// The zap package itself is a relatively thin wrapper around the interfaces -// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., -// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an -// exception aggregation service, like Sentry or Rollbar) typically requires -// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core -// interfaces. See the zapcore documentation for details. 
-// -// Similarly, package authors can use the high-performance Encoder and Core -// implementations in the zapcore package to build their own loggers. -// -// Frequently Asked Questions -// -// An FAQ covering everything from installation errors to design decisions is -// available at https://github.com/uber-go/zap/blob/master/FAQ.md. -package zap // import "go.uber.org/zap" diff --git a/pkg/c8y/vendor/go.uber.org/zap/encoder.go b/pkg/c8y/vendor/go.uber.org/zap/encoder.go deleted file mode 100644 index 2e9d3c34..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/encoder.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "sync" - - "go.uber.org/zap/zapcore" -) - -var ( - errNoEncoderNameSpecified = errors.New("no encoder name specified") - - _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ - "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewConsoleEncoder(encoderConfig), nil - }, - "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewJSONEncoder(encoderConfig), nil - }, - } - _encoderMutex sync.RWMutex -) - -// RegisterEncoder registers an encoder constructor, which the Config struct -// can then reference. By default, the "json" and "console" encoders are -// registered. -// -// Attempting to register an encoder whose name is already taken returns an -// error. 
-func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { - _encoderMutex.Lock() - defer _encoderMutex.Unlock() - if name == "" { - return errNoEncoderNameSpecified - } - if _, ok := _encoderNameToConstructor[name]; ok { - return fmt.Errorf("encoder already registered for name %q", name) - } - _encoderNameToConstructor[name] = constructor - return nil -} - -func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - _encoderMutex.RLock() - defer _encoderMutex.RUnlock() - if name == "" { - return nil, errNoEncoderNameSpecified - } - constructor, ok := _encoderNameToConstructor[name] - if !ok { - return nil, fmt.Errorf("no encoder registered for name %q", name) - } - return constructor(encoderConfig) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/error.go b/pkg/c8y/vendor/go.uber.org/zap/error.go deleted file mode 100644 index 65982a51..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/error.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sync" - - "go.uber.org/zap/zapcore" -) - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Error is shorthand for the common idiom NamedError("error", err). -func Error(err error) Field { - return NamedError("error", err) -} - -// NamedError constructs a field that lazily stores err.Error() under the -// provided key. Errors which also implement fmt.Formatter (like those produced -// by github.com/pkg/errors) will also have their verbose representation stored -// under key+"Verbose". If passed a nil error, the field is a no-op. -// -// For the common case in which the key is simply "error", the Error function -// is shorter and less repetitive. -func NamedError(key string, err error) Field { - if err == nil { - return Skip() - } - return Field{Key: key, Type: zapcore.ErrorType, Interface: err} -} - -type errArray []error - -func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - // To represent each error as an object with an "error" attribute and - // potentially an "errorVerbose" attribute, we need to wrap it in a - // type that implements LogObjectMarshaler. To prevent this from - // allocating, pool the wrapper type. 
- elem := _errArrayElemPool.Get().(*errArrayElem) - elem.error = errs[i] - arr.AppendObject(elem) - elem.error = nil - _errArrayElemPool.Put(elem) - } - return nil -} - -type errArrayElem struct { - error -} - -func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { - // Re-use the error field's logic, which supports non-standard error types. - Error(e.error).AddTo(enc) - return nil -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/field.go b/pkg/c8y/vendor/go.uber.org/zap/field.go deleted file mode 100644 index 5130e134..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/field.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "math" - "time" - - "go.uber.org/zap/zapcore" -) - -// Field is an alias for Field. Aliasing this type dramatically -// improves the navigability of this package's API documentation. -type Field = zapcore.Field - -// Skip constructs a no-op field, which is often useful when handling invalid -// inputs in other Field constructors. -func Skip() Field { - return Field{Type: zapcore.SkipType} -} - -// Binary constructs a field that carries an opaque binary blob. -// -// Binary data is serialized in an encoding-appropriate format. For example, -// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, -// use ByteString. -func Binary(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.BinaryType, Interface: val} -} - -// Bool constructs a field that carries a bool. -func Bool(key string, val bool) Field { - var ival int64 - if val { - ival = 1 - } - return Field{Key: key, Type: zapcore.BoolType, Integer: ival} -} - -// ByteString constructs a field that carries UTF-8 encoded text as a []byte. -// To log opaque binary blobs (which aren't necessarily valid UTF-8), use -// Binary. -func ByteString(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} -} - -// Complex128 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex128 to -// interface{}). -func Complex128(key string, val complex128) Field { - return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} -} - -// Complex64 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex64 to -// interface{}). 
-func Complex64(key string, val complex64) Field { - return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} -} - -// Float64 constructs a field that carries a float64. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float64(key string, val float64) Field { - return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} -} - -// Float32 constructs a field that carries a float32. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float32(key string, val float32) Field { - return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} -} - -// Int constructs a field with the given key and value. -func Int(key string, val int) Field { - return Int64(key, int64(val)) -} - -// Int64 constructs a field with the given key and value. -func Int64(key string, val int64) Field { - return Field{Key: key, Type: zapcore.Int64Type, Integer: val} -} - -// Int32 constructs a field with the given key and value. -func Int32(key string, val int32) Field { - return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} -} - -// Int16 constructs a field with the given key and value. -func Int16(key string, val int16) Field { - return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} -} - -// Int8 constructs a field with the given key and value. -func Int8(key string, val int8) Field { - return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} -} - -// String constructs a field with the given key and value. -func String(key string, val string) Field { - return Field{Key: key, Type: zapcore.StringType, String: val} -} - -// Uint constructs a field with the given key and value. -func Uint(key string, val uint) Field { - return Uint64(key, uint64(val)) -} - -// Uint64 constructs a field with the given key and value. -func Uint64(key string, val uint64) Field { - return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} -} - -// Uint32 constructs a field with the given key and value. -func Uint32(key string, val uint32) Field { - return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} -} - -// Uint16 constructs a field with the given key and value. -func Uint16(key string, val uint16) Field { - return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} -} - -// Uint8 constructs a field with the given key and value. -func Uint8(key string, val uint8) Field { - return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} -} - -// Uintptr constructs a field with the given key and value. -func Uintptr(key string, val uintptr) Field { - return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} -} - -// Reflect constructs a field with the given key and an arbitrary object. It uses -// an encoding-appropriate, reflection-based function to lazily serialize nearly -// any object into the logging context, but it's relatively slow and -// allocation-heavy. Outside tests, Any is always a better choice. -// -// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect -// includes the error message in the final log output. -func Reflect(key string, val interface{}) Field { - return Field{Key: key, Type: zapcore.ReflectType, Interface: val} -} - -// Namespace creates a named, isolated scope within the logger's context. All -// subsequent fields will be added to the new namespace. 
-// -// This helps prevent key collisions when injecting loggers into sub-components -// or third-party libraries. -func Namespace(key string) Field { - return Field{Key: key, Type: zapcore.NamespaceType} -} - -// Stringer constructs a field with the given key and the output of the value's -// String method. The Stringer's String method is called lazily. -func Stringer(key string, val fmt.Stringer) Field { - return Field{Key: key, Type: zapcore.StringerType, Interface: val} -} - -// Time constructs a Field with the given key and value. The encoder -// controls how the time is serialized. -func Time(key string, val time.Time) Field { - return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} -} - -// Stack constructs a field that stores a stacktrace of the current goroutine -// under provided key. Keep in mind that taking a stacktrace is eager and -// expensive (relatively speaking); this function both makes an allocation and -// takes about two microseconds. -func Stack(key string) Field { - // Returning the stacktrace as a string costs an allocation, but saves us - // from expanding the zapcore.Field union struct to include a byte slice. Since - // taking a stacktrace is already so expensive (~10us), the extra allocation - // is okay. - return String(key, takeStacktrace()) -} - -// Duration constructs a field with the given key and value. The encoder -// controls how the duration is serialized. -func Duration(key string, val time.Duration) Field { - return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} -} - -// Object constructs a field with the given key and ObjectMarshaler. It -// provides a flexible, but still type-safe and efficient, way to add map- or -// struct-like user-defined types to the logging context. The struct's -// MarshalLogObject method is called lazily. -func Object(key string, val zapcore.ObjectMarshaler) Field { - return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} -} - -// Any takes a key and an arbitrary value and chooses the best way to represent -// them as a field, falling back to a reflection-based approach only if -// necessary. -// -// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between -// them. To minimize surprises, []byte values are treated as binary blobs, byte -// values are treated as uint8, and runes are always treated as integers. 
-func Any(key string, value interface{}) Field { - switch val := value.(type) { - case zapcore.ObjectMarshaler: - return Object(key, val) - case zapcore.ArrayMarshaler: - return Array(key, val) - case bool: - return Bool(key, val) - case []bool: - return Bools(key, val) - case complex128: - return Complex128(key, val) - case []complex128: - return Complex128s(key, val) - case complex64: - return Complex64(key, val) - case []complex64: - return Complex64s(key, val) - case float64: - return Float64(key, val) - case []float64: - return Float64s(key, val) - case float32: - return Float32(key, val) - case []float32: - return Float32s(key, val) - case int: - return Int(key, val) - case []int: - return Ints(key, val) - case int64: - return Int64(key, val) - case []int64: - return Int64s(key, val) - case int32: - return Int32(key, val) - case []int32: - return Int32s(key, val) - case int16: - return Int16(key, val) - case []int16: - return Int16s(key, val) - case int8: - return Int8(key, val) - case []int8: - return Int8s(key, val) - case string: - return String(key, val) - case []string: - return Strings(key, val) - case uint: - return Uint(key, val) - case []uint: - return Uints(key, val) - case uint64: - return Uint64(key, val) - case []uint64: - return Uint64s(key, val) - case uint32: - return Uint32(key, val) - case []uint32: - return Uint32s(key, val) - case uint16: - return Uint16(key, val) - case []uint16: - return Uint16s(key, val) - case uint8: - return Uint8(key, val) - case []byte: - return Binary(key, val) - case uintptr: - return Uintptr(key, val) - case []uintptr: - return Uintptrs(key, val) - case time.Time: - return Time(key, val) - case []time.Time: - return Times(key, val) - case time.Duration: - return Duration(key, val) - case []time.Duration: - return Durations(key, val) - case error: - return NamedError(key, val) - case []error: - return Errors(key, val) - case fmt.Stringer: - return Stringer(key, val) - default: - return Reflect(key, val) - } -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/flag.go b/pkg/c8y/vendor/go.uber.org/zap/flag.go deleted file mode 100644 index 13128750..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/flag.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package zap - -import ( - "flag" - - "go.uber.org/zap/zapcore" -) - -// LevelFlag uses the standard library's flag.Var to declare a global flag -// with the specified name, default, and usage guidance. The returned value is -// a pointer to the value of the flag. -// -// If you don't want to use the flag package's global state, you can use any -// non-nil *Level as a flag.Value with your own *flag.FlagSet. -func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { - lvl := defaultLevel - flag.Var(&lvl, name, usage) - return &lvl -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/glide.lock b/pkg/c8y/vendor/go.uber.org/zap/glide.lock deleted file mode 100644 index 881b462c..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/glide.lock +++ /dev/null @@ -1,76 +0,0 @@ -hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 -updated: 2017-07-22T18:06:49.598185334-07:00 -imports: -- name: go.uber.org/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -testImports: -- name: github.com/apex/log - version: d9b960447bfa720077b2da653cc79e533455b499 - subpackages: - - handlers/json -- name: github.com/axw/gocov - version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/go-kit/kit - version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d - subpackages: - - log -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/golang/lint - version: c5fb716d6688a859aae56d26d3e6070808df29f7 - subpackages: - - golint -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mattn/go-colorable - version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a -- name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe -- name: github.com/mattn/goveralls - version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 -- name: github.com/pborman/uuid - version: e790cca94e6cc75c7064b1332e63811d4aae1a53 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/zerolog - version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 -- name: github.com/satori/go.uuid - version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b -- name: github.com/sirupsen/logrus - version: 7dd06bf38e1e13df288d471a57d5adbac106be9e -- name: github.com/stretchr/testify - version: f6abca593680b2315d2075e0f5e2a9751e3f431a - subpackages: - - assert - - require -- name: go.pedge.io/lion - version: 87958e8713f1fa138d993087133b97e976642159 -- name: golang.org/x/sys - version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f - subpackages: - - unix -- name: golang.org/x/tools - version: 496819729719f9d07692195e0a94d6edd2251389 - subpackages: - - cover -- name: gopkg.in/inconshreveable/log15.v2 - version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f - subpackages: - - stack - - term diff --git a/pkg/c8y/vendor/go.uber.org/zap/glide.yaml b/pkg/c8y/vendor/go.uber.org/zap/glide.yaml deleted file mode 100644 index 94412594..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/glide.yaml +++ 
/dev/null @@ -1,35 +0,0 @@ -package: go.uber.org/zap -license: MIT -import: -- package: go.uber.org/atomic - version: ^1 -- package: go.uber.org/multierr - version: ^1 -testImport: -- package: github.com/satori/go.uuid -- package: github.com/sirupsen/logrus -- package: github.com/apex/log - subpackages: - - handlers/json -- package: github.com/go-kit/kit - subpackages: - - log -- package: github.com/stretchr/testify - subpackages: - - assert - - require -- package: gopkg.in/inconshreveable/log15.v2 -- package: github.com/mattn/goveralls -- package: github.com/pborman/uuid -- package: github.com/pkg/errors -- package: go.pedge.io/lion -- package: github.com/rs/zerolog -- package: golang.org/x/tools - subpackages: - - cover -- package: github.com/golang/lint - subpackages: - - golint -- package: github.com/axw/gocov - subpackages: - - gocov diff --git a/pkg/c8y/vendor/go.uber.org/zap/global.go b/pkg/c8y/vendor/go.uber.org/zap/global.go deleted file mode 100644 index d02232e3..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/global.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "bytes" - "fmt" - "log" - "os" - "sync" - - "go.uber.org/zap/zapcore" -) - -const ( - _stdLogDefaultDepth = 2 - _loggerWriterDepth = 2 - _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + - "https://github.com/uber-go/zap/issues/new and reference this error: %v" -) - -var ( - _globalMu sync.RWMutex - _globalL = NewNop() - _globalS = _globalL.Sugar() -) - -// L returns the global Logger, which can be reconfigured with ReplaceGlobals. -// It's safe for concurrent use. -func L() *Logger { - _globalMu.RLock() - l := _globalL - _globalMu.RUnlock() - return l -} - -// S returns the global SugaredLogger, which can be reconfigured with -// ReplaceGlobals. It's safe for concurrent use. -func S() *SugaredLogger { - _globalMu.RLock() - s := _globalS - _globalMu.RUnlock() - return s -} - -// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a -// function to restore the original values. It's safe for concurrent use. -func ReplaceGlobals(logger *Logger) func() { - _globalMu.Lock() - prev := _globalL - _globalL = logger - _globalS = logger.Sugar() - _globalMu.Unlock() - return func() { ReplaceGlobals(prev) } -} - -// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at -// InfoLevel. 
To redirect the standard library's package-global logging -// functions, use RedirectStdLog instead. -func NewStdLog(l *Logger) *log.Logger { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - f := logger.Info - return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) -} - -// NewStdLogAt returns *log.Logger which writes to supplied zap logger at -// required level. -func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil -} - -// RedirectStdLog redirects output from the standard library's package-global -// logger to the supplied logger at InfoLevel. Since zap already handles caller -// annotations, timestamps, etc., it automatically disables the standard -// library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLog(l *Logger) func() { - f, err := redirectStdLogAt(l, InfoLevel) - if err != nil { - // Can't get here, since passing InfoLevel to redirectStdLogAt always - // works. - panic(fmt.Sprintf(_programmerErrorTemplate, err)) - } - return f -} - -// RedirectStdLogAt redirects output from the standard library's package-global -// logger to the supplied logger at the specified level. Since zap already -// handles caller annotations, timestamps, etc., it automatically disables the -// standard library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - return redirectStdLogAt(l, level) -} - -func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - flags := log.Flags() - prefix := log.Prefix() - log.SetFlags(0) - log.SetPrefix("") - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - log.SetOutput(&loggerWriter{logFunc}) - return func() { - log.SetFlags(flags) - log.SetPrefix(prefix) - log.SetOutput(os.Stderr) - }, nil -} - -func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { - switch lvl { - case DebugLevel: - return logger.Debug, nil - case InfoLevel: - return logger.Info, nil - case WarnLevel: - return logger.Warn, nil - case ErrorLevel: - return logger.Error, nil - case DPanicLevel: - return logger.DPanic, nil - case PanicLevel: - return logger.Panic, nil - case FatalLevel: - return logger.Fatal, nil - } - return nil, fmt.Errorf("unrecognized level: %q", lvl) -} - -type loggerWriter struct { - logFunc func(msg string, fields ...Field) -} - -func (l *loggerWriter) Write(p []byte) (int, error) { - p = bytes.TrimSpace(p) - l.logFunc(string(p)) - return len(p), nil -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/http_handler.go b/pkg/c8y/vendor/go.uber.org/zap/http_handler.go deleted file mode 100644 index 1b0ecaca..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/http_handler.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "go.uber.org/zap/zapcore"
-)
-
-// ServeHTTP is a simple JSON endpoint that can report on or change the current
-// logging level.
-//
-// GET requests return a JSON description of the current logging level. PUT
-// requests change the logging level and expect a payload like:
-// {"level":"info"}
-//
-// It's perfectly safe to change the logging level while a program is running.
-func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- type errorResponse struct {
- Error string `json:"error"`
- }
- type payload struct {
- Level *zapcore.Level `json:"level"`
- }
-
- enc := json.NewEncoder(w)
-
- switch r.Method {
-
- case http.MethodGet:
- current := lvl.Level()
- enc.Encode(payload{Level: &current})
-
- case http.MethodPut:
- var req payload
-
- if errmess := func() string {
- if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
- return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
- }
- if req.Level == nil {
- return "Must specify a logging level."
- }
- return ""
- }(); errmess != "" {
- w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: errmess})
- return
- }
-
- lvl.SetLevel(*req.Level)
- enc.Encode(req)
-
- default:
- w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
- Error: "Only GET and PUT are supported.",
- })
- }
-}
diff --git a/pkg/c8y/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/pkg/c8y/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
deleted file mode 100644
index dad583aa..00000000
--- a/pkg/c8y/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package bufferpool houses zap's shared internal buffer pool. Third-party -// packages can recreate the same functionality with buffers.NewPool. -package bufferpool - -import "go.uber.org/zap/buffer" - -var ( - _pool = buffer.NewPool() - // Get retrieves a buffer from the pool, creating one if necessary. - Get = _pool.Get -) diff --git a/pkg/c8y/vendor/go.uber.org/zap/internal/color/color.go b/pkg/c8y/vendor/go.uber.org/zap/internal/color/color.go deleted file mode 100644 index c4d5d02a..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/internal/color/color.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package color adds coloring functionality for TTY output. -package color - -import "fmt" - -// Foreground colors. -const ( - Black Color = iota + 30 - Red - Green - Yellow - Blue - Magenta - Cyan - White -) - -// Color represents a text color. -type Color uint8 - -// Add adds the coloring to the given string. -func (c Color) Add(s string) string { - return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/internal/exit/exit.go b/pkg/c8y/vendor/go.uber.org/zap/internal/exit/exit.go deleted file mode 100644 index dfc5b05f..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/internal/exit/exit.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package exit provides stubs so that unit tests can exercise code that calls -// os.Exit(1). -package exit - -import "os" - -var real = func() { os.Exit(1) } - -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() -} - -// A StubbedExit is a testing fake for os.Exit. -type StubbedExit struct { - Exited bool - prev func() -} - -// Stub substitutes a fake for the call to os.Exit(1). -func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit - return s -} - -// WithStub runs the supplied function with Exit stubbed. It returns the stub -// used, so that users can test whether the process would have crashed. -func WithStub(f func()) *StubbedExit { - s := Stub() - defer s.Unstub() - f() - return s -} - -// Unstub restores the previous exit function. -func (se *StubbedExit) Unstub() { - real = se.prev -} - -func (se *StubbedExit) exit() { - se.Exited = true -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/level.go b/pkg/c8y/vendor/go.uber.org/zap/level.go deleted file mode 100644 index 3567a9a1..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/level.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "go.uber.org/atomic" - "go.uber.org/zap/zapcore" -) - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel = zapcore.DebugLevel - // InfoLevel is the default logging priority. - InfoLevel = zapcore.InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel = zapcore.WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel = zapcore.ErrorLevel - // DPanicLevel logs are particularly important errors. 
In development the - // logger panics after writing the message. - DPanicLevel = zapcore.DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel = zapcore.PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel = zapcore.FatalLevel -) - -// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with -// an anonymous function. -// -// It's particularly useful when splitting log output between different -// outputs (e.g., standard error and standard out). For sample code, see the -// package-level AdvancedConfiguration example. -type LevelEnablerFunc func(zapcore.Level) bool - -// Enabled calls the wrapped function. -func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } - -// An AtomicLevel is an atomically changeable, dynamic logging level. It lets -// you safely change the log level of a tree of loggers (the root logger and -// any children created by adding context) at runtime. -// -// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to -// alter its level. -// -// AtomicLevels must be created with the NewAtomicLevel constructor to allocate -// their internal atomic pointer. -type AtomicLevel struct { - l *atomic.Int32 -} - -// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging -// enabled. -func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } -} - -// NewAtomicLevelAt is a convenience function that creates an AtomicLevel -// and then calls SetLevel with the given level. -func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { - a := NewAtomicLevel() - a.SetLevel(l) - return a -} - -// Enabled implements the zapcore.LevelEnabler interface, which allows the -// AtomicLevel to be used in place of traditional static levels. -func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { - return lvl.Level().Enabled(l) -} - -// Level returns the minimum enabled log level. -func (lvl AtomicLevel) Level() zapcore.Level { - return zapcore.Level(int8(lvl.l.Load())) -} - -// SetLevel alters the logging level. -func (lvl AtomicLevel) SetLevel(l zapcore.Level) { - lvl.l.Store(int32(l)) -} - -// String returns the string representation of the underlying Level. -func (lvl AtomicLevel) String() string { - return lvl.Level().String() -} - -// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text -// representations as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl *AtomicLevel) UnmarshalText(text []byte) error { - if lvl.l == nil { - lvl.l = &atomic.Int32{} - } - - var l zapcore.Level - if err := l.UnmarshalText(text); err != nil { - return err - } - - lvl.SetLevel(l) - return nil -} - -// MarshalText marshals the AtomicLevel to a byte slice. It uses the same -// text representation as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl AtomicLevel) MarshalText() (text []byte, err error) { - return lvl.Level().MarshalText() -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/logger.go b/pkg/c8y/vendor/go.uber.org/zap/logger.go deleted file mode 100644 index e065634d..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/logger.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io" - "os" - "runtime" - "strings" - "time" - - "go.uber.org/zap/zapcore" -) - -// A Logger provides fast, leveled, structured logging. All methods are safe -// for concurrent use. -// -// The Logger is designed for contexts in which every microsecond and every -// allocation matters, so its API intentionally favors performance and type -// safety over brevity. For most applications, the SugaredLogger strikes a -// better balance between performance and ergonomics. -type Logger struct { - core zapcore.Core - - development bool - name string - errorOutput zapcore.WriteSyncer - - addCaller bool - addStack zapcore.LevelEnabler - - callerSkip int -} - -// New constructs a new Logger from the provided zapcore.Core and Options. If -// the passed zapcore.Core is nil, it falls back to using a no-op -// implementation. -// -// This is the most flexible way to construct a Logger, but also the most -// verbose. For typical use cases, the highly-opinionated presets -// (NewProduction, NewDevelopment, and NewExample) or the Config struct are -// more convenient. -// -// For sample code, see the package-level AdvancedConfiguration example. -func New(core zapcore.Core, options ...Option) *Logger { - if core == nil { - return NewNop() - } - log := &Logger{ - core: core, - errorOutput: zapcore.Lock(os.Stderr), - addStack: zapcore.FatalLevel + 1, - } - return log.WithOptions(options...) -} - -// NewNop returns a no-op Logger. It never writes out logs or internal errors, -// and it never runs user-defined hooks. -// -// Using WithOptions to replace the Core or error output of a no-op Logger can -// re-enable logging. -func NewNop() *Logger { - return &Logger{ - core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(io.Discard), - addStack: zapcore.FatalLevel + 1, - } -} - -// NewProduction builds a sensible production Logger that writes InfoLevel and -// above logs to standard error as JSON. -// -// It's a shortcut for NewProductionConfig().Build(...Option). -func NewProduction(options ...Option) (*Logger, error) { - return NewProductionConfig().Build(options...) -} - -// NewDevelopment builds a development Logger that writes DebugLevel and above -// logs to standard error in a human-friendly format. -// -// It's a shortcut for NewDevelopmentConfig().Build(...Option). -func NewDevelopment(options ...Option) (*Logger, error) { - return NewDevelopmentConfig().Build(options...) 
-} - -// NewExample builds a Logger that's designed for use in zap's testable -// examples. It writes DebugLevel and above logs to standard out as JSON, but -// omits the timestamp and calling function to keep example output -// short and deterministic. -func NewExample(options ...Option) *Logger { - encoderCfg := zapcore.EncoderConfig{ - MessageKey: "msg", - LevelKey: "level", - NameKey: "logger", - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - } - core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) - return New(core).WithOptions(options...) -} - -// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, -// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a -// single application to use both Loggers and SugaredLoggers, converting -// between them on the boundaries of performance-sensitive code. -func (log *Logger) Sugar() *SugaredLogger { - core := log.clone() - core.callerSkip += 2 - return &SugaredLogger{core} -} - -// Named adds a new path segment to the logger's name. Segments are joined by -// periods. By default, Loggers are unnamed. -func (log *Logger) Named(s string) *Logger { - if s == "" { - return log - } - l := log.clone() - if log.name == "" { - l.name = s - } else { - l.name = strings.Join([]string{l.name, s}, ".") - } - return l -} - -// WithOptions clones the current Logger, applies the supplied Options, and -// returns the resulting Logger. It's safe to use concurrently. -func (log *Logger) WithOptions(opts ...Option) *Logger { - c := log.clone() - for _, opt := range opts { - opt.apply(c) - } - return c -} - -// With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. -func (log *Logger) With(fields ...Field) *Logger { - if len(fields) == 0 { - return log - } - l := log.clone() - l.core = l.core.With(fields) - return l -} - -// Check returns a CheckedEntry if logging a message at the specified level -// is enabled. It's a completely optional optimization; in high-performance -// applications, Check can help avoid allocating a slice to hold fields. -func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - return log.check(lvl, msg) -} - -// Debug logs a message at DebugLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Debug(msg string, fields ...Field) { - if ce := log.check(DebugLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Info logs a message at InfoLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Info(msg string, fields ...Field) { - if ce := log.check(InfoLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Warn logs a message at WarnLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Warn(msg string, fields ...Field) { - if ce := log.check(WarnLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Error logs a message at ErrorLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Error(msg string, fields ...Field) { - if ce := log.check(ErrorLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// DPanic logs a message at DPanicLevel. 
The message includes any fields -// passed at the log site, as well as any fields accumulated on the logger. -// -// If the logger is in development mode, it then panics (DPanic means -// "development panic"). This is useful for catching errors that are -// recoverable, but shouldn't ever happen. -func (log *Logger) DPanic(msg string, fields ...Field) { - if ce := log.check(DPanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Panic logs a message at PanicLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then panics, even if logging at PanicLevel is disabled. -func (log *Logger) Panic(msg string, fields ...Field) { - if ce := log.check(PanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Fatal logs a message at FatalLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then calls os.Exit(1), even if logging at FatalLevel is -// disabled. -func (log *Logger) Fatal(msg string, fields ...Field) { - if ce := log.check(FatalLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Sync calls the underlying Core's Sync method, flushing any buffered log -// entries. Applications should take care to call Sync before exiting. -func (log *Logger) Sync() error { - return log.core.Sync() -} - -// Core returns the Logger's underlying zapcore.Core. -func (log *Logger) Core() zapcore.Core { - return log.core -} - -func (log *Logger) clone() *Logger { - copy := *log - return &copy -} - -func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). - const callerSkipOffset = 2 - - // Create basic checked entry thru the core; this will be non-nil if the - // log message will actually be written somewhere. - ent := zapcore.Entry{ - LoggerName: log.name, - Time: time.Now(), - Level: lvl, - Message: msg, - } - ce := log.core.Check(ent, nil) - willWrite := ce != nil - - // Set up any required terminal behavior. - switch ent.Level { - case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) - case zapcore.FatalLevel: - ce = ce.Should(ent, zapcore.WriteThenFatal) - case zapcore.DPanicLevel: - if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) - } - } - - // Only do further annotation if we're going to write this message; checked - // entries that exist only for terminal behavior don't benefit from - // annotation. - if !willWrite { - return ce - } - - // Thread the error output through to the CheckedEntry. - ce.ErrorOutput = log.errorOutput - if log.addCaller { - ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) - if !ce.Entry.Caller.Defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) - log.errorOutput.Sync() - } - } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = Stack("").String - } - - return ce -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/options.go b/pkg/c8y/vendor/go.uber.org/zap/options.go deleted file mode 100644 index 7a6b0fca..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/options.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc.
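For context on the logger.go file deleted above: call sites pass strongly typed Fields, and hot paths can use Check so that expensive field construction is skipped when a level is disabled. A hedged sketch of both patterns, not part of this patch; expensiveDetail is an invented stand-in.

package main

import "go.uber.org/zap"

// expensiveDetail is a hypothetical stand-in for a value that is costly to build.
func expensiveDetail() string { return "details" }

func main() {
    logger, err := zap.NewProduction()
    if err != nil {
        panic(err)
    }
    defer logger.Sync()

    logger.Info("handled request",
        zap.String("route", "/devices"),
        zap.Int("status", 200),
    )

    // Check returns a non-nil CheckedEntry only when DebugLevel is enabled,
    // so the expensive field below is never built otherwise.
    if ce := logger.Check(zap.DebugLevel, "request detail"); ce != nil {
        ce.Write(zap.String("detail", expensiveDetail()))
    }
}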
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "go.uber.org/zap/zapcore" - -// An Option configures a Logger. -type Option interface { - apply(*Logger) -} - -// optionFunc wraps a func so it satisfies the Option interface. -type optionFunc func(*Logger) - -func (f optionFunc) apply(log *Logger) { - f(log) -} - -// WrapCore wraps or replaces the Logger's underlying zapcore.Core. -func WrapCore(f func(zapcore.Core) zapcore.Core) Option { - return optionFunc(func(log *Logger) { - log.core = f(log.core) - }) -} - -// Hooks registers functions which will be called each time the Logger writes -// out an Entry. Repeated use of Hooks is additive. -// -// Hooks are useful for simple side effects, like capturing metrics for the -// number of emitted logs. More complex side effects, including anything that -// requires access to the Entry's structured fields, should be implemented as -// a zapcore.Core instead. See zapcore.RegisterHooks for details. -func Hooks(hooks ...func(zapcore.Entry) error) Option { - return optionFunc(func(log *Logger) { - log.core = zapcore.RegisterHooks(log.core, hooks...) - }) -} - -// Fields adds fields to the Logger. -func Fields(fs ...Field) Option { - return optionFunc(func(log *Logger) { - log.core = log.core.With(fs) - }) -} - -// ErrorOutput sets the destination for errors generated by the Logger. Note -// that this option only affects internal errors; for sample code that sends -// error-level logs to a different location from info- and debug-level logs, -// see the package-level AdvancedConfiguration example. -// -// The supplied WriteSyncer must be safe for concurrent use. The Open and -// zapcore.Lock functions are the simplest ways to protect files with a mutex. -func ErrorOutput(w zapcore.WriteSyncer) Option { - return optionFunc(func(log *Logger) { - log.errorOutput = w - }) -} - -// Development puts the logger in development mode, which makes DPanic-level -// logs panic instead of simply logging an error. -func Development() Option { - return optionFunc(func(log *Logger) { - log.development = true - }) -} - -// AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. -func AddCaller() Option { - return optionFunc(func(log *Logger) { - log.addCaller = true - }) -} - -// AddCallerSkip increases the number of callers skipped by caller annotation -// (as enabled by the AddCaller option). 
When building wrappers around the -// Logger and SugaredLogger, supplying this Option prevents zap from always -// reporting the wrapper code as the caller. -func AddCallerSkip(skip int) Option { - return optionFunc(func(log *Logger) { - log.callerSkip += skip - }) -} - -// AddStacktrace configures the Logger to record a stack trace for all messages at -// or above a given level. -func AddStacktrace(lvl zapcore.LevelEnabler) Option { - return optionFunc(func(log *Logger) { - log.addStack = lvl - }) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/sink.go b/pkg/c8y/vendor/go.uber.org/zap/sink.go deleted file mode 100644 index ff0becfe..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/sink.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "strings" - "sync" - - "go.uber.org/zap/zapcore" -) - -const schemeFile = "file" - -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} - -// Sink defines the interface to write to and close logger destinations. -type Sink interface { - zapcore.WriteSyncer - io.Closer -} - -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - -type errSinkNotFound struct { - scheme string -} - -func (e *errSinkNotFound) Error() string { - return fmt.Sprintf("no sink found for scheme %q", e.scheme) -} - -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. 
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - if scheme == "" { - return errors.New("can't register a sink factory for empty string") - } - normalized, err := normalizeScheme(scheme) - if err != nil { - return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) - } - if _, ok := _sinkFactories[normalized]; ok { - return fmt.Errorf("sink factory already registered for scheme %q", normalized) - } - _sinkFactories[normalized] = factory - return nil -} - -func newSink(rawURL string) (Sink, error) { - u, err := url.Parse(rawURL) - if err != nil { - return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) - } - if u.Scheme == "" { - u.Scheme = schemeFile - } - - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() - if !ok { - return nil, &errSinkNotFound{u.Scheme} - } - return factory(u) -} - -func newFileSink(u *url.URL) (Sink, error) { - if u.User != nil { - return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) - } - if u.Fragment != "" { - return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) - } - if u.RawQuery != "" { - return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) - } - // Error messages are better if we check hostname and port separately. - if u.Port() != "" { - return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) - } - if hn := u.Hostname(); hn != "" && hn != "localhost" { - return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) - } - switch u.Path { - case "stdout": - return nopCloserSink{os.Stdout}, nil - case "stderr": - return nopCloserSink{os.Stderr}, nil - } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) -} - -func normalizeScheme(s string) (string, error) { - // https://tools.ietf.org/html/rfc3986#section-3.1 - s = strings.ToLower(s) - if first := s[0]; 'a' > first || 'z' < first { - return "", errors.New("must start with a letter") - } - for i := 1; i < len(s); i++ { // iterate over bytes, not runes - c := s[i] - switch { - case 'a' <= c && c <= 'z': - continue - case '0' <= c && c <= '9': - continue - case c == '.' || c == '+' || c == '-': - continue - } - return "", fmt.Errorf("may not contain %q", c) - } - return s, nil -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/stacktrace.go b/pkg/c8y/vendor/go.uber.org/zap/stacktrace.go deleted file mode 100644 index 100fac21..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/stacktrace.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
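The sink.go registry above is the extension point for custom log destinations: RegisterSink maps a URL scheme onto a factory, and config output paths with that scheme are routed to it. A rough sketch with an in-memory sink; the memorySink type and the "memory" scheme are invented for illustration and are not part of this patch.

package main

import (
    "bytes"
    "net/url"

    "go.uber.org/zap"
)

// memorySink is a hypothetical Sink that keeps log output in a buffer.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Close() error { return nil }
func (*memorySink) Sync() error  { return nil }

func main() {
    sink := &memorySink{}
    // "memory" must be a valid RFC 3986 scheme and not already registered.
    if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
        return sink, nil
    }); err != nil {
        panic(err)
    }

    cfg := zap.NewProductionConfig()
    cfg.OutputPaths = []string{"memory://"}
    logger, err := cfg.Build()
    if err != nil {
        panic(err)
    }
    logger.Info("captured in memory")
    _ = sink.String() // the JSON-encoded entry ends up in the buffer
}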
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "runtime" - "strings" - "sync" - - "go.uber.org/zap/internal/bufferpool" -) - -const _zapPackage = "go.uber.org/zap" - -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } - - // We add "." and "/" suffixes to the package name to ensure we only match - // the exact package and not any package with the same prefix. - _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") - _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) -) - -func takeStacktrace() string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Counters and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) - } - - i := 0 - skipZapFrames := true // skip all consecutive zap frames at the beginning. - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) - - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if skipZapFrames && isZapFrame(frame.Function) { - continue - } else { - skipZapFrames = false - } - - if i != 0 { - buffer.AppendByte('\n') - } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) - } - - return buffer.String() -} - -func isZapFrame(function string) bool { - for _, prefix := range _zapStacktracePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - - // We can't use a prefix match here since the location of the vendor - // directory affects the prefix. Instead we do a contains match. - for _, contains := range _zapStacktraceVendorContains { - if strings.Contains(function, contains) { - return true - } - } - - return false -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} - -func addPrefix(prefix string, ss ...string) []string { - withPrefix := make([]string, len(ss)) - for i, s := range ss { - withPrefix[i] = prefix + s - } - return withPrefix -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/sugar.go b/pkg/c8y/vendor/go.uber.org/zap/sugar.go deleted file mode 100644 index 77ca227f..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/sugar.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
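stacktrace.go above backs both the Stack field and the AddStacktrace option. A brief, hedged illustration of attaching a trace to an entry (not part of this patch); note that the production preset typically already records traces for Error and above.

package main

import "go.uber.org/zap"

func main() {
    // With the production preset, Error-level entries typically already carry
    // a stacktrace; AddStacktrace(lvl) is the option that adjusts that cutoff.
    logger, err := zap.NewProduction()
    if err != nil {
        panic(err)
    }
    defer logger.Sync()

    logger.Error("failed to sync inventory") // stacktrace added by the preset

    // A trace can also be attached explicitly to a single lower-level entry.
    logger.Warn("suspicious state", zap.Stack("stacktrace"))
}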
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -const ( - _oddNumberErrMsg = "Ignored key without a value." - _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." -) - -// A SugaredLogger wraps the base Logger functionality in a slower, but less -// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar -// method. -// -// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. -type SugaredLogger struct { - base *Logger -} - -// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring -// is quite inexpensive, so it's reasonable for a single application to use -// both Loggers and SugaredLoggers, converting between them on the boundaries -// of performance-sensitive code. -func (s *SugaredLogger) Desugar() *Logger { - base := s.base.clone() - base.callerSkip -= 2 - return base -} - -// Named adds a sub-scope to the logger's name. See Logger.Named for details. -func (s *SugaredLogger) Named(name string) *SugaredLogger { - return &SugaredLogger{base: s.base.Named(name)} -} - -// With adds a variadic number of fields to the logging context. It accepts a -// mix of strongly-typed Field objects and loosely-typed key-value pairs. When -// processing pairs, the first element of the pair is used as the field key -// and the second as the field value. -// -// For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) -// is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) -// -// Note that the keys in key-value pairs should be strings. In development, -// passing a non-string key panics. In production, the logger is more -// forgiving: a separate error is logged, but the key-value pair is skipped -// and execution continues. Passing an orphaned key triggers similar behavior: -// panics in development and errors in production. 
-func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { - return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} -} - -// Debug uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Debug(args ...interface{}) { - s.log(DebugLevel, "", args, nil) -} - -// Info uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Info(args ...interface{}) { - s.log(InfoLevel, "", args, nil) -} - -// Warn uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Warn(args ...interface{}) { - s.log(WarnLevel, "", args, nil) -} - -// Error uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Error(args ...interface{}) { - s.log(ErrorLevel, "", args, nil) -} - -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanic(args ...interface{}) { - s.log(DPanicLevel, "", args, nil) -} - -// Panic uses fmt.Sprint to construct and log a message, then panics. -func (s *SugaredLogger) Panic(args ...interface{}) { - s.log(PanicLevel, "", args, nil) -} - -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. -func (s *SugaredLogger) Fatal(args ...interface{}) { - s.log(FatalLevel, "", args, nil) -} - -// Debugf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Debugf(template string, args ...interface{}) { - s.log(DebugLevel, template, args, nil) -} - -// Infof uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Infof(template string, args ...interface{}) { - s.log(InfoLevel, template, args, nil) -} - -// Warnf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Warnf(template string, args ...interface{}) { - s.log(WarnLevel, template, args, nil) -} - -// Errorf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Errorf(template string, args ...interface{}) { - s.log(ErrorLevel, template, args, nil) -} - -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { - s.log(DPanicLevel, template, args, nil) -} - -// Panicf uses fmt.Sprintf to log a templated message, then panics. -func (s *SugaredLogger) Panicf(template string, args ...interface{}) { - s.log(PanicLevel, template, args, nil) -} - -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. -func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { - s.log(FatalLevel, template, args, nil) -} - -// Debugw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -// -// When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) -func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { - s.log(DebugLevel, msg, nil, keysAndValues) -} - -// Infow logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { - s.log(InfoLevel, msg, nil, keysAndValues) -} - -// Warnw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { - s.log(WarnLevel, msg, nil, keysAndValues) -} - -// Errorw logs a message with some additional context. 
The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { - s.log(ErrorLevel, msg, nil, keysAndValues) -} - -// DPanicw logs a message with some additional context. In development, the -// logger then panics. (See DPanicLevel for details.) The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { - s.log(DPanicLevel, msg, nil, keysAndValues) -} - -// Panicw logs a message with some additional context, then panics. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { - s.log(PanicLevel, msg, nil, keysAndValues) -} - -// Fatalw logs a message with some additional context, then calls os.Exit. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { - s.log(FatalLevel, msg, nil, keysAndValues) -} - -// Sync flushes any buffered log entries. -func (s *SugaredLogger) Sync() error { - return s.base.Sync() -} - -func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { - // If logging at this level is completely disabled, skip the overhead of - // string formatting. - if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { - return - } - - // Format with Sprint, Sprintf, or neither. - msg := template - if msg == "" && len(fmtArgs) > 0 { - msg = fmt.Sprint(fmtArgs...) - } else if msg != "" && len(fmtArgs) > 0 { - msg = fmt.Sprintf(template, fmtArgs...) - } - - if ce := s.base.Check(lvl, msg); ce != nil { - ce.Write(s.sweetenFields(context)...) - } -} - -func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { - if len(args) == 0 { - return nil - } - - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs - - for i := 0; i < len(args); { - // This is a strongly-typed field. Consume it and move on. - if f, ok := args[i].(Field); ok { - fields = append(fields, f) - i++ - continue - } - - // Make sure this element isn't a dangling key. - if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) - break - } - - // Consume this value and the next, treating them as a key-value pair. If the - // key isn't a string, add this pair to the slice of invalid pairs. - key, val := args[i], args[i+1] - if keyStr, ok := key.(string); !ok { - // Subsequent errors are likely, so allocate once up front. - if cap(invalid) == 0 { - invalid = make(invalidPairs, 0, len(args)/2) - } - invalid = append(invalid, invalidPair{i, key, val}) - } else { - fields = append(fields, Any(keyStr, val)) - } - i += 2 - } - - // If we encountered any invalid key-value pairs, log an error. 
- if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) - } - return fields -} - -type invalidPair struct { - position int - key, value interface{} -} - -func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddInt64("position", int64(p.position)) - Any("key", p.key).AddTo(enc) - Any("value", p.value).AddTo(enc) - return nil -} - -type invalidPairs []invalidPair - -func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { - var err error - for i := range ps { - err = multierr.Append(err, enc.AppendObject(ps[i])) - } - return err -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/time.go b/pkg/c8y/vendor/go.uber.org/zap/time.go deleted file mode 100644 index c5a1f162..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/time.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "time" - -func timeToMillis(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/writer.go b/pkg/c8y/vendor/go.uber.org/zap/writer.go deleted file mode 100644 index dba3770e..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/writer.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
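sugar.go above is the loosely typed facade most callers reach for; its three method families per level (Info, Infof, Infow) feed the sweetenFields logic shown. A short illustrative sketch of the trade-off, not part of this patch.

package main

import "go.uber.org/zap"

func main() {
    logger, err := zap.NewProduction()
    if err != nil {
        panic(err)
    }
    defer logger.Sync()

    sugar := logger.Sugar()
    sugar.Infow("device registered", // loosely typed key-value pairs
        "deviceID", 4711,
        "tenant", "t1234",
    )
    sugar.Infof("retrying in %d seconds", 5) // printf-style

    // Back to the strongly typed Logger at a performance-sensitive boundary.
    fast := sugar.Desugar()
    fast.Info("hot path", zap.Int("queued", 17))
}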
- -package zap - -import ( - "fmt" - "io" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -// Open is a high-level wrapper that takes a variadic number of URLs, opens or -// creates each of the specified resources, and combines them into a locked -// WriteSyncer. It also returns any error encountered and a function to close -// any opened files. -// -// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a -// scheme and URLs with the "file" scheme. Third-party code may register -// factories for other schemes using RegisterSink. -// -// URLs with the "file" scheme must use absolute paths on the local -// filesystem. No user, password, port, fragments, or query parameters are -// allowed, and the hostname must be empty or "localhost". -// -// Since it's common to write logs to the local filesystem, URLs without a -// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without -// a scheme, the special paths "stdout" and "stderr" are interpreted as -// os.Stdout and os.Stderr. When specified without a scheme, relative file -// paths also work. -func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) - if err != nil { - return nil, nil, err - } - - writer := CombineWriteSyncers(writers...) - return writer, close, nil -} - -func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - writers := make([]zapcore.WriteSyncer, 0, len(paths)) - closers := make([]io.Closer, 0, len(paths)) - close := func() { - for _, c := range closers { - c.Close() - } - } - - var openErr error - for _, path := range paths { - sink, err := newSink(path) - if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) - continue - } - writers = append(writers, sink) - closers = append(closers, sink) - } - if openErr != nil { - close() - return writers, nil, openErr - } - - return writers, close, nil -} - -// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a -// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op -// WriteSyncer. -// -// It's provided purely as a convenience; the result is no different from -// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. -func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { - if len(writers) == 0 { - return zapcore.AddSync(io.Discard) - } - return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/console_encoder.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/console_encoder.go deleted file mode 100644 index b7875966..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
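writer.go above (Open and CombineWriteSyncers) is what turns output paths into a single locked WriteSyncer. A hedged sketch of using it directly with a hand-built core; the log file path is an arbitrary example, not something referenced by this patch.

package main

import (
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    // "stderr" is special-cased; the file path is created or appended to.
    ws, closeFn, err := zap.Open("stderr", "/tmp/c8y-example.log")
    if err != nil {
        panic(err)
    }
    defer closeFn()

    enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
        MessageKey:  "msg",
        LevelKey:    "level",
        TimeKey:     "ts",
        EncodeLevel: zapcore.LowercaseLevelEncoder,
        EncodeTime:  zapcore.ISO8601TimeEncoder,
    })
    logger := zap.New(zapcore.NewCore(enc, ws, zapcore.InfoLevel))
    defer logger.Sync()

    logger.Info("written to both stderr and the file")
}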
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} - -func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) -} - -func putSliceEncoder(e *sliceArrayEncoder) { - e.elems = e.elems[:0] - _sliceEncoderPool.Put(e) -} - -type consoleEncoder struct { - *jsonEncoder -} - -// NewConsoleEncoder creates an encoder whose output is designed for human - -// rather than machine - consumption. It serializes the core log entry data -// (message, level, timestamp, etc.) in a plain-text format and leaves the -// structured context as JSON. -// -// Note that although the console encoder doesn't use the keys specified in the -// encoder configuration, it will omit any element whose key is set to the empty -// string. -func NewConsoleEncoder(cfg EncoderConfig) Encoder { - return consoleEncoder{newJSONEncoder(cfg, true)} -} - -func (c consoleEncoder) Clone() Encoder { - return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} -} - -func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - line := bufferpool.Get() - - // We don't want the entry's metadata to be quoted and escaped (if it's - // encoded as strings), which means that we can't use the JSON encoder. The - // simplest option is to use the memory encoder and fmt.Fprint. - // - // If this ever becomes a performance bottleneck, we can implement - // ArrayEncoder for our plain-text format. - arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { - c.EncodeTime(ent.Time, arr) - } - if c.LevelKey != "" && c.EncodeLevel != nil { - c.EncodeLevel(ent.Level, arr) - } - if ent.LoggerName != "" && c.NameKey != "" { - nameEncoder := c.EncodeName - - if nameEncoder == nil { - // Fall back to FullNameEncoder for backward compatibility. - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, arr) - } - if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) - } - for i := range arr.elems { - if i > 0 { - line.AppendByte('\t') - } - fmt.Fprint(line, arr.elems[i]) - } - putSliceEncoder(arr) - - // Add the message itself. - if c.MessageKey != "" { - c.addTabIfNecessary(line) - line.AppendString(ent.Message) - } - - // Add any structured context. - c.writeContext(line, fields) - - // If there's no stacktrace key, honor that; this allows users to force - // single-line output. 
- if ent.Stack != "" && c.StacktraceKey != "" { - line.AppendByte('\n') - line.AppendString(ent.Stack) - } - - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } - return line, nil -} - -func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { - context := c.jsonEncoder.Clone().(*jsonEncoder) - defer context.buf.Free() - - addFields(context, extra) - context.closeOpenNamespaces() - if context.buf.Len() == 0 { - return - } - - c.addTabIfNecessary(line) - line.AppendByte('{') - line.Write(context.buf.Bytes()) - line.AppendByte('}') -} - -func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { - if line.Len() > 0 { - line.AppendByte('\t') - } -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/core.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/core.go deleted file mode 100644 index a1ef8b03..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/core.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// Core is a minimal, fast logger interface. It's designed for library authors -// to wrap in a more user-friendly API. -type Core interface { - LevelEnabler - - // With adds structured context to the Core. - With([]Field) Core - // Check determines whether the supplied Entry should be logged (using the - // embedded LevelEnabler and possibly some extra logic). If the entry - // should be logged, the Core adds itself to the CheckedEntry and returns - // the result. - // - // Callers must use Check before calling Write. - Check(Entry, *CheckedEntry) *CheckedEntry - // Write serializes the Entry and any Fields supplied at the log site and - // writes them to their destination. - // - // If called, Write should always log the Entry and Fields; it should not - // replicate the logic of Check. - Write(Entry, []Field) error - // Sync flushes buffered logs (if any). - Sync() error -} - -type nopCore struct{} - -// NewNopCore returns a no-op Core. -func NewNopCore() Core { return nopCore{} } -func (nopCore) Enabled(Level) bool { return false } -func (n nopCore) With([]Field) Core { return n } -func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } -func (nopCore) Write(Entry, []Field) error { return nil } -func (nopCore) Sync() error { return nil } - -// NewCore creates a Core that writes logs to a WriteSyncer. 
-func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { - return &ioCore{ - LevelEnabler: enab, - enc: enc, - out: ws, - } -} - -type ioCore struct { - LevelEnabler - enc Encoder - out WriteSyncer -} - -func (c *ioCore) With(fields []Field) Core { - clone := c.clone() - addFields(clone.enc, fields) - return clone -} - -func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if c.Enabled(ent.Level) { - return ce.AddCore(ent, c) - } - return ce -} - -func (c *ioCore) Write(ent Entry, fields []Field) error { - buf, err := c.enc.EncodeEntry(ent, fields) - if err != nil { - return err - } - _, err = c.out.Write(buf.Bytes()) - buf.Free() - if err != nil { - return err - } - if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() - } - return nil -} - -func (c *ioCore) Sync() error { - return c.out.Sync() -} - -func (c *ioCore) clone() *ioCore { - return &ioCore{ - LevelEnabler: c.LevelEnabler, - enc: c.enc.Clone(), - out: c.out, - } -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/doc.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/doc.go deleted file mode 100644 index 31000e91..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zapcore defines and implements the low-level interfaces upon which -// zap is built. By providing alternate implementations of these interfaces, -// external packages can extend zap's capabilities. -package zapcore // import "go.uber.org/zap/zapcore" diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/encoder.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/encoder.go deleted file mode 100644 index f0509522..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/encoder.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
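core.go and console_encoder.go above are the pieces that zap.New composes; wiring them by hand is the escape hatch when the presets don't fit. A minimal sketch under assumed choices (console output to stderr at debug level), not part of this patch.

package main

import (
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    encCfg := zapcore.EncoderConfig{
        MessageKey:     "msg",
        LevelKey:       "level",
        TimeKey:        "ts",
        CallerKey:      "caller",
        LineEnding:     zapcore.DefaultLineEnding,
        EncodeLevel:    zapcore.CapitalLevelEncoder,
        EncodeTime:     zapcore.ISO8601TimeEncoder,
        EncodeDuration: zapcore.StringDurationEncoder,
        EncodeCaller:   zapcore.ShortCallerEncoder,
    }
    core := zapcore.NewCore(
        zapcore.NewConsoleEncoder(encCfg), // tab-separated, human-oriented output
        zapcore.Lock(os.Stderr),
        zapcore.DebugLevel,
    )
    logger := zap.New(core, zap.AddCaller())
    defer logger.Sync()

    logger.Debug("console core ready", zap.String("component", "example"))
}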
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/zap/buffer" -) - -// DefaultLineEnding defines the default line ending when writing logs. -// Alternate line endings specified in EncoderConfig can override this -// behavior. -const DefaultLineEnding = "\n" - -// A LevelEncoder serializes a Level to a primitive type. -type LevelEncoder func(Level, PrimitiveArrayEncoder) - -// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, -// InfoLevel is serialized to "info". -func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.String()) -} - -// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. -// For example, InfoLevel is serialized to "info" and colored blue. -func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToLowercaseColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.String()) - } - enc.AppendString(s) -} - -// CapitalLevelEncoder serializes a Level to an all-caps string. For example, -// InfoLevel is serialized to "INFO". -func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.CapitalString()) -} - -// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. -// For example, InfoLevel is serialized to "INFO" and colored blue. -func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToCapitalColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.CapitalString()) - } - enc.AppendString(s) -} - -// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to -// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, -// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else -// is unmarshaled to LowercaseLevelEncoder. -func (e *LevelEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "capital": - *e = CapitalLevelEncoder - case "capitalColor": - *e = CapitalColorLevelEncoder - case "color": - *e = LowercaseColorLevelEncoder - default: - *e = LowercaseLevelEncoder - } - return nil -} - -// A TimeEncoder serializes a time.Time to a primitive type. -type TimeEncoder func(time.Time, PrimitiveArrayEncoder) - -// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds -// since the Unix epoch. 
-func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - sec := float64(nanos) / float64(time.Second) - enc.AppendFloat64(sec) -} - -// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of -// milliseconds since the Unix epoch. -func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - millis := float64(nanos) / float64(time.Millisecond) - enc.AppendFloat64(millis) -} - -// EpochNanosTimeEncoder serializes a time.Time to an integer number of -// nanoseconds since the Unix epoch. -func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendInt64(t.UnixNano()) -} - -// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string -// with millisecond precision. -func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) -} - -// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are -// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to -// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. -func (e *TimeEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "iso8601", "ISO8601": - *e = ISO8601TimeEncoder - case "millis": - *e = EpochMillisTimeEncoder - case "nanos": - *e = EpochNanosTimeEncoder - default: - *e = EpochTimeEncoder - } - return nil -} - -// A DurationEncoder serializes a time.Duration to a primitive type. -type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) - -// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. -func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendFloat64(float64(d) / float64(time.Second)) -} - -// NanosDurationEncoder serializes a time.Duration to an integer number of -// nanoseconds elapsed. -func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(int64(d)) -} - -// StringDurationEncoder serializes a time.Duration using its built-in String -// method. -func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendString(d.String()) -} - -// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled -// to StringDurationEncoder, and anything else is unmarshaled to -// NanosDurationEncoder. -func (e *DurationEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "string": - *e = StringDurationEncoder - case "nanos": - *e = NanosDurationEncoder - default: - *e = SecondsDurationEncoder - } - return nil -} - -// A CallerEncoder serializes an EntryCaller to a primitive type. -type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) - -// FullCallerEncoder serializes a caller in /full/path/to/package/file:line -// format. -func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.String()) -} - -// ShortCallerEncoder serializes a caller in package/file:line format, trimming -// all but the final directory from the full path. -func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.TrimmedPath()) -} - -// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to -// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. 
-func (e *CallerEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullCallerEncoder - default: - *e = ShortCallerEncoder - } - return nil -} - -// A NameEncoder serializes a period-separated logger name to a primitive -// type. -type NameEncoder func(string, PrimitiveArrayEncoder) - -// FullNameEncoder serializes the logger name as-is. -func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { - enc.AppendString(loggerName) -} - -// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is -// unmarshaled to FullNameEncoder. -func (e *NameEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullNameEncoder - default: - *e = FullNameEncoder - } - return nil -} - -// An EncoderConfig allows users to configure the concrete encoders supplied by -// zapcore. -type EncoderConfig struct { - // Set the keys used for each log entry. If any key is empty, that portion - // of the entry is omitted. - MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` - // Configure the primitive representations of common complex types. For - // example, some users may want all time.Times serialized as floating-point - // seconds since epoch, while others may prefer ISO8601 strings. - EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` - // Unlike the other primitive type encoders, EncodeName is optional. The - // zero value falls back to FullNameEncoder. - EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` -} - -// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a -// map- or struct-like object to the logging context. Like maps, ObjectEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ObjectEncoder interface { - // Logging-specific marshalers. - AddArray(key string, marshaler ArrayMarshaler) error - AddObject(key string, marshaler ObjectMarshaler) error - - // Built-in types. - AddBinary(key string, value []byte) // for arbitrary bytes - AddByteString(key string, value []byte) // for UTF-8 encoded bytes - AddBool(key string, value bool) - AddComplex128(key string, value complex128) - AddComplex64(key string, value complex64) - AddDuration(key string, value time.Duration) - AddFloat64(key string, value float64) - AddFloat32(key string, value float32) - AddInt(key string, value int) - AddInt64(key string, value int64) - AddInt32(key string, value int32) - AddInt16(key string, value int16) - AddInt8(key string, value int8) - AddString(key, value string) - AddTime(key string, value time.Time) - AddUint(key string, value uint) - AddUint64(key string, value uint64) - AddUint32(key string, value uint32) - AddUint16(key string, value uint16) - AddUint8(key string, value uint8) - AddUintptr(key string, value uintptr) - - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. 
- AddReflected(key string, value interface{}) error - // OpenNamespace opens an isolated namespace where all subsequent fields will - // be added. Applications can use namespaces to prevent key collisions when - // injecting loggers into sub-components or third-party libraries. - OpenNamespace(key string) -} - -// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding -// array-like objects to the logging context. Of note, it supports mixed-type -// arrays even though they aren't typical in Go. Like slices, ArrayEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ArrayEncoder interface { - // Built-in types. - PrimitiveArrayEncoder - - // Time-related types. - AppendDuration(time.Duration) - AppendTime(time.Time) - - // Logging-specific marshalers. - AppendArray(ArrayMarshaler) error - AppendObject(ObjectMarshaler) error - - // AppendReflected uses reflection to serialize arbitrary objects, so it's - // slow and allocation-heavy. - AppendReflected(value interface{}) error -} - -// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals -// only in Go's built-in types. It's included only so that Duration- and -// TimeEncoders cannot trigger infinite recursion. -type PrimitiveArrayEncoder interface { - // Built-in types. - AppendBool(bool) - AppendByteString([]byte) // for UTF-8 encoded bytes - AppendComplex128(complex128) - AppendComplex64(complex64) - AppendFloat64(float64) - AppendFloat32(float32) - AppendInt(int) - AppendInt64(int64) - AppendInt32(int32) - AppendInt16(int16) - AppendInt8(int8) - AppendString(string) - AppendUint(uint) - AppendUint64(uint64) - AppendUint32(uint32) - AppendUint16(uint16) - AppendUint8(uint8) - AppendUintptr(uintptr) -} - -// Encoder is a format-agnostic interface for all log entry marshalers. Since -// log encoders don't need to support the same wide range of use cases as -// general-purpose marshalers, it's possible to make them faster and -// lower-allocation. -// -// Implementations of the ObjectEncoder interface's methods can, of course, -// freely modify the receiver. However, the Clone and EncodeEntry methods will -// be called concurrently and shouldn't modify the receiver. -type Encoder interface { - ObjectEncoder - - // Clone copies the encoder, ensuring that adding fields to the copy doesn't - // affect the original. - Clone() Encoder - - // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. - EncodeEntry(Entry, []Field) (*buffer.Buffer, error) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/entry.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/entry.go deleted file mode 100644 index 7d9893f3..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/entry.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "strings" - "sync" - "time" - - "go.uber.org/zap/internal/bufferpool" - "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" -) - -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) - -func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) - ce.reset() - return ce -} - -func putCheckedEntry(ce *CheckedEntry) { - if ce == nil { - return - } - _cePool.Put(ce) -} - -// NewEntryCaller makes an EntryCaller from the return signature of -// runtime.Caller. -func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { - if !ok { - return EntryCaller{} - } - return EntryCaller{ - PC: pc, - File: file, - Line: line, - Defined: true, - } -} - -// EntryCaller represents the caller of a logging function. -type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int -} - -// String returns the full path and line number of the caller. -func (ec EntryCaller) String() string { - return ec.FullPath() -} - -// FullPath returns a /full/path/to/package/file:line description of the -// caller. -func (ec EntryCaller) FullPath() string { - if !ec.Defined { - return "undefined" - } - buf := bufferpool.Get() - buf.AppendString(ec.File) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// TrimmedPath returns a package/file:line description of the caller, -// preserving only the leaf directory name and file name. -func (ec EntryCaller) TrimmedPath() string { - if !ec.Defined { - return "undefined" - } - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. - // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - // Find the last separator. - // - idx := strings.LastIndexByte(ec.File, '/') - if idx == -1 { - return ec.FullPath() - } - // Find the penultimate separator. - idx = strings.LastIndexByte(ec.File[:idx], '/') - if idx == -1 { - return ec.FullPath() - } - buf := bufferpool.Get() - // Keep everything after the penultimate separator. - buf.AppendString(ec.File[idx+1:]) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// An Entry represents a complete log message. The entry's structured context -// is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. -// -// Entries are pooled, so any functions that accept them MUST be careful not to -// retain references to them. 
-type Entry struct { - Level Level - Time time.Time - LoggerName string - Message string - Caller EntryCaller - Stack string -} - -// CheckWriteAction indicates what action to take after a log entry is -// processed. Actions are ordered in increasing severity. -type CheckWriteAction uint8 - -const ( - // WriteThenNoop indicates that nothing special needs to be done. It's the - // default behavior. - WriteThenNoop CheckWriteAction = iota - // WriteThenPanic causes a panic after Write. - WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. - WriteThenFatal -) - -// CheckedEntry is an Entry together with a collection of Cores that have -// already agreed to log it. -// -// CheckedEntry references should be created by calling AddCore or Should on a -// nil *CheckedEntry. References are returned to a pool after Write, and MUST -// NOT be retained after calling their Write method. -type CheckedEntry struct { - Entry - ErrorOutput WriteSyncer - dirty bool // best-effort detection of pool misuse - should CheckWriteAction - cores []Core -} - -func (ce *CheckedEntry) reset() { - ce.Entry = Entry{} - ce.ErrorOutput = nil - ce.dirty = false - ce.should = WriteThenNoop - for i := range ce.cores { - // don't keep references to cores - ce.cores[i] = nil - } - ce.cores = ce.cores[:0] -} - -// Write writes the entry to the stored Cores, returns any errors, and returns -// the CheckedEntry reference to a pool for immediate re-use. Finally, it -// executes any required CheckWriteAction. -func (ce *CheckedEntry) Write(fields ...Field) { - if ce == nil { - return - } - - if ce.dirty { - if ce.ErrorOutput != nil { - // Make a best effort to detect unsafe re-use of this CheckedEntry. - // If the entry is dirty, log an internal error; because the - // CheckedEntry is being used after it was returned to the pool, - // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) - ce.ErrorOutput.Sync() - } - return - } - ce.dirty = true - - var err error - for i := range ce.cores { - err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) - } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } - } - - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - } -} - -// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be -// used by Core.Check implementations, and is safe to call on nil CheckedEntry -// references. -func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.cores = append(ce.cores, core) - return ce -} - -// Should sets this CheckedEntry's CheckWriteAction, which controls whether a -// Core will panic or fatal after writing this log entry. Like AddCore, it's -// safe to call on nil CheckedEntry references. -func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.should = should - return ce -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/error.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/error.go deleted file mode 100644 index a67c7bac..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/error.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" -) - -// Encodes the given error into fields of an object. A field with the given -// name is added for the error message. -// -// If the error implements fmt.Formatter, a field with the name ${key}Verbose -// is also added with the full verbose error message. -// -// Finally, if the error implements errorGroup (from go.uber.org/multierr) or -// causer (from github.com/pkg/errors), a ${key}Causes field is added with an -// array of objects containing the errors this error was comprised of. -// -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... -// ], -// } -func encodeError(key string, err error, enc ObjectEncoder) error { - basic := err.Error() - enc.AddString(key, basic) - - switch e := err.(type) { - case errorGroup: - return enc.AddArray(key+"Causes", errArray(e.Errors())) - case fmt.Formatter: - verbose := fmt.Sprintf("%+v", e) - if verbose != basic { - // This is a rich error type, like those produced by - // github.com/pkg/errors. - enc.AddString(key+"Verbose", verbose) - } - } - return nil -} - -type errorGroup interface { - // Provides read-only access to the underlying list of errors, preferably - // without causing any allocs. - Errors() []error -} - -type causer interface { - // Provides access to the error that caused this error. - Cause() error -} - -// Note that errArry and errArrayElem are very similar to the version -// implemented in the top-level error.go file. We can't re-use this because -// that would require exporting errArray as part of the zapcore API. - -// Encodes a list of errors using the standard error encoding logic. -type errArray []error - -func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - - el := newErrArrayElem(errs[i]) - arr.AppendObject(el) - el.Free() - } - return nil -} - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Encodes any error into a {"error": ...} re-using the same errors logic. -// -// May be passed in place of an array to build a single-element array. 
-type errArrayElem struct{ err error } - -func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) - e.err = err - return e -} - -func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { - return arr.AppendObject(e) -} - -func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { - return encodeError("error", e.err, enc) -} - -func (e *errArrayElem) Free() { - e.err = nil - _errArrayElemPool.Put(e) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/field.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/field.go deleted file mode 100644 index 6a5e33e2..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/field.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "fmt" - "math" - "reflect" - "time" -) - -// A FieldType indicates which member of the Field union struct should be used -// and how it should be serialized. -type FieldType uint8 - -const ( - // UnknownType is the default field type. Attempting to add it to an encoder will panic. - UnknownType FieldType = iota - // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. - ArrayMarshalerType - // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. - ObjectMarshalerType - // BinaryType indicates that the field carries an opaque binary blob. - BinaryType - // BoolType indicates that the field carries a bool. - BoolType - // ByteStringType indicates that the field carries UTF-8 encoded bytes. - ByteStringType - // Complex128Type indicates that the field carries a complex128. - Complex128Type - // Complex64Type indicates that the field carries a complex128. - Complex64Type - // DurationType indicates that the field carries a time.Duration. - DurationType - // Float64Type indicates that the field carries a float64. - Float64Type - // Float32Type indicates that the field carries a float32. - Float32Type - // Int64Type indicates that the field carries an int64. - Int64Type - // Int32Type indicates that the field carries an int32. - Int32Type - // Int16Type indicates that the field carries an int16. - Int16Type - // Int8Type indicates that the field carries an int8. - Int8Type - // StringType indicates that the field carries a string. - StringType - // TimeType indicates that the field carries a time.Time. - TimeType - // Uint64Type indicates that the field carries a uint64. 
- Uint64Type - // Uint32Type indicates that the field carries a uint32. - Uint32Type - // Uint16Type indicates that the field carries a uint16. - Uint16Type - // Uint8Type indicates that the field carries a uint8. - Uint8Type - // UintptrType indicates that the field carries a uintptr. - UintptrType - // ReflectType indicates that the field carries an interface{}, which should - // be serialized using reflection. - ReflectType - // NamespaceType signals the beginning of an isolated namespace. All - // subsequent fields should be added to the new namespace. - NamespaceType - // StringerType indicates that the field carries a fmt.Stringer. - StringerType - // ErrorType indicates that the field carries an error. - ErrorType - // SkipType indicates that the field is a no-op. - SkipType -) - -// A Field is a marshaling operation used to add a key-value pair to a logger's -// context. Most fields are lazily marshaled, so it's inexpensive to add fields -// to disabled debug-level log statements. -type Field struct { - Key string - Type FieldType - Integer int64 - String string - Interface interface{} -} - -// AddTo exports a field through the ObjectEncoder interface. It's primarily -// useful to library authors, and shouldn't be necessary in most applications. -func (f Field) AddTo(enc ObjectEncoder) { - var err error - - switch f.Type { - case ArrayMarshalerType: - err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) - case ObjectMarshalerType: - err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) - case BinaryType: - enc.AddBinary(f.Key, f.Interface.([]byte)) - case BoolType: - enc.AddBool(f.Key, f.Integer == 1) - case ByteStringType: - enc.AddByteString(f.Key, f.Interface.([]byte)) - case Complex128Type: - enc.AddComplex128(f.Key, f.Interface.(complex128)) - case Complex64Type: - enc.AddComplex64(f.Key, f.Interface.(complex64)) - case DurationType: - enc.AddDuration(f.Key, time.Duration(f.Integer)) - case Float64Type: - enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) - case Float32Type: - enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) - case Int64Type: - enc.AddInt64(f.Key, f.Integer) - case Int32Type: - enc.AddInt32(f.Key, int32(f.Integer)) - case Int16Type: - enc.AddInt16(f.Key, int16(f.Integer)) - case Int8Type: - enc.AddInt8(f.Key, int8(f.Integer)) - case StringType: - enc.AddString(f.Key, f.String) - case TimeType: - if f.Interface != nil { - enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) - } else { - // Fall back to UTC if location is nil. - enc.AddTime(f.Key, time.Unix(0, f.Integer)) - } - case Uint64Type: - enc.AddUint64(f.Key, uint64(f.Integer)) - case Uint32Type: - enc.AddUint32(f.Key, uint32(f.Integer)) - case Uint16Type: - enc.AddUint16(f.Key, uint16(f.Integer)) - case Uint8Type: - enc.AddUint8(f.Key, uint8(f.Integer)) - case UintptrType: - enc.AddUintptr(f.Key, uintptr(f.Integer)) - case ReflectType: - err = enc.AddReflected(f.Key, f.Interface) - case NamespaceType: - enc.OpenNamespace(f.Key) - case StringerType: - enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) - case ErrorType: - encodeError(f.Key, f.Interface.(error), enc) - case SkipType: - break - default: - panic(fmt.Sprintf("unknown field type: %v", f)) - } - - if err != nil { - enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) - } -} - -// Equals returns whether two fields are equal. For non-primitive types such as -// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
-func (f Field) Equals(other Field) bool { - if f.Type != other.Type { - return false - } - if f.Key != other.Key { - return false - } - - switch f.Type { - case BinaryType, ByteStringType: - return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) - case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: - return reflect.DeepEqual(f.Interface, other.Interface) - default: - return f == other - } -} - -func addFields(enc ObjectEncoder, fields []Field) { - for i := range fields { - fields[i].AddTo(enc) - } -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/hook.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/hook.go deleted file mode 100644 index 5db4afb3..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/hook.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type hooked struct { - Core - funcs []func(Entry) error -} - -// RegisterHooks wraps a Core and runs a collection of user-defined callback -// hooks each time a message is logged. Execution of the callbacks is blocking. -// -// This offers users an easy way to register simple callbacks (e.g., metrics -// collection) without implementing the full Core interface. -func RegisterHooks(core Core, hooks ...func(Entry) error) Core { - funcs := append([]func(Entry) error{}, hooks...) - return &hooked{ - Core: core, - funcs: funcs, - } -} - -func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - // Let the wrapped Core decide whether to log this message or not. This - // also gives the downstream a chance to register itself directly with the - // CheckedEntry. - if downstream := h.Core.Check(ent, ce); downstream != nil { - return downstream.AddCore(ent, h) - } - return ce -} - -func (h *hooked) With(fields []Field) Core { - return &hooked{ - Core: h.Core.With(fields), - funcs: h.funcs, - } -} - -func (h *hooked) Write(ent Entry, _ []Field) error { - // Since our downstream had a chance to register itself directly with the - // CheckedMessage, we don't need to call it here. 
- var err error - for i := range h.funcs { - err = multierr.Append(err, h.funcs[i](ent)) - } - return err -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/json_encoder.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/json_encoder.go deleted file mode 100644 index 2dc67d81..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. -const _hex = "0123456789abcdef" - -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - if enc.reflectBuf != nil { - enc.reflectBuf.Free() - } - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - enc.reflectBuf = nil - enc.reflectEnc = nil - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *EncoderConfig - buf *buffer.Buffer - spaced bool // include spaces after colons and commas - openNamespaces int - - // for encoding generic values by reflection - reflectBuf *buffer.Buffer - reflectEnc *json.Encoder -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. -// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. 
-func NewJSONEncoder(cfg EncoderConfig) Encoder { - return newJSONEncoder(cfg, false) -} - -func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: bufferpool.Get(), - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) resetReflectBuf() { - if enc.reflectBuf == nil { - enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - } else { - enc.reflectBuf.Reset() - } -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. 
- enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - enc.EncodeDuration(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. - enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - enc.EncodeTime(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. - enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() Encoder { - clone := enc.clone() - clone.buf.Write(enc.buf.Bytes()) - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - 
clone.openNamespaces = enc.openNamespaces - clone.buf = bufferpool.Get() - return clone -} - -func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - final := enc.clone() - final.buf.AppendByte('{') - - if final.LevelKey != "" { - final.addKey(final.LevelKey) - cur := final.buf.Len() - final.EncodeLevel(ent.Level, final) - if cur == final.buf.Len() { - // User-supplied EncodeLevel was a no-op. Fall back to strings to keep - // output JSON valid. - final.AppendString(ent.Level.String()) - } - } - if final.TimeKey != "" { - final.AddTime(final.TimeKey, ent.Time) - } - if ent.LoggerName != "" && final.NameKey != "" { - final.addKey(final.NameKey) - cur := final.buf.Len() - nameEncoder := final.EncodeName - - // if no name encoder provided, fall back to FullNameEncoder for backwards - // compatibility - if nameEncoder == nil { - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, final) - if cur == final.buf.Len() { - // User-supplied EncodeName was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.LoggerName) - } - } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) - } - } - if final.MessageKey != "" { - final.addKey(enc.MessageKey) - final.AppendString(ent.Message) - } - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } - - ret := final.buf - putJSONEncoder(final) - return ret, nil -} - -func (enc *jsonEncoder) truncate() { - enc.buf.Reset() -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. 
-func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } -} - -// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. -func (enc *jsonEncoder) safeAddByteString(s []byte) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/level.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/level.go deleted file mode 100644 index e575c9f4..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/level.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "errors" - "fmt" -) - -var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") - -// A Level is a logging priority. Higher levels are more important. -type Level int8 - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel Level = iota - 1 - // InfoLevel is the default logging priority. - InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel - // ErrorLevel logs are high-priority. 
If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel - - _minLevel = DebugLevel - _maxLevel = FatalLevel -) - -// String returns a lower-case ASCII representation of the log level. -func (l Level) String() string { - switch l { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warn" - case ErrorLevel: - return "error" - case DPanicLevel: - return "dpanic" - case PanicLevel: - return "panic" - case FatalLevel: - return "fatal" - default: - return fmt.Sprintf("Level(%d)", l) - } -} - -// CapitalString returns an all-caps ASCII representation of the log level. -func (l Level) CapitalString() string { - // Printing levels in all-caps is common enough that we should export this - // functionality. - switch l { - case DebugLevel: - return "DEBUG" - case InfoLevel: - return "INFO" - case WarnLevel: - return "WARN" - case ErrorLevel: - return "ERROR" - case DPanicLevel: - return "DPANIC" - case PanicLevel: - return "PANIC" - case FatalLevel: - return "FATAL" - default: - return fmt.Sprintf("LEVEL(%d)", l) - } -} - -// MarshalText marshals the Level to text. Note that the text representation -// drops the -Level suffix (see example). -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText -// expects the text representation of a Level to drop the -Level suffix (see -// example). -// -// In particular, this makes it easy to configure logging levels using YAML, -// TOML, or JSON files. -func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errUnmarshalNilLevel - } - if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { - return fmt.Errorf("unrecognized level: %q", text) - } - return nil -} - -func (l *Level) unmarshalText(text []byte) bool { - switch string(text) { - case "debug", "DEBUG": - *l = DebugLevel - case "info", "INFO", "": // make the zero value useful - *l = InfoLevel - case "warn", "WARN": - *l = WarnLevel - case "error", "ERROR": - *l = ErrorLevel - case "dpanic", "DPANIC": - *l = DPanicLevel - case "panic", "PANIC": - *l = PanicLevel - case "fatal", "FATAL": - *l = FatalLevel - default: - return false - } - return true -} - -// Set sets the level for the flag.Value interface. -func (l *Level) Set(s string) error { - return l.UnmarshalText([]byte(s)) -} - -// Get gets the level for the flag.Getter interface. -func (l *Level) Get() interface{} { - return *l -} - -// Enabled returns true if the given level is at or above this level. -func (l Level) Enabled(lvl Level) bool { - return lvl >= l -} - -// LevelEnabler decides whether a given logging level is enabled when logging a -// message. -// -// Enablers are intended to be used to implement deterministic filters; -// concerns like sampling are better implemented as a Core. -// -// Each concrete Level value implements a static LevelEnabler which returns -// true for itself and all higher logging levels. For example WarnLevel.Enabled() -// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and -// FatalLevel, but return false for InfoLevel and DebugLevel. 
-type LevelEnabler interface { - Enabled(Level) bool -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/level_strings.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/level_strings.go deleted file mode 100644 index 7af8dadc..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/level_strings.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/zap/internal/color" - -var ( - _levelToColor = map[Level]color.Color{ - DebugLevel: color.Magenta, - InfoLevel: color.Blue, - WarnLevel: color.Yellow, - ErrorLevel: color.Red, - DPanicLevel: color.Red, - PanicLevel: color.Red, - FatalLevel: color.Red, - } - _unknownLevelColor = color.Red - - _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) - _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) -) - -func init() { - for level, color := range _levelToColor { - _levelToLowercaseColorString[level] = color.Add(level.String()) - _levelToCapitalColorString[level] = color.Add(level.CapitalString()) - } -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/marshaler.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/marshaler.go deleted file mode 100644 index 2627a653..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/marshaler.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package zapcore - -// ObjectMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ObjectMarshaler interface { - MarshalLogObject(ObjectEncoder) error -} - -// ObjectMarshalerFunc is a type adapter that turns a function into an -// ObjectMarshaler. -type ObjectMarshalerFunc func(ObjectEncoder) error - -// MarshalLogObject calls the underlying function. -func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { - return f(enc) -} - -// ArrayMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ArrayMarshaler interface { - MarshalLogArray(ArrayEncoder) error -} - -// ArrayMarshalerFunc is a type adapter that turns a function into an -// ArrayMarshaler. -type ArrayMarshalerFunc func(ArrayEncoder) error - -// MarshalLogArray calls the underlying function. -func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { - return f(enc) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/memory_encoder.go deleted file mode 100644 index 6ef85b09..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "time" - -// MapObjectEncoder is an ObjectEncoder backed by a simple -// map[string]interface{}. It's not fast enough for production use, but it's -// helpful in tests. -type MapObjectEncoder struct { - // Fields contains the entire encoded log context. - Fields map[string]interface{} - // cur is a pointer to the namespace we're currently writing to. - cur map[string]interface{} -} - -// NewMapObjectEncoder creates a new map-backed ObjectEncoder. -func NewMapObjectEncoder() *MapObjectEncoder { - m := make(map[string]interface{}) - return &MapObjectEncoder{ - Fields: m, - cur: m, - } -} - -// AddArray implements ObjectEncoder. -func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { - arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} - err := v.MarshalLogArray(arr) - m.cur[key] = arr.elems - return err -} - -// AddObject implements ObjectEncoder. 
-func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { - newMap := NewMapObjectEncoder() - m.cur[k] = newMap.Fields - return v.MarshalLogObject(newMap) -} - -// AddBinary implements ObjectEncoder. -func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } - -// AddByteString implements ObjectEncoder. -func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } - -// AddBool implements ObjectEncoder. -func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } - -// AddDuration implements ObjectEncoder. -func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } - -// AddComplex128 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } - -// AddComplex64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } - -// AddFloat64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } - -// AddFloat32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } - -// AddInt implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } - -// AddInt64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } - -// AddInt32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } - -// AddInt16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } - -// AddInt8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } - -// AddString implements ObjectEncoder. -func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } - -// AddTime implements ObjectEncoder. -func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } - -// AddUint implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } - -// AddUint64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } - -// AddUint32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } - -// AddUint16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } - -// AddUint8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } - -// AddUintptr implements ObjectEncoder. -func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } - -// AddReflected implements ObjectEncoder. -func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { - m.cur[k] = v - return nil -} - -// OpenNamespace implements ObjectEncoder. -func (m *MapObjectEncoder) OpenNamespace(k string) { - ns := make(map[string]interface{}) - m.cur[k] = ns - m.cur = ns -} - -// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like -// the MapObjectEncoder, it's not designed for production use. 
-type sliceArrayEncoder struct { - elems []interface{} -} - -func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { - enc := &sliceArrayEncoder{} - err := v.MarshalLogArray(enc) - s.elems = append(s.elems, enc.elems) - return err -} - -func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { - m := NewMapObjectEncoder() - err := v.MarshalLogObject(m) - s.elems = append(s.elems, m.Fields) - return err -} - -func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { - s.elems = append(s.elems, v) - return nil -} - -func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/sampler.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/sampler.go deleted file mode 100644 index e3164186..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/sampler.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/atomic" -) - -const ( - _numLevels = _maxLevel - _minLevel + 1 - _countersPerLevel = 4096 -) - -type counter struct { - resetAt atomic.Int64 - counter atomic.Uint64 -} - -type counters [_numLevels][_countersPerLevel]counter - -func newCounters() *counters { - return &counters{} -} - -func (cs *counters) get(lvl Level, key string) *counter { - i := lvl - _minLevel - j := fnv32a(key) % _countersPerLevel - return &cs[i][j] -} - -// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc -func fnv32a(s string) uint32 { - const ( - offset32 = 2166136261 - prime32 = 16777619 - ) - hash := uint32(offset32) - for i := 0; i < len(s); i++ { - hash ^= uint32(s[i]) - hash *= prime32 - } - return hash -} - -func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { - tn := t.UnixNano() - resetAfter := c.resetAt.Load() - if resetAfter > tn { - return c.counter.Inc() - } - - c.counter.Store(1) - - newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { - // We raced with another goroutine trying to reset, and it also reset - // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() - } - - return 1 -} - -type sampler struct { - Core - - counts *counters - tick time.Duration - first, thereafter uint64 -} - -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. -func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ - Core: core, - tick: tick, - counts: newCounters(), - first: uint64(first), - thereafter: uint64(thereafter), - } -} - -func (s *sampler) With(fields []Field) Core { - return &sampler{ - Core: s.Core.With(fields), - tick: s.tick, - counts: s.counts, - first: s.first, - thereafter: s.thereafter, - } -} - -func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !s.Enabled(ent.Level) { - return ce - } - - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - return ce - } - return s.Core.Check(ent, ce) -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/tee.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/tee.go deleted file mode 100644 index 07a32eef..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/tee.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type multiCore []Core - -// NewTee creates a Core that duplicates log entries into two or more -// underlying Cores. -// -// Calling it with a single Core returns the input unchanged, and calling -// it with no input returns a no-op Core. -func NewTee(cores ...Core) Core { - switch len(cores) { - case 0: - return NewNopCore() - case 1: - return cores[0] - default: - return multiCore(cores) - } -} - -func (mc multiCore) With(fields []Field) Core { - clone := make(multiCore, len(mc)) - for i := range mc { - clone[i] = mc[i].With(fields) - } - return clone -} - -func (mc multiCore) Enabled(lvl Level) bool { - for i := range mc { - if mc[i].Enabled(lvl) { - return true - } - } - return false -} - -func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - for i := range mc { - ce = mc[i].Check(ent, ce) - } - return ce -} - -func (mc multiCore) Write(ent Entry, fields []Field) error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Write(ent, fields)) - } - return err -} - -func (mc multiCore) Sync() error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Sync()) - } - return err -} diff --git a/pkg/c8y/vendor/go.uber.org/zap/zapcore/write_syncer.go b/pkg/c8y/vendor/go.uber.org/zap/zapcore/write_syncer.go deleted file mode 100644 index 209e25fe..00000000 --- a/pkg/c8y/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "io" - "sync" - - "go.uber.org/multierr" -) - -// A WriteSyncer is an io.Writer that can also flush any buffered data. Note -// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. -type WriteSyncer interface { - io.Writer - Sync() error -} - -// AddSync converts an io.Writer to a WriteSyncer. It attempts to be -// intelligent: if the concrete type of the io.Writer implements WriteSyncer, -// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. -func AddSync(w io.Writer) WriteSyncer { - switch w := w.(type) { - case WriteSyncer: - return w - default: - return writerWrapper{w} - } -} - -type lockedWriteSyncer struct { - sync.Mutex - ws WriteSyncer -} - -// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In -// particular, *os.Files must be locked before use. -func Lock(ws WriteSyncer) WriteSyncer { - if _, ok := ws.(*lockedWriteSyncer); ok { - // no need to layer on another lock - return ws - } - return &lockedWriteSyncer{ws: ws} -} - -func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { - s.Lock() - n, err := s.ws.Write(bs) - s.Unlock() - return n, err -} - -func (s *lockedWriteSyncer) Sync() error { - s.Lock() - err := s.ws.Sync() - s.Unlock() - return err -} - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -type multiWriteSyncer []WriteSyncer - -// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes -// and sync calls, much like io.MultiWriter. -func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { - if len(ws) == 1 { - return ws[0] - } - // Copy to protect against https://github.com/golang/go/issues/7809 - return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) -} - -// See https://golang.org/src/io/multi.go -// When not all underlying syncers write the same number of bytes, -// the smallest number is returned even though Write() is called on -// all of them. -func (ws multiWriteSyncer) Write(p []byte) (int, error) { - var writeErr error - nWritten := 0 - for _, w := range ws { - n, err := w.Write(p) - writeErr = multierr.Append(writeErr, err) - if nWritten == 0 && n != 0 { - nWritten = n - } else if n < nWritten { - nWritten = n - } - } - return nWritten, writeErr -} - -func (ws multiWriteSyncer) Sync() error { - var err error - for _, w := range ws { - err = multierr.Append(err, w.Sync()) - } - return err -} diff --git a/pkg/c8y/vendor/golang.org/x/net/AUTHORS b/pkg/c8y/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/pkg/c8y/vendor/golang.org/x/net/CONTRIBUTORS b/pkg/c8y/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
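The zapcore files deleted above (sampler.go, tee.go, write_syncer.go) document how zap composes logging sinks: AddSync/Lock wrap writers, NewMultiWriteSyncer and NewTee fan out, and NewSampler rate-limits repeated entries. Purely as an illustrative sketch of those APIs, and not part of this patch, a consumer resolving go.uber.org/zap through go.mod instead of this vendor tree might wire them together like this:

// Sketch only: composes the zapcore primitives described in the removed files.
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Wrap stdout/stderr so they satisfy WriteSyncer and are safe for
	// concurrent use, as the Lock doc comment recommends for *os.File.
	stdout := zapcore.Lock(zapcore.AddSync(os.Stdout))
	stderr := zapcore.Lock(zapcore.AddSync(os.Stderr))

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Duplicate entries to both sinks (NewTee) and sample repeated messages
	// (NewSampler): log the first 100 per second, then every 100th.
	core := zapcore.NewSampler(
		zapcore.NewTee(
			zapcore.NewCore(enc, stdout, zapcore.InfoLevel),
			zapcore.NewCore(enc, stderr, zapcore.ErrorLevel),
		),
		time.Second, 100, 100,
	)

	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("logging via module-resolved zap rather than the vendored copy")
}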
diff --git a/pkg/c8y/vendor/golang.org/x/net/LICENSE b/pkg/c8y/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/golang.org/x/net/PATENTS b/pkg/c8y/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. 
diff --git a/pkg/c8y/vendor/golang.org/x/net/context/context.go b/pkg/c8y/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d3..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/pkg/c8y/vendor/golang.org/x/net/context/go17.go b/pkg/c8y/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b7..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. 
-var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/pkg/c8y/vendor/golang.org/x/net/context/go19.go b/pkg/c8y/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1db..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc = context.CancelFunc diff --git a/pkg/c8y/vendor/golang.org/x/net/context/pre_go17.go b/pkg/c8y/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592d..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/pkg/c8y/vendor/golang.org/x/net/context/pre_go19.go b/pkg/c8y/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80b..00000000 --- a/pkg/c8y/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. 
- // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/pkg/c8y/vendor/gopkg.in/tomb.v2/LICENSE b/pkg/c8y/vendor/gopkg.in/tomb.v2/LICENSE deleted file mode 100644 index a4249bb3..00000000 --- a/pkg/c8y/vendor/gopkg.in/tomb.v2/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -tomb - support for clean goroutine termination in Go. - -Copyright (c) 2010-2011 - Gustavo Niemeyer - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/c8y/vendor/gopkg.in/tomb.v2/README.md b/pkg/c8y/vendor/gopkg.in/tomb.v2/README.md deleted file mode 100644 index e7f282b5..00000000 --- a/pkg/c8y/vendor/gopkg.in/tomb.v2/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Installation and usage ----------------------- - -See [gopkg.in/tomb.v2](https://gopkg.in/tomb.v2) for documentation and usage details. diff --git a/pkg/c8y/vendor/gopkg.in/tomb.v2/context.go b/pkg/c8y/vendor/gopkg.in/tomb.v2/context.go deleted file mode 100644 index f0fe56f5..00000000 --- a/pkg/c8y/vendor/gopkg.in/tomb.v2/context.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build go1.7 - -package tomb - -import ( - "context" -) - -// WithContext returns a new tomb that is killed when the provided parent -// context is canceled, and a copy of parent with a replaced Done channel -// that is closed when either the tomb is dying or the parent is canceled. -// The returned context may also be obtained via the tomb's Context method. -func WithContext(parent context.Context) (*Tomb, context.Context) { - var t Tomb - t.init() - if parent.Done() != nil { - go func() { - select { - case <-t.Dying(): - case <-parent.Done(): - t.Kill(parent.Err()) - } - }() - } - t.parent = parent - child, cancel := context.WithCancel(parent) - t.addChild(parent, child, cancel) - return &t, child -} - -// Context returns a context that is a copy of the provided parent context with -// a replaced Done channel that is closed when either the tomb is dying or the -// parent is cancelled. -// -// If parent is nil, it defaults to the parent provided via WithContext, or an -// empty background parent if the tomb wasn't created via WithContext. 
-func (t *Tomb) Context(parent context.Context) context.Context { - t.init() - t.m.Lock() - defer t.m.Unlock() - - if parent == nil { - if t.parent == nil { - t.parent = context.Background() - } - parent = t.parent.(context.Context) - } - - if child, ok := t.child[parent]; ok { - return child.context.(context.Context) - } - - child, cancel := context.WithCancel(parent) - t.addChild(parent, child, cancel) - return child -} - -func (t *Tomb) addChild(parent context.Context, child context.Context, cancel func()) { - if t.reason != ErrStillAlive { - cancel() - return - } - if t.child == nil { - t.child = make(map[interface{}]childContext) - } - t.child[parent] = childContext{child, cancel, child.Done()} - for parent, child := range t.child { - select { - case <-child.done: - delete(t.child, parent) - default: - } - } -} diff --git a/pkg/c8y/vendor/gopkg.in/tomb.v2/context16.go b/pkg/c8y/vendor/gopkg.in/tomb.v2/context16.go deleted file mode 100644 index d47d83a5..00000000 --- a/pkg/c8y/vendor/gopkg.in/tomb.v2/context16.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !go1.7 - -package tomb - -import ( - "golang.org/x/net/context" -) - -// WithContext returns a new tomb that is killed when the provided parent -// context is canceled, and a copy of parent with a replaced Done channel -// that is closed when either the tomb is dying or the parent is canceled. -// The returned context may also be obtained via the tomb's Context method. -func WithContext(parent context.Context) (*Tomb, context.Context) { - var t Tomb - t.init() - if parent.Done() != nil { - go func() { - select { - case <-t.Dying(): - case <-parent.Done(): - t.Kill(parent.Err()) - } - }() - } - t.parent = parent - child, cancel := context.WithCancel(parent) - t.addChild(parent, child, cancel) - return &t, child -} - -// Context returns a context that is a copy of the provided parent context with -// a replaced Done channel that is closed when either the tomb is dying or the -// parent is cancelled. -// -// If parent is nil, it defaults to the parent provided via WithContext, or an -// empty background parent if the tomb wasn't created via WithContext. -func (t *Tomb) Context(parent context.Context) context.Context { - t.init() - t.m.Lock() - defer t.m.Unlock() - - if parent == nil { - if t.parent == nil { - t.parent = context.Background() - } - parent = t.parent.(context.Context) - } - - if child, ok := t.child[parent]; ok { - return child.context.(context.Context) - } - - child, cancel := context.WithCancel(parent) - t.addChild(parent, child, cancel) - return child -} - -func (t *Tomb) addChild(parent context.Context, child context.Context, cancel func()) { - if t.reason != ErrStillAlive { - cancel() - return - } - if t.child == nil { - t.child = make(map[interface{}]childContext) - } - t.child[parent] = childContext{child, cancel, child.Done()} - for parent, child := range t.child { - select { - case <-child.done: - delete(t.child, parent) - default: - } - } -} diff --git a/pkg/c8y/vendor/gopkg.in/tomb.v2/tomb.go b/pkg/c8y/vendor/gopkg.in/tomb.v2/tomb.go deleted file mode 100644 index 069b3058..00000000 --- a/pkg/c8y/vendor/gopkg.in/tomb.v2/tomb.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) 2011 - Gustavo Niemeyer -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of the copyright holder nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The tomb package handles clean goroutine tracking and termination. -// -// The zero value of a Tomb is ready to handle the creation of a tracked -// goroutine via its Go method, and then any tracked goroutine may call -// the Go method again to create additional tracked goroutines at -// any point. -// -// If any of the tracked goroutines returns a non-nil error, or the -// Kill or Killf method is called by any goroutine in the system (tracked -// or not), the tomb Err is set, Alive is set to false, and the Dying -// channel is closed to flag that all tracked goroutines are supposed -// to willingly terminate as soon as possible. -// -// Once all tracked goroutines terminate, the Dead channel is closed, -// and Wait unblocks and returns the first non-nil error presented -// to the tomb via a result or an explicit Kill or Killf method call, -// or nil if there were no errors. -// -// It is okay to create further goroutines via the Go method while -// the tomb is in a dying state. The final dead state is only reached -// once all tracked goroutines terminate, at which point calling -// the Go method again will cause a runtime panic. -// -// Tracked functions and methods that are still running while the tomb -// is in dying state may choose to return ErrDying as their error value. -// This preserves the well established non-nil error convention, but is -// understood by the tomb as a clean termination. The Err and Wait -// methods will still return nil if all observed errors were either -// nil or ErrDying. -// -// For background and a detailed example, see the following blog post: -// -// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control -// -package tomb - -import ( - "errors" - "fmt" - "sync" -) - -// A Tomb tracks the lifecycle of one or more goroutines as alive, -// dying or dead, and the reason for their death. -// -// See the package documentation for details. 
-type Tomb struct { - m sync.Mutex - alive int - dying chan struct{} - dead chan struct{} - reason error - - // context.Context is available in Go 1.7+. - parent interface{} - child map[interface{}]childContext -} - -type childContext struct { - context interface{} - cancel func() - done <-chan struct{} -} - -var ( - ErrStillAlive = errors.New("tomb: still alive") - ErrDying = errors.New("tomb: dying") -) - -func (t *Tomb) init() { - t.m.Lock() - if t.dead == nil { - t.dead = make(chan struct{}) - t.dying = make(chan struct{}) - t.reason = ErrStillAlive - } - t.m.Unlock() -} - -// Dead returns the channel that can be used to wait until -// all goroutines have finished running. -func (t *Tomb) Dead() <-chan struct{} { - t.init() - return t.dead -} - -// Dying returns the channel that can be used to wait until -// t.Kill is called. -func (t *Tomb) Dying() <-chan struct{} { - t.init() - return t.dying -} - -// Wait blocks until all goroutines have finished running, and -// then returns the reason for their death. -func (t *Tomb) Wait() error { - t.init() - <-t.dead - t.m.Lock() - reason := t.reason - t.m.Unlock() - return reason -} - -// Go runs f in a new goroutine and tracks its termination. -// -// If f returns a non-nil error, t.Kill is called with that -// error as the death reason parameter. -// -// It is f's responsibility to monitor the tomb and return -// appropriately once it is in a dying state. -// -// It is safe for the f function to call the Go method again -// to create additional tracked goroutines. Once all tracked -// goroutines return, the Dead channel is closed and the -// Wait method unblocks and returns the death reason. -// -// Calling the Go method after all tracked goroutines return -// causes a runtime panic. For that reason, calling the Go -// method a second time out of a tracked goroutine is unsafe. -func (t *Tomb) Go(f func() error) { - t.init() - t.m.Lock() - defer t.m.Unlock() - select { - case <-t.dead: - panic("tomb.Go called after all goroutines terminated") - default: - } - t.alive++ - go t.run(f) -} - -func (t *Tomb) run(f func() error) { - err := f() - t.m.Lock() - defer t.m.Unlock() - t.alive-- - if t.alive == 0 || err != nil { - t.kill(err) - if t.alive == 0 { - close(t.dead) - } - } -} - -// Kill puts the tomb in a dying state for the given reason, -// closes the Dying channel, and sets Alive to false. -// -// Althoguh Kill may be called multiple times, only the first -// non-nil error is recorded as the death reason. -// -// If reason is ErrDying, the previous reason isn't replaced -// even if nil. It's a runtime error to call Kill with ErrDying -// if t is not in a dying state. -func (t *Tomb) Kill(reason error) { - t.init() - t.m.Lock() - defer t.m.Unlock() - t.kill(reason) -} - -func (t *Tomb) kill(reason error) { - if reason == ErrStillAlive { - panic("tomb: Kill with ErrStillAlive") - } - if reason == ErrDying { - if t.reason == ErrStillAlive { - panic("tomb: Kill with ErrDying while still alive") - } - return - } - if t.reason == ErrStillAlive { - t.reason = reason - close(t.dying) - for _, child := range t.child { - child.cancel() - } - t.child = nil - return - } - if t.reason == nil { - t.reason = reason - return - } -} - -// Killf calls the Kill method with an error built providing the received -// parameters to fmt.Errorf. The generated error is also returned. -func (t *Tomb) Killf(f string, a ...interface{}) error { - err := fmt.Errorf(f, a...) 
- t.Kill(err) - return err -} - -// Err returns the death reason, or ErrStillAlive if the tomb -// is not in a dying or dead state. -func (t *Tomb) Err() (reason error) { - t.init() - t.m.Lock() - reason = t.reason - t.m.Unlock() - return -} - -// Alive returns true if the tomb is not in a dying or dead state. -func (t *Tomb) Alive() bool { - return t.Err() == ErrStillAlive -} diff --git a/pkg/c8y/vendor/modules.txt b/pkg/c8y/vendor/modules.txt deleted file mode 100644 index 4893c68b..00000000 --- a/pkg/c8y/vendor/modules.txt +++ /dev/null @@ -1,33 +0,0 @@ -# github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 -github.com/cihub/seelog -github.com/cihub/seelog/archive -github.com/cihub/seelog/archive/gzip -github.com/cihub/seelog/archive/tar -github.com/cihub/seelog/archive/zip -# github.com/google/go-querystring v1.0.0 -github.com/google/go-querystring/query -# github.com/gorilla/websocket v1.4.0 -github.com/gorilla/websocket -# github.com/obeattie/ohmyglob v0.0.0-20150811221449-290764208a0d -github.com/obeattie/ohmyglob -# github.com/pkg/errors v0.8.1 -github.com/pkg/errors -# github.com/tidwall/gjson v1.1.3 -github.com/tidwall/gjson -# github.com/tidwall/match v0.0.0-20171002075945-1731857f09b1 -github.com/tidwall/match -# go.uber.org/atomic v1.3.2 -go.uber.org/atomic -# go.uber.org/multierr v1.1.0 -go.uber.org/multierr -# go.uber.org/zap v1.9.1 -go.uber.org/zap -go.uber.org/zap/internal/bufferpool -go.uber.org/zap/zapcore -go.uber.org/zap/buffer -go.uber.org/zap/internal/color -go.uber.org/zap/internal/exit -# golang.org/x/net v0.0.0-20181201002055-351d144fa1fc -golang.org/x/net/context -# gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 -gopkg.in/tomb.v2
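The tomb.v2 files removed above describe a goroutine-lifecycle pattern: a zero-value Tomb tracks goroutines started with Go, Kill puts it in a dying state, and Wait blocks until every tracked goroutine returns. As a minimal sketch of that pattern (assuming the dependency is now resolved through go.mod rather than this vendor tree), usage looks roughly like this:

// Sketch only: the Go/Kill/Wait lifecycle documented in the removed tomb.go.
package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v2"
)

func main() {
	var t tomb.Tomb // zero value is ready to track goroutines

	// Track a worker; it exits cleanly once the tomb starts dying.
	t.Go(func() error {
		for {
			select {
			case <-t.Dying():
				return nil // clean termination; Wait() reports nil
			case <-time.After(100 * time.Millisecond):
				fmt.Println("tick")
			}
		}
	})

	time.Sleep(300 * time.Millisecond)
	t.Kill(nil) // ask tracked goroutines to stop
	if err := t.Wait(); err != nil {
		fmt.Println("worker failed:", err)
	}
}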