diff --git a/.gitignore b/.gitignore index fbdeb9e7..31c60b22 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ plik plikd clients +server/plik.db server/files server/public/public clients @@ -11,13 +12,12 @@ server/common/version.go release releases debs -plik_*.cfg +plikd_*.cfg *bower_components *node_modules *.log - -plik_refactor - +*.key +*.crt # IntelliJ IDEA .idea plik.iml diff --git a/.travis.yml b/.travis.yml index 5e5ab5db..431baa81 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,14 +1,13 @@ language: go go: - - 1.3 - - 1.4 - - tip + - 1.5.2 before_install: - npm install -g bower -before_script: - - make client - - make server - - cd server +before_script: + - go get -u github.com/golang/lint/golint + +script: + - make test && make \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index c9fa5292..d5ed3f53 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ # Let's start with a fresh debian jessie FROM debian:jessie -# Some generic informations +# Some generic information MAINTAINER Charles-Antoine Mathieu MAINTAINER Mathieu Bodjikian @@ -17,7 +17,7 @@ RUN useradd -U -d /home/plik -m -s /bin/false plik # Expose the plik port EXPOSE 8080 -# Copy plik +# Copy plik ADD server /home/plik/server/ ADD clients /home/plik/clients/ RUN chown -R plik:plik /home/plik diff --git a/Makefile b/Makefile index aecf682f..2bcdc1e1 100644 --- a/Makefile +++ b/Makefile @@ -24,11 +24,11 @@ # THE SOFTWARE. ### -RELEASE_VERSION="1.1.1" +RELEASE_VERSION="1.2-RC1" RELEASE_DIR="release/plik-$(RELEASE_VERSION)" RELEASE_TARGETS=darwin-386 darwin-amd64 freebsd-386 \ freebsd-amd64 linux-386 linux-amd64 linux-arm openbsd-386 \ -openbsd-amd64 +openbsd-amd64 windows-amd64 windows-386 GOHOSTOS=`go env GOHOSTOS` GOHOSTARCH=`go env GOHOSTARCH` @@ -36,7 +36,7 @@ GOHOSTARCH=`go env GOHOSTARCH` DEBROOT_SERVER=debs/server DEBROOT_CLIENT=debs/client -all: clean frontend clients server +all: clean clean-frontend frontend clients server ### # Build frontend ressources @@ -44,7 +44,7 @@ all: clean frontend clients server frontend: @if [ ! -d server/public/node_modules ]; then cd server/public && npm install ; fi @if [ ! -d server/public/bower_components ]; then cd server/public && node_modules/bower/bin/bower install --allow-root ; fi - @if [ ! -d server/public/public ]; then cd server/public && node_modules/grunt-cli/bin/grunt ; fi ; + @if [ ! 
-d server/public/public ]; then cd server/public && node_modules/grunt-cli/bin/grunt ; fi

 ###
@@ -66,11 +66,32 @@ servers: frontend
 		export GOARCH=`echo $$target | cut -d "-" -f 2`; \
 		mkdir -p ../servers/$$target; \
 		if [ $$GOOS = "windows" ] ; then SERVER_PATH=$$SERVER_DIR/plikd.exe ; fi ; \
+		if [ -e $$SERVER_PATH ] ; then continue ; fi ; \
 		echo "Compiling plik server for $$target to $$SERVER_PATH"; \
 		go build -o $$SERVER_PATH ; \
 	done
 	@sed -i -e "s/$(RELEASE_VERSION)/##VERSION##/g" server/common/config.go
+
+###
+# Build plik utils for all architectures
+###
+utils: servers
+	@cd utils && for util in `ls *.go` ; do \
+		for target in $(RELEASE_TARGETS) ; do \
+			UTIL_DIR=../servers/$$target/utils; \
+			UTIL_BASE=`basename $$util .go`; \
+			UTIL_PATH=$$UTIL_DIR/$$UTIL_BASE; \
+			mkdir -p $$UTIL_DIR; \
+			export GOOS=`echo $$target | cut -d "-" -f 1`; \
+			if [ $$GOOS = "windows" ] ; then UTIL_PATH=$$UTIL_DIR/$$UTIL_BASE.exe ; fi ; \
+			if [ -e $$UTIL_PATH ] ; then continue ; fi ; \
+			echo "Compiling plik util $$UTIL_BASE for $$target to $$UTIL_PATH"; \
+			go build -o $$UTIL_PATH $$util ; \
+		done ; \
+	done
+
+
 ###
 # Build plik client for the current architecture
 ###
@@ -91,6 +112,7 @@ clients:
 		export GOARCH=`echo $$target | cut -d "-" -f 2`; \
 		mkdir -p $$CLIENT_DIR; \
 		if [ $$GOOS = "windows" ] ; then CLIENT_PATH=$$CLIENT_DIR/plik.exe ; fi ; \
+		if [ -e $$CLIENT_PATH ] ; then continue ; fi ; \
 		echo "Compiling plik client for $$target to $$CLIENT_PATH"; \
 		go build -o $$CLIENT_PATH ; \
 		md5sum $$CLIENT_PATH | awk '{print $$1}' > $$CLIENT_MD5; \
@@ -102,7 +124,7 @@ clients:
 ##
 docker: release
 	@cp Dockerfile $(RELEASE_DIR)
-	@cd $(RELEASE_DIR) && docker build -t plik .
+	@cd $(RELEASE_DIR) && docker build -t rootgg/plik .

 ###
 # Make server and clients Debian packages
@@ -160,6 +182,7 @@ debs-client: clients
 ###
 release-template: clean frontend clients
 	@mkdir -p $(RELEASE_DIR)/server/public
+	@mkdir -p $(RELEASE_DIR)/server/utils
 	@cp -R clients $(RELEASE_DIR)
 	@cp -R server/plikd.cfg $(RELEASE_DIR)/server
@@ -170,6 +193,7 @@ release-template: clean frontend clients
 	@cp -R server/public/partials $(RELEASE_DIR)/server/public
 	@cp -R server/public/public $(RELEASE_DIR)/server/public
 	@cp -R server/public/index.html $(RELEASE_DIR)/server/public
+	@cp -R server/public/favicon.ico $(RELEASE_DIR)/server/public

 ###
@@ -183,22 +207,30 @@ release: release-template server
 ###
 # Build release archives for all architectures
 ###
-releases: release-template servers
+releases: release-template servers utils
 	@mkdir -p releases
 	@cd release && for target in $(RELEASE_TARGETS) ; do \
 		SERVER_PATH=../servers/$$target/plikd; \
+		UTIL_DIR=../servers/$$target/utils; \
 		OS=`echo $$target | cut -d "-" -f 1`; \
 		ARCH=`echo $$target | cut -d "-" -f 2`; \
 		if [ $$OS = "darwin" ] ; then OS="macos" ; fi ; \
 		if [ $$OS = "windows" ] ; then SERVER_PATH=../servers/$$target/plikd.exe ; fi ; \
 		if [ $$ARCH = "386" ] ; then ARCH="32bits" ; fi ; \
 		if [ $$ARCH = "amd64" ] ; then ARCH="64bits" ; fi ; \
-		TARBALL_NAME=plik-$(RELEASE_VERSION)-$$OS-$$ARCH.tar.gz; \
-		echo "Packaging plik release for $$target to $$TARBALL_NAME"; \
 		cp -R $$SERVER_PATH plik-$(RELEASE_VERSION)/server; \
-		tar czvf ../releases/$$TARBALL_NAME plik-$(RELEASE_VERSION); \
+		cp -R $$UTIL_DIR plik-$(RELEASE_VERSION)/server; \
+		if [ $$OS = "windows" ] ; then \
+			TARBALL_NAME=plik-$(RELEASE_VERSION)-$$OS-$$ARCH.zip; \
+			echo "Packaging plik release for $$target to $$TARBALL_NAME"; \
+			zip -r ../releases/$$TARBALL_NAME plik-$(RELEASE_VERSION); \
+		else \
+
TARBALL_NAME=plik-$(RELEASE_VERSION)-$$OS-$$ARCH.tar.gz; \ + echo "Packaging plik release for $$target to $$TARBALL_NAME"; \ + tar czvf ../releases/$$TARBALL_NAME plik-$(RELEASE_VERSION); \ + fi \ done @md5sum releases/* > releases/md5sum.txt @@ -223,21 +255,26 @@ test: done; \ echo -n "go vet $$directory : "; \ VET=`go vet ./... 2>&1`; \ - if [ $$? = 0 ] ; then echo "OK" ; else echo "FAIL" && echo $$VET && ERR="1" ; fi ; \ + if [ $$? = 0 ] ; then echo "OK" ; else echo "FAIL" && echo "$$VET" && ERR="1" ; fi ; \ echo -n "go lint $$directory : "; \ LINT=`golint ./...`; \ - if [ "$$LINT" = "" ] ; then echo "OK" ; else echo "FAIL" && echo $$LINT && ERR="1" ; fi ; \ + if [ "$$LINT" = "" ] ; then echo "OK" ; else echo "FAIL" && echo "$$LINT" && ERR="1" ; fi ; \ cd - 2>&1 > /dev/null; \ done ; if [ "$$ERR" = "1" ] ; then exit 1 ; fi @echo "cli client integration tests :\n" && cd client && ./test.sh +### +# Remove frontend build files +### +clean-frontend: + @rm -rf server/public/bower_components + @rm -rf server/public/public + ### # Remove all build files ### clean: @rm -rf server/common/version.go - @rm -rf server/public/bower_components - @rm -rf server/public/public @rm -rf server/plikd @rm -rf client/plik @rm -rf clients @@ -246,6 +283,11 @@ clean: @rm -rf release @rm -rf releases +### +# Remove all build files and node modules +### +clean-all: clean + @rm -rf server/public/node_modules ### # Since the client/server directories are not generated diff --git a/README.md b/README.md index 7f3226a7..256099dc 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Plik is an simple and powerful file uploading system written in golang. ### Main features - Multiple data backends : File, OpenStack Swift, WeedFS - - Multiple metadata backends : File, MongoDB + - Multiple metadata backends : File, MongoDB, Bolt - Shorten backends : Shorten upload urls (is.gd && w000t.me available) - OneShot : Files are destructed after the first download - Stream : Files are streamed from the uploader to the downloader (nothing stored server side) @@ -17,9 +17,11 @@ Plik is an simple and powerful file uploading system written in golang. - Password : Protect upload with login/password (Auth Basic) - Yubikey : Protect upload with your yubikey. (One Time Password) - Comments : Add custom message (in Markdown format) + - User authentication : Google / OVH + - Upload restriction : Source IP / Token ### Version -1.1.1 +1.2-RC1 ### Installation @@ -74,138 +76,6 @@ To make release archives : $ make releases ``` - -### Docker -Plik comes with a simple Dockerfile that allows you to run it in a container. - -First, you need to build the docker image : -```sh -$ make docker -``` - -Then you can run an instance and map the local port 80 to the plik port : -```sh -$ docker run -t -d -p 80:8080 plik -ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9 -``` - -You can also use a volume to store uploads on a local folder. 
-Here, we map local folder /data to the /home/plik/server/files folder of the container (this is the default uploads directory) : -```sh -$ docker run -t -d -p 80:8080 -v /data:/home/plik/server/files plik -ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9 -``` - -To use a different config file, you can also map a single file to the container at runtime : -```sh -$ docker run -t -d -p 80:8080 -v plikd.cfg:/home/plik/server/plikd.cfg plik -ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9 -``` - -### API -Plik server expose a REST-full API to manage uploads and get files : - -Get and create upload : - - - **POST** /upload - - Params (json object in request body) : - - oneshot (bool) - - stream (bool) - - removable (bool) - - ttl (int) - - login (string) - - password (string) - - files (see below) - - Return : - JSON formatted upload object. - Important fields : - - id (required to upload files) - - uploadToken (required to upload/remove files) - - files (see below) - - For stream mode you need to know the file id before the upload starts as it will block. - File size and/or file type also need to be known before the upload starts as they have to be printed - in HTTP response headers. - To get the file ids pass a "files" json object with each file you are about to upload. - Fill the reference field with an arbitrary string to avoid matching file ids using the fileName field. - This is also used to notify of MISSING files when file upload is not yet finished or has failed. - ``` - "files" : { - "0" : { - "fileName": "file.txt", - "fileSize": 12345, - "fileType": "text/plain", - "reference": "0" - },... - } - ``` - - **GET** /upload/:uploadid: - - Get upload metadata (files list, upload date, ttl,...) - -Upload file : - - - **POST** /$mode/:uploadid:/:fileid:/:filename: - - Request body must be a multipart request with a part named "file" containing file data. - - - **POST** /file/:uploadid: - - Same as above without passing file id, won't work for stream mode. - -Get file : - - - **HEAD** /$mode/:uploadid:/:fileid:/:filename: - - Returns only HTTP headers. Usefull to know Content-Type and Content-Type without downloading the file. Especially if upload has OneShot option enabled. - - - **GET** /$mode/:uploadid:/:fileid:/:filename: - - Download file. Filename **MUST** match. A browser, might try to display the file if it's a jpeg for example. You may try to force download with ?dl=1 in url. - - - **GET** /$mode/:uploadid:/:fileid:/:filename:/yubikey/:yubikeyOtp: - - Same as previous call, except that you can specify a Yubikey OTP in the URL if the upload is Yubikey restricted. - -Remove file : - - - **DELETE** /$mode/:uploadid:/:fileid:/:filename: - - Delete file. Upload **MUST** have "removable" option enabled. - -Show server details : - - - **GET** /version - - Show plik server version, and some build informations (build host, date, git revision,...) - - - **GET** /config - - Show plik server configuration (ttl values, max file size, ...) - -QRCode : - - - **GET** /qrcode - - Generate a QRCode image from an url - - Params : - - url : The url you want to store in the QRCode - - size : The size of the generated image in pixels (default: 250, max: 1000) - - -$mode can be "file" or "stream" depending if stream mode is enabled. See FAQ for more details. 
-
-Examples :
-```sh
-Create an upload (in the json response, you'll have upload id and upload token)
-$ curl -X POST http://127.0.0.1:8080/upload
-
-Create a OneShot upload
-$ curl -X POST -d '{ "OneShot" : true }' http://127.0.0.1:8080/upload
-
-Upload a file to upload
-$ curl -X POST --header "X-UploadToken: M9PJftiApG1Kqr81gN3Fq1HJItPENMhl" -F "file=@test.txt" http://127.0.0.1:8080/file/IsrIPIsDskFpN12E
-
-Get headers
-$ curl -I http://127.0.0.1:8080/file/IsrIPIsDskFpN12E/sFjIeokH23M35tN4/test.txt
-HTTP/1.1 200 OK
-Content-Disposition: filename=test.txt
-Content-Length: 3486
-Content-Type: text/plain; charset=utf-8
-Date: Fri, 15 May 2015 09:16:20 GMT
-
-```
-
 ### Cli client

 Plik is shipped with a powerful golang multiplatform cli client (downloadable in web interface) :
@@ -223,6 +93,7 @@ Options:
   -t, --ttl TTL             Time before expiration (Upload will be removed in m|h|d)
   -n, --name NAME           Set file name when piping from STDIN
       --server SERVER       Overrides plik url
+      --token TOKEN         Specify an upload token
       --comments COMMENT    Set comments of the upload ( MarkDown compatible )
   -p                        Protect the upload with login and password
       --password PASSWD     Protect the upload with login:password ( if omitted default login is "plik" )
@@ -256,30 +127,93 @@ curl -s 'https://127.0.0.1:8080/file/0KfNj6eMb93ilCrl/q73tEBEqM04b22GP/mydirecto

 Client configuration and preferences are stored at ~/.plikrc ( overridable with PLIKRC environment variable )

-### FAQ
+### Authentication
-##### I have an error when uploading from client : "Unable to upload file : HTTP error 411 Length Required"
+Plik can authenticate users using the Google and/or OVH APIs.
+Once authenticated, the only call Plik will ever make to those APIs is to get the user's ID, name and email.
+Plik will never forward any upload data or metadata to any third party.
+If source IP address restriction is enabled, user accounts can only be created from trusted IPs, but
+authenticated users can then upload files without source IP restriction.
-Under nginx < 1.3.9, you must enable HttpChunkin module to allow transfer-encoding "chunked".
-You might want to install the "nginx-extras" Debian package with built-in HttpChunkin module.
+ - **Google** :
+   - You'll need to create a new application in the [Google Developer Console](https://console.developers.google.com)
+   - You'll be handed a Google API ClientID and a Google API ClientSecret that you'll need to put in the plikd.cfg file.
+   - Do not forget to whitelist the valid origin and redirect URL ( https://yourdomain/auth/google/callback ) for your domain.
+
+ - **OVH** :
+   - You'll need to create a new application in the OVH API : https://eu.api.ovh.com/createApp/
+   - You'll be handed an OVH application key and an OVH application secret key that you'll need to put in the plikd.cfg file.
-And add in your server configuration :
+Once authenticated, a user can generate upload tokens that can be specified in the ~/.plikrc file to authenticate
+the command line client.
-```sh
-    chunkin on;
-    error_page 411 = @my_411_error;
-    location @my_411_error {
-        chunkin_resume;
-    }
+```
+Token = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+```
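+
+For example, a token can also be passed explicitly to the cli client on the command line (sample token value below) :
+
+```sh
+$ plik --token xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx file.txt
+```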
+
+### Available data backends
+
+Plik is shipped with multiple data backends for uploaded files and metadata backends for the upload metadata.
+
+ - File data backend :
+
+Stores uploaded files in a local or mounted file system directory. This is suitable for multiple instance deployment if all instances can share the directory.
+
+ - Openstack Swift data backend : http://docs.openstack.org/developer/swift/
+
+Openstack Swift is a highly available, distributed, eventually consistent object/blob store.
+
+ - SeaweedFS data backend : https://github.com/chrislusf/seaweedfs
+
+SeaweedFS is a simple and highly scalable distributed file system.
+
+### Available metadata backends
+
+ - File metadata backend : (DEPRECATED)
+
+This backend has been deprecated in Plik 1.2 in favor of the BoltDB backend.
+The authentication mechanisms ( User / Tokens ) are NOT implemented in this backend.
+Migration from the file backend to the BoltDB backend can be done using the file2bolt utility :
+
+```
+server/utils/file2bolt --directory server/files --db server/plik.db
+```
+This backend saves upload metadata as JSON in a .config file in the upload directory.
+This is only suitable for a single instance deployment as locking happens at the process level.
+Using multiple plik instances with this backend will result in corrupted metadata JSON files. Use the MongoDB backend instead.
+
+ - Bolt metadata backend : https://github.com/boltdb/bolt
+
+This is the successor of the file metadata backend. It stores all the metadata in a single bolt.db file.
+Performance is improved by keeping all metadata in memory to avoid costly filesystem stat operations.
+BoltDB also supports atomic transactions, which ensure metadata consistency over time.
+
+Only suitable for a single instance deployment as the Bolt database can only be opened by a single process at a time.
+
+ - MongoDB metadata backend : https://www.mongodb.org
+
+Suitable for distributed / High Availability deployments.
+
+### API
+Plik server exposes an HTTP API to manage uploads and get files :
+
+See the [Plik API reference](documentation/api.md)
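+
+A minimal example of the typical flow (the upload id and token below are sample values) :
+
+```sh
+# Create an upload (the json response contains the upload id and upload token)
+$ curl -X POST http://127.0.0.1:8080/upload
+
+# Upload a file to it
+$ curl -X POST --header "X-UploadToken: M9PJftiApG1Kqr81gN3Fq1HJItPENMhl" -F "file=@test.txt" http://127.0.0.1:8080/file/IsrIPIsDskFpN12E
+```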
+
+### Docker
+Plik comes with a simple Dockerfile that allows you to run it in a container :
+
+See the [Plik Docker reference](documentation/docker.md)
+
+### FAQ
+
 ##### Why is stream mode broken in multiple instance deployment ?

 Because stream mode isn't stateless. As the uploader request will block on one plik instance the downloader request **MUST** go to the same instance to succeed.
 The load balancing strategy **MUST** be aware of this and route stream requests to the same instance by hashing the file id.

 Here is an example of how to achieve this using nginx and a little piece of LUA.
-Make sur your nginx server is built with LUA scripting support.
+Make sure your nginx server is built with LUA scripting support.
 You might want to install the "nginx-extras" Debian package (>1.7.2) with built-in LUA support.
 ```
 upstream plik {
@@ -313,14 +247,20 @@ server {
 }
 ```
-##### Is "file" metadata backend compatible with multi-instance ?
-
-Unfortunately, you may experience some weird behaviour using file metadata backend with multiple instances of plik.
+##### I have an error when uploading from client : "Unable to upload file : HTTP error 411 Length Required"
-The lock used in this backend is specific to a given instance, so the metadata file could be corrupted on concurrent requests.
+Under nginx < 1.3.9, you must enable HttpChunkin module to allow transfer-encoding "chunked".
+You might want to install the "nginx-extras" Debian package with built-in HttpChunkin module.
-You can set a 'sticky' on the source ip but we recommend using the MongoDB metadata backend, when deploying a high available plik installation.
+And add in your server configuration :
+```sh
+    chunkin on;
+    error_page 411 = @my_411_error;
+    location @my_411_error {
+        chunkin_resume;
+    }
+```

 ##### How to disable nginx buffering ?

@@ -335,6 +275,11 @@ Detailed documentation : http://nginx.org/en/docs/http/ngx_http_proxy_module.htm
     proxy_buffers 8 1M;
     client_body_buffer_size 1M;
 ```
+
+##### Why doesn't authentication work with HTTP connections ?
+
+Plik session cookies have the "secure" flag set, so they can only be transmitted over secure HTTPS connections.
+
 ##### How to take and upload screenshots like a boss ?

 ```
@@ -348,5 +293,6 @@ The screenshot is then removed of your home directory to avoid garbage.

 ##### How to contribute to the project ?

-Contributions are welcome, you are free to implement other data/metadata/shorten backends and submit them via
-pull requests. We will be happy to add them in the future releases.
+Contributions are welcome, feel free to open issues and/or submit pull requests.
+Please make your pull requests against the current development (RC) branch, not against master.
+Please run/update the test suite using the makefile test target.
\ No newline at end of file
diff --git a/client/Godeps/Godeps.json b/client/Godeps/Godeps.json
index d147673e..a6cb6389 100644
--- a/client/Godeps/Godeps.json
+++ b/client/Godeps/Godeps.json
@@ -1,14 +1,14 @@
 {
     "ImportPath": "github.com/root-gg/plik/client",
-    "GoVersion": "go1.3.1",
+    "GoVersion": "go1.5.1",
     "Packages": [
         "./..."
     ],
     "Deps": [
         {
             "ImportPath": "github.com/BurntSushi/toml",
-            "Comment": "v0.1.0",
-            "Rev": "2ceedfee35ad3848e49308ab0c9a4f640cfb5fb2"
+            "Comment": "v0.1.0-18-g443a628",
+            "Rev": "443a628bc233f634a75bcbdd71fe5350789f1afa"
         },
         {
             "ImportPath": "github.com/GeertJohan/yubigo",
@@ -16,32 +16,46 @@
         },
         {
             "ImportPath": "github.com/cheggaaa/pb",
-            "Rev": "bd14546a551971ae7f460e6d6e527c5b56cd38d7"
+            "Rev": "467c52dcaefb4913d1feb16f6a771777c9613a64"
         },
         {
             "ImportPath": "github.com/docopt/docopt-go",
-            "Comment": "0.6.1-1-gc5dac53",
-            "Rev": "c5dac536301992c0371c6115d998fb62944bfad3"
+            "Comment": "0.6.1-5-g854c423",
+            "Rev": "854c423c810880e30b9fecdabb12d54f4a92f9bb"
+        },
+        {
+            "ImportPath": "github.com/kardianos/osext",
+            "Rev": "10da29423eb9a6269092eebdc2be32209612d9d2"
         },
         {
             "ImportPath": "github.com/mitchellh/go-homedir",
-            "Rev": "7d2d8c8a4e078ce3c58736ab521a40b37a504c52"
+            "Rev": "1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4"
+        },
+        {
+            "ImportPath": "github.com/nu7hatch/gouuid",
+            "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
         },
         {
             "ImportPath": "github.com/olekukonko/ts",
             "Rev": "ecf753e7c962639ab5a1fb46f7da627d4c0a04b8"
         },
         {
-            "ImportPath": "github.com/root-gg/context",
-            "Rev": "eb01ea15154712b6b44ee3bbf07863af6525afc6"
+            "ImportPath": "github.com/root-gg/juliet",
+            "Comment": "v1.0",
+            "Rev": "f7abeae007897a156a3a5af8fe8e5e653cb15f13"
         },
         {
             "ImportPath": "github.com/root-gg/logger",
             "Rev": "5d9a47a3531200bc20998b96439220812c111707"
         },
+        {
+            "ImportPath": "github.com/root-gg/plik/server/common",
+            "Comment": "1.1-28-ga42ed90",
+            "Rev": "a42ed908c0a0620806577fec48816b0c4b2c8314"
+        },
         {
             "ImportPath": "github.com/root-gg/utils",
-            "Rev": "748890d0e7c37c07d7e0f9f5a66a69901b371e28"
+            "Rev": "38f45ede2ce220d9c08734edd8a13107022cc20d"
         },
         {
             "ImportPath": "golang.org/x/crypto/cast5",
diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md
index 380bb36b..5a5df637 100644
--- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md
+++
b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md @@ -9,7 +9,7 @@ representations. (There is an example of this below.) Spec: https://github.com/mojombo/toml Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) +[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) Documentation: http://godoc.org/github.com/BurntSushi/toml @@ -111,7 +111,7 @@ type songs struct { Song []song } var favorites songs -if _, err := Decode(blob, &favorites); err != nil { +if _, err := toml.Decode(blob, &favorites); err != nil { log.Fatal(err) } diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go index b6d75d04..6c7d398b 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go @@ -12,6 +12,18 @@ import ( var e = fmt.Errorf +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // When using the various `Decode*` functions, the type `Primitive` may // be given to any value, and its decoding will be delayed. @@ -128,6 +140,7 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { // Save the undecoded data and the key context into the primitive @@ -141,6 +154,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + // Special case. Handle time.Time values specifically. // TODO: Remove this code when we decide to drop support for Go 1.1. 
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go index c8114453..ef6f545f 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go @@ -59,6 +59,29 @@ func (k Key) String() string { return strings.Join(k, ".") } +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } else { + return k[i] + } +} + func (k Key) add(piece string) Key { newKey := make(Key, len(k)+1) copy(newKey, k) diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index b940333d..00000000 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,540 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "reflect" - "testing" - "time" -) - -func init() { - log.SetFlags(0) -} - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } 
- } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. -func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands.J Geils] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. 
-[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. -func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. 
-func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go index 36187134..64e8c47e 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go @@ -118,7 +118,8 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) { k := rv.Kind() switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: @@ -173,7 +174,8 @@ func (enc *Encoder) eElement(rv reflect.Value) { switch rv.Kind() { case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -223,28 +225,28 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key, true) for i := 0; i < rv.Len(); i++ { trv := rv.Index(i) if isNil(trv) { continue } + panicIfInvalidKey(key) enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.String()) + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() enc.eMapOrStruct(key, trv) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) if len(key) == 1 { // Output an extra new line between top-level tables. // (The newline isn't written if nothing else has been written though.) enc.newline() } if len(key) > 0 { - panicIfInvalidKey(key, true) - enc.wf("%s[%s]", enc.indentStr(key), key.String()) + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() } enc.eMapOrStruct(key, rv) @@ -348,10 +350,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) { writeFields(fieldsSub) } -// tomlTypeName returns the TOML type name of the Go value's type. It is used to -// determine whether the types of array elements are mixed (which is forbidden). -// If the Go value is nil, then it is illegal for it to be an array element, and -// valueIsNil is returned as true. +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. // Returns the TOML type of a Go value. The type may be `nil`, which means // no concrete TOML type could be found. 
@@ -362,7 +364,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { switch rv.Kind() { case reflect.Bool: return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return tomlInteger @@ -440,8 +443,8 @@ func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key, false) - enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1]) + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) enc.newline() } @@ -479,37 +482,15 @@ func isNil(rv reflect.Value) bool { } } -func panicIfInvalidKey(key Key, hash bool) { - if hash { - for _, k := range key { - if !isValidTableName(k) { - encPanic(e("Key '%s' is not a valid table name. Table names "+ - "cannot contain '[', ']' or '.'.", key.String())) - } - } - } else { - if !isValidKeyName(key[len(key)-1]) { - encPanic(e("Key '%s' is not a name. Key names "+ - "cannot contain whitespace.", key.String())) - } - } -} - -func isValidTableName(s string) bool { - if len(s) == 0 { - return false - } - for _, r := range s { - if r == '[' || r == ']' || r == '.' { - return false +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) } } - return true } func isValidKeyName(s string) bool { - if len(s) == 0 { - return false - } - return true + return len(s) != 0 } diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 74a5ee5d..00000000 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - log.Printf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. 
-func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. - input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": 
{ - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - 
[]*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - map[string]int{ - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) anonymous non-struct": { - input: struct{ NonStruct }{5}, - wantError: errAnonNonStruct, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git 
a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go index 140c44c1..d36e1dd6 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go @@ -14,6 +14,6 @@ import ( // so that Go 1.1 can be supported. type TextMarshaler encoding.TextMarshaler -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. type TextUnmarshaler encoding.TextUnmarshaler diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go index fb285e7f..e8d503d0 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -11,8 +11,8 @@ type TextMarshaler interface { MarshalText() (text []byte, err error) } -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. type TextUnmarshaler interface { UnmarshalText(text []byte) error } diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go index 3821fa27..21912285 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go @@ -14,6 +14,9 @@ const ( itemEOF itemText itemString + itemRawString + itemMultilineString + itemRawMultilineString itemBool itemInteger itemFloat @@ -42,6 +45,8 @@ const ( commentStart = '#' stringStart = '"' stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' ) type stateFn func(lx *lexer) stateFn @@ -256,38 +261,54 @@ func lexArrayTableEnd(lx *lexer) stateFn { } func lexTableNameStart(lx *lexer) stateFn { - switch lx.next() { - case tableEnd, eof: - return lx.errorf("Unexpected end of table. (Tables cannot " + + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("Unexpected end of table name. (Table names cannot " + "be empty.)") - case tableSep: - return lx.errorf("Unexpected table separator. (Tables cannot " + + case r == tableSep: + return lx.errorf("Unexpected table separator. (Table names cannot " + "be empty.)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + case isWhitespace(r): + return lexTableNameStart + default: + return lexBareTableName } - return lexTableName } // lexTableName lexes the name of a table. It assumes that at least one // valid character for the table has already been read. 
-func lexTableName(lx *lexer) stateFn { - switch lx.peek() { - case eof: - return lx.errorf("Unexpected end of table name %q.", lx.current()) - case tableStart: - return lx.errorf("Table names cannot contain %q or %q.", - tableStart, tableEnd) - case tableEnd: - lx.emit(itemText) - lx.next() - return lx.pop() - case tableSep: - lx.emit(itemText) - lx.next() +func lexBareTableName(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareTableName + case r == tableSep || r == tableEnd: + lx.backup() + lx.emitTrim(itemText) + return lexTableNameEnd + default: + return lx.errorf("Bare keys cannot contain %q.", r) + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: lx.ignore() return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ + "instead.", r) } - lx.next() - return lexTableName } // lexKeyStart consumes a key name up until the first non-whitespace character. @@ -300,53 +321,48 @@ func lexKeyStart(lx *lexer) stateFn { case isWhitespace(r) || isNL(r): lx.next() return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey } - - lx.ignore() - lx.emit(itemKeyStart) - lx.next() - return lexKey } -// lexKey consumes the text of a key. Assumes that the first character (which -// is not whitespace) has already been consumed. -func lexKey(lx *lexer) stateFn { - r := lx.peek() - - // Keys cannot contain a '#' character. - if r == commentStart { - return lx.errorf("Key cannot contain a '#' character.") - } - - // XXX: Possible divergence from spec? - // "Keys start with the first non-whitespace character and end with the - // last non-whitespace character before the equals sign." - // Note here that whitespace is either a tab or a space. - // But we'll call it quits if we see a new line too. - if isNL(r) { +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. +func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): lx.emitTrim(itemText) return lexKeyEnd - } - - // Let's also call it quits if we see an equals sign. - if r == keySep { + case r == keySep: + lx.backup() lx.emitTrim(itemText) return lexKeyEnd + default: + return lx.errorf("Bare keys cannot contain %q.", r) } - - lx.next() - return lexKey } -// lexKeyEnd consumes the end of a key (up to the key separator). -// Assumes that any whitespace after a key has been consumed. +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). func lexKeyEnd(lx *lexer) stateFn { - r := lx.next() - if r == keySep { + switch r := lx.next(); { + case r == keySep: return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("Expected key separator %q, but got %q instead.", + keySep, r) } - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) } // lexValue starts the consumption of a value anywhere a value is expected. 
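
The lex.go hunks above and below teach the vendored BurntSushi/toml parser quoted keys, quoted table-name parts, multiline strings, raw strings, and \u/\U escapes. As a quick illustration of what now decodes (a minimal editorial sketch, not part of the patch, assuming the vendored package is importable as github.com/BurntSushi/toml):

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        // Bare keys are limited to [A-Za-z0-9_-]; anything else has to be
        // quoted, and quoted parts are also accepted inside table names.
        doc := `
    "key with spaces" = "caf\u00E9"

    ["127.0.0.1"]
    bare-key_1 = 1
    text = """
    first line
    second line"""
    path = 'C:\Users\plik'
    `
        var v map[string]interface{}
        if _, err := toml.Decode(doc, &v); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Printf("%#v\n", v)
    }
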
@@ -354,7 +370,8 @@ func lexKeyEnd(lx *lexer) stateFn {
 // After a value is lexed, the last state on the stack is popped and returned.
 func lexValue(lx *lexer) stateFn {
 	// We allow whitespace to precede a value, but NOT new lines.
-	// In array syntax, the array states are responsible for ignoring new lines.
+	// In array syntax, the array states are responsible for ignoring new
+	// lines.
 	r := lx.next()
 	if isWhitespace(r) {
 		return lexSkip(lx, lexValue)
@@ -366,8 +383,25 @@ func lexValue(lx *lexer) stateFn {
 		lx.emit(itemArray)
 		return lexArrayValue
 	case r == stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
 		lx.ignore() // ignore the '"'
 		return lexString
+	case r == rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
 	case r == 't':
 		return lexTrue
 	case r == 'f':
@@ -441,6 +475,7 @@ func lexString(lx *lexer) stateFn {
 	case isNL(r):
 		return lx.errorf("Strings cannot contain new lines.")
 	case r == '\\':
+		lx.push(lexString)
 		return lexStringEscape
 	case r == stringEnd:
 		lx.backup()
@@ -452,8 +487,88 @@ func lexString(lx *lexer) stateFn {
 	return lexString
 }
 
-// lexStringEscape consumes an escaped character. It assumes that the preceding
-// '\\' has already been consumed.
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '\\':
+		return lexMultilineStringEscape
+	case r == stringEnd:
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == rawStringEnd:
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		lx.next()
+		return lexMultilineString
+	} else {
+		lx.backup()
+		lx.push(lexMultilineString)
+		return lexStringEscape(lx)
+	}
+}
+
 func lexStringEscape(lx *lexer) stateFn {
 	r := lx.next()
 	switch r {
@@ -469,35 +584,45 @@ func lexStringEscape(lx *lexer) stateFn {
 		fallthrough
 	case '"':
 		fallthrough
-	case '/':
-		fallthrough
 	case '\\':
-		return lexString
+		return lx.pop()
 	case 'u':
-		return lexStringUnicode
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
 	}
 	return lx.errorf("Invalid escape character %q. Only the following "+
 		"escape characters are allowed: "+
-		"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
+		"\\b, \\t, \\n, \\f, \\r, \\\", \\\\, "+
+		"\\uXXXX and \\UXXXXXXXX.", r)
 }
 
-// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes
-// that the '\x' has already been consumed.
-func lexStringUnicode(lx *lexer) stateFn {
+func lexShortUnicodeEscape(lx *lexer) stateFn {
 	var r rune
-
 	for i := 0; i < 4; i++ {
 		r = lx.next()
 		if !isHexadecimal(r) {
-			return lx.errorf("Expected four hexadecimal digits after '\\x', "+
+			return lx.errorf("Expected four hexadecimal digits after '\\u', "+
 				"but got '%s' instead.", lx.current())
 		}
 	}
-	return lexString
+	return lx.pop()
 }
 
-// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
-// It assumes that NO negative sign has been consumed.
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
 func lexNumberOrDateStart(lx *lexer) stateFn {
 	r := lx.next()
 	if !isDigit(r) {
@@ -557,9 +682,10 @@ func lexDateAfterYear(lx *lexer) stateFn {
 	return lx.pop()
 }
 
-// lexNumberStart consumes either an integer or a float. It assumes that a
-// negative sign has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states.
 func lexNumberStart(lx *lexer) stateFn {
 	// we MUST see a digit. Even floats have to start with a digit.
r := lx.next() @@ -693,6 +819,14 @@ func isHexadecimal(r rune) bool { (r >= 'A' && r <= 'F') } +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + func (itype itemType) String() string { switch itype { case itemError: @@ -705,6 +839,12 @@ func (itype itemType) String() string { return "Text" case itemString: return "String" + case itemRawString: + return "String" + case itemMultilineString: + return "String" + case itemRawMultilineString: + return "String" case itemBool: return "Bool" case itemInteger: diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go index 43afe3c3..c6069be1 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" "time" + "unicode" "unicode/utf8" ) @@ -66,7 +67,7 @@ func parse(data string) (p *parser, err error) { } func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d, key '%s': %s", + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", p.approxLine, p.current(), fmt.Sprintf(format, v...)) panic(parseError(msg)) } @@ -74,7 +75,7 @@ func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) next() item { it := p.lx.nextItem() if it.typ == itemError { - p.panicf("Near line %d: %s", it.line, it.val) + p.panicf("%s", it.val) } return it } @@ -101,12 +102,12 @@ func (p *parser) topLevel(item item) { p.approxLine = item.line p.expect(itemText) case itemTableStart: - kg := p.expect(itemText) + kg := p.next() p.approxLine = kg.line - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) } p.assertEqual(itemTableEnd, kg.typ) @@ -114,12 +115,12 @@ func (p *parser) topLevel(item item) { p.setType("", tomlHash) p.ordered = append(p.ordered, key) case itemArrayTableStart: - kg := p.expect(itemText) + kg := p.next() p.approxLine = kg.line - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) } p.assertEqual(itemArrayTableEnd, kg.typ) @@ -127,27 +128,48 @@ func (p *parser) topLevel(item item) { p.setType("", tomlArrayHash) p.ordered = append(p.ordered, key) case itemKeyStart: - kname := p.expect(itemText) - p.currentKey = kname.val + kname := p.next() p.approxLine = kname.line + p.currentKey = p.keyString(kname) val, typ := p.value(p.next()) p.setValue(p.currentKey, val) p.setType(p.currentKey, typ) p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) } } +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. 
 func (p *parser) value(it item) (interface{}, tomlType) {
 	switch it.typ {
 	case itemString:
-		return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it)
+		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+	case itemMultilineString:
+		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+	case itemRawString:
+		return it.val, p.typeOfPrimitive(it)
+	case itemRawMultilineString:
+		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
 	case itemBool:
 		switch it.val {
 		case "true":
@@ -352,7 +374,8 @@ func (p *parser) addImplicit(key Key) {
 	p.implicits[key.String()] = true
 }
 
-// removeImplicit stops tagging the given key as having been implicitly created.
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
 func (p *parser) removeImplicit(key Key) {
 	p.implicits[key.String()] = false
 }
@@ -374,31 +397,85 @@ func (p *parser) current() string {
 	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
 }
 
-func replaceEscapes(s string) string {
-	return strings.NewReplacer(
-		"\\b", "\u0008",
-		"\\t", "\u0009",
-		"\\n", "\u000A",
-		"\\f", "\u000C",
-		"\\r", "\u000D",
-		"\\\"", "\u0022",
-		"\\/", "\u002F",
-		"\\\\", "\u005C",
-	).Replace(s)
+func stripFirstNewline(s string) string {
+	if len(s) == 0 || s[0] != '\n' {
+		return s
+	}
+	return s[1:len(s)]
 }
 
-func (p *parser) replaceUnicode(s string) string {
-	indexEsc := func() int {
-		return strings.Index(s, "\\u")
+func stripEscapedWhitespace(s string) string {
+	esc := strings.Split(s, "\\\n")
+	if len(esc) > 1 {
+		for i := 1; i < len(esc); i++ {
+			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+		}
 	}
-	for i := indexEsc(); i != -1; i = indexEsc() {
-		asciiBytes := s[i+2 : i+6]
-		s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1)
+	return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } } - return s + return string(replaced) } -func (p *parser) asciiEscapeToUnicode(s string) string { +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the "+ @@ -409,9 +486,13 @@ func (p *parser) asciiEscapeToUnicode(s string) string { // I honestly don't understand how this works. I can't seem // to find a way to make this fail. I figured this would fail on invalid // UTF-8 characters like U+DCFF, but it doesn't. - r := string(rune(hex)) - if !utf8.ValidString(r) { + if !utf8.ValidString(string(rune(hex))) { p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) } - return string(r) + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString } diff --git a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go index 79dac6b1..c73f8afc 100644 --- a/client/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go +++ b/client/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go @@ -56,6 +56,12 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlDatetime case itemString: return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString case itemBool: return tomlBool } @@ -77,8 +83,8 @@ func (p *parser) typeOfArray(types []tomlType) tomlType { theType := types[0] for _, t := range types[1:] { if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but arrays "+ - "must be homogeneous.", theType, t) + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) } } return tomlArray diff --git a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go index 1dd210be..e024e36d 100644 --- a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go +++ b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format.go @@ -6,24 +6,21 @@ import ( "strings" ) -type Units int - const ( // By default, without type handle - U_NO Units = iota + U_NO = 0 // Handle as b, Kb, Mb, etc - U_BYTES + U_BYTES = 1 ) // Format integer -func Format(i int64, units Units) string { +func Format(i int64, units int) string { switch units { case U_BYTES: return FormatBytes(i) - default: - // by default just convert to string - return strconv.FormatInt(i, 10) } + // by default just convert to string + return strconv.Itoa(int(i)) } // Convert bytes to human readable string. 
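
The format.go hunk above drops the dedicated Units type, so Format now takes a plain int flag; the deleted format_test.go just below pinned both behaviors down. A minimal usage sketch (editorial, not part of the patch, assuming the vendored import path github.com/cheggaaa/pb):

    package main

    import (
        "fmt"

        "github.com/cheggaaa/pb"
    )

    func main() {
        fmt.Println(pb.Format(1000, pb.U_BYTES)) // "1000 B": byte units only scale from 1 KB up
        fmt.Println(pb.Format(1000, pb.U_NO))    // "1000": plain integer formatting
    }
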
Like a 2 MB, 64.2 KB, 52 B diff --git a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go deleted file mode 100644 index b76275e2..00000000 --- a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/format_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package pb - -import ( - "fmt" - "strconv" - "testing" -) - -func Test_DefaultsToInteger(t *testing.T) { - value := int64(1000) - expected := strconv.Itoa(int(value)) - actual := Format(value, -1) - - if actual != expected { - t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual)) - } -} - -func Test_CanFormatAsInteger(t *testing.T) { - value := int64(1000) - expected := strconv.Itoa(int(value)) - actual := Format(value, U_NO) - - if actual != expected { - t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual)) - } -} - -func Test_CanFormatAsBytes(t *testing.T) { - value := int64(1000) - expected := "1000 B" - actual := Format(value, U_BYTES) - - if actual != expected { - t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual)) - } -} diff --git a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go index ac3eec4c..bc9aa0e2 100644 --- a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go +++ b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go @@ -5,7 +5,6 @@ import ( "io" "math" "strings" - "sync" "sync/atomic" "time" ) @@ -25,13 +24,13 @@ var ( ) // Create new progress bar object -func New(total int) *ProgressBar { +func New(total int) (pb *ProgressBar) { return New64(int64(total)) } // Create new progress bar object uding int64 as total -func New64(total int64) *ProgressBar { - pb := &ProgressBar{ +func New64(total int64) (pb *ProgressBar) { + pb = &ProgressBar{ Total: total, RefreshRate: DEFAULT_REFRESH_RATE, ShowPercent: true, @@ -39,17 +38,18 @@ func New64(total int64) *ProgressBar { ShowBar: true, ShowTimeLeft: true, ShowFinalTime: true, - Units: U_NO, ManualUpdate: false, - isFinish: make(chan struct{}), currentValue: -1, } - return pb.Format(FORMAT) + pb.Format(FORMAT) + return } // Create new object and start -func StartNew(total int) *ProgressBar { - return New(total).Start() +func StartNew(total int) (pb *ProgressBar) { + pb = New(total) + pb.Start() + return } // Callback for custom output @@ -71,14 +71,12 @@ type ProgressBar struct { Output io.Writer Callback Callback NotPrint bool - Units Units + Units int Width int ForceWidth bool ManualUpdate bool - finishOnce sync.Once //Guards isFinish - isFinish chan struct{} - + isFinish int32 startTime time.Time currentValue int64 @@ -92,7 +90,7 @@ type ProgressBar struct { } // Start print -func (pb *ProgressBar) Start() *ProgressBar { +func (pb *ProgressBar) Start() { pb.startTime = time.Now() if pb.Total == 0 { pb.ShowBar = false @@ -102,7 +100,6 @@ func (pb *ProgressBar) Start() *ProgressBar { if !pb.ManualUpdate { go pb.writer() } - return pb } // Increment current value @@ -112,12 +109,7 @@ func (pb *ProgressBar) Increment() int { // Set current value func (pb *ProgressBar) Set(current int) { - pb.Set64(int64(current)) -} - -// Set64 sets the current value as int64 -func (pb *ProgressBar) Set64(current int64) { - atomic.StoreInt64(&pb.current, current) + atomic.StoreInt64(&pb.current, int64(current)) } // Add to current value @@ -130,69 +122,75 @@ func (pb *ProgressBar) Add64(add int64) int64 { } // Set prefix string -func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { +func (pb *ProgressBar) 
Prefix(prefix string) (bar *ProgressBar) { pb.prefix = prefix return pb } // Set postfix string -func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { +func (pb *ProgressBar) Postfix(postfix string) (bar *ProgressBar) { pb.postfix = postfix return pb } // Set custom format for bar // Example: bar.Format("[=>_]") -func (pb *ProgressBar) Format(format string) *ProgressBar { +func (pb *ProgressBar) Format(format string) (bar *ProgressBar) { + bar = pb formatEntries := strings.Split(format, "") - if len(formatEntries) == 5 { - pb.BarStart = formatEntries[0] - pb.BarEnd = formatEntries[4] - pb.Empty = formatEntries[3] - pb.Current = formatEntries[1] - pb.CurrentN = formatEntries[2] + if len(formatEntries) != 5 { + return } - return pb + pb.BarStart = formatEntries[0] + pb.BarEnd = formatEntries[4] + pb.Empty = formatEntries[3] + pb.Current = formatEntries[1] + pb.CurrentN = formatEntries[2] + return } // Set bar refresh rate -func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { +func (pb *ProgressBar) SetRefreshRate(rate time.Duration) (bar *ProgressBar) { + bar = pb pb.RefreshRate = rate - return pb + return } // Set units // bar.SetUnits(U_NO) - by default // bar.SetUnits(U_BYTES) - for Mb, Kb, etc -func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { - pb.Units = units - return pb +func (pb *ProgressBar) SetUnits(units int) (bar *ProgressBar) { + bar = pb + switch units { + case U_NO, U_BYTES: + pb.Units = units + } + return } // Set max width, if width is bigger than terminal width, will be ignored -func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar { +func (pb *ProgressBar) SetMaxWidth(width int) (bar *ProgressBar) { + bar = pb pb.Width = width pb.ForceWidth = false - return pb + return } // Set bar width -func (pb *ProgressBar) SetWidth(width int) *ProgressBar { +func (pb *ProgressBar) SetWidth(width int) (bar *ProgressBar) { + bar = pb pb.Width = width pb.ForceWidth = true - return pb + return } // End print func (pb *ProgressBar) Finish() { - //Protect multiple calls - pb.finishOnce.Do(func() { - close(pb.isFinish) - pb.write(atomic.LoadInt64(&pb.current)) - if !pb.NotPrint { - fmt.Println() - } - }) + atomic.StoreInt32(&pb.isFinish, 1) + pb.write(atomic.LoadInt64(&pb.current)) + if !pb.NotPrint { + fmt.Println() + } } // End print and write string 'str' @@ -242,19 +240,16 @@ func (pb *ProgressBar) write(current int64) { // time left fromStart := time.Now().Sub(pb.startTime) - select { - case <-pb.isFinish: + if atomic.LoadInt32(&pb.isFinish) != 0 { if pb.ShowFinalTime { left := (fromStart / time.Second) * time.Second timeLeftBox = left.String() } - default: - if pb.ShowTimeLeft && current > 0 { - perEntry := fromStart / time.Duration(current) - left := time.Duration(pb.Total-current) * perEntry - left = (left / time.Second) * time.Second - timeLeftBox = left.String() - } + } else if pb.ShowTimeLeft && current > 0 { + perEntry := fromStart / time.Duration(current) + left := time.Duration(pb.Total-current) * perEntry + left = (left / time.Second) * time.Second + timeLeftBox = left.String() } // speed @@ -329,14 +324,12 @@ func (pb *ProgressBar) Update() { // Internal loop for writing progressbar func (pb *ProgressBar) writer() { - pb.Update() for { - select { - case <-pb.isFinish: - return - case <-time.After(pb.RefreshRate): - pb.Update() + if atomic.LoadInt32(&pb.isFinish) != 0 { + break } + pb.Update() + time.Sleep(pb.RefreshRate) } } diff --git a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go 
b/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go deleted file mode 100644 index dfe394fd..00000000 --- a/client/Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package pb - -import ( - "testing" -) - -func Test_IncrementAddsOne(t *testing.T) { - count := 5000 - bar := New(count) - expected := 1 - actual := bar.Increment() - - if actual != expected { - t.Errorf("Expected {%d} was {%d}", expected, actual) - } -} - -func Test_Width(t *testing.T) { - count := 5000 - bar := New(count) - width := 100 - bar.SetWidth(100).Callback = func(out string) { - if len(out) != width { - t.Errorf("Bar width expected {%d} was {%d}", len(out), width) - } - } - bar.Start() - bar.Increment() - bar.Finish() -} - -func Test_MultipleFinish(t *testing.T) { - bar := New(5000) - bar.Add(2000) - bar.Finish() - bar.Finish() -} diff --git a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/.travis.yml b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/.travis.yml index 2e8c8af6..4778a929 100644 --- a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/.travis.yml +++ b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/.travis.yml @@ -5,25 +5,24 @@ language: go go: - - 1.2.2 + - 1.2 - 1.3 - - tip matrix: fast_finish: true before_install: - go get code.google.com/p/go.tools/cmd/vet - - go get -v github.com/golang/lint/golint - - go get -v code.google.com/p/go.tools/cmd/cover - - go get -v github.com/mattn/goveralls + - go get code.google.com/p/go.tools/cmd/cover + - go get github.com/golang/lint/golint + - go get github.com/mattn/goveralls install: - go get -d -v ./... && go build -v ./... script: - go vet -x ./... - - $HOME/gopath/bin/golint . + - $HOME/gopath/bin/golint ./... - go test -v ./... - go test -covermode=count -coverprofile=profile.cov . diff --git a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt.go b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt.go index fe79a534..add84a2e 100644 --- a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt.go +++ b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt.go @@ -7,7 +7,7 @@ Package docopt parses command-line arguments based on a help message. 
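
Before the deleted docopt_test.go below, here is the Parse entry point as those tests drive it (a minimal editorial sketch, not part of the patch; the six-argument call with a trailing exit=false mirrors the test invocations):

    package main

    import (
        "fmt"

        "github.com/docopt/docopt-go"
    )

    func main() {
        usage := `Usage: prog [-v] A

    Options: -v  Be verbose.`

        // help=true, version="", optionsFirst=false, exit=false
        args, err := docopt.Parse(usage, []string{"-v", "arg"}, true, "", false, false)
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(args["-v"], args["A"]) // true arg
    }
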
⚠ Use the alias “docopt-go”: import "github.com/docopt/docopt-go" or - $ go get github.com/github/docopt-go + $ go get github.com/docopt/docopt-go */ package docopt diff --git a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt_test.go b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt_test.go deleted file mode 100644 index 945eab52..00000000 --- a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/docopt_test.go +++ /dev/null @@ -1,1536 +0,0 @@ -/* -Based of off docopt.py: https://github.com/docopt/docopt - -Licensed under terms of MIT license (see LICENSE-MIT) -Copyright (c) 2013 Keith Batten, kbatten@gmail.com -*/ - -package docopt - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "regexp" - "strings" - "testing" -) - -func TestPatternFlat(t *testing.T) { - q := patternList{ - newArgument("N", nil), - newOption("-a", "", 0, false), - newArgument("M", nil)} - p, err := newRequired( - newOneOrMore(newArgument("N", nil)), - newOption("-a", "", 0, false), - newArgument("M", nil)).flat(patternDefault) - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - q = patternList{newOptionsShortcut()} - p, err = newRequired( - newOptional(newOptionsShortcut()), - newOptional(newOption("-a", "", 0, false))).flat(patternOptionSSHORTCUT) - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - return -} - -func TestOption(t *testing.T) { - if !parseOption("-h").eq(newOption("-h", "", 0, false)) { - t.Fail() - } - if !parseOption("--help").eq(newOption("", "--help", 0, false)) { - t.Fail() - } - if !parseOption("-h --help").eq(newOption("-h", "--help", 0, false)) { - t.Fail() - } - if !parseOption("-h, --help").eq(newOption("-h", "--help", 0, false)) { - t.Fail() - } - - if !parseOption("-h TOPIC").eq(newOption("-h", "", 1, false)) { - t.Fail() - } - if !parseOption("--help TOPIC").eq(newOption("", "--help", 1, false)) { - t.Fail() - } - if !parseOption("-h TOPIC --help TOPIC").eq(newOption("-h", "--help", 1, false)) { - t.Fail() - } - if !parseOption("-h TOPIC, --help TOPIC").eq(newOption("-h", "--help", 1, false)) { - t.Fail() - } - if !parseOption("-h TOPIC, --help=TOPIC").eq(newOption("-h", "--help", 1, false)) { - t.Fail() - } - - if !parseOption("-h Description...").eq(newOption("-h", "", 0, false)) { - t.Fail() - } - if !parseOption("-h --help Description...").eq(newOption("-h", "--help", 0, false)) { - t.Fail() - } - if !parseOption("-h TOPIC Description...").eq(newOption("-h", "", 1, false)) { - t.Fail() - } - - if !parseOption(" -h").eq(newOption("-h", "", 0, false)) { - t.Fail() - } - - if !parseOption("-h TOPIC Description... [default: 2]").eq(newOption("-h", "", 1, "2")) { - t.Fail() - } - if !parseOption("-h TOPIC Descripton... [default: topic-1]").eq(newOption("-h", "", 1, "topic-1")) { - t.Fail() - } - if !parseOption("--help=TOPIC ... [default: 3.14]").eq(newOption("", "--help", 1, "3.14")) { - t.Fail() - } - if !parseOption("-h, --help=DIR ... [default: ./]").eq(newOption("-h", "--help", 1, "./")) { - t.Fail() - } - if !parseOption("-h TOPIC Descripton... 
[dEfAuLt: 2]").eq(newOption("-h", "", 1, "2")) { - t.Fail() - } - return -} - -func TestOptionName(t *testing.T) { - if newOption("-h", "", 0, false).name != "-h" { - t.Fail() - } - if newOption("-h", "--help", 0, false).name != "--help" { - t.Fail() - } - if newOption("", "--help", 0, false).name != "--help" { - t.Fail() - } - return -} - -func TestCommands(t *testing.T) { - if v, err := Parse("Usage: prog add", []string{"add"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"add": true}) != true { - t.Error(err) - } - if v, err := Parse("Usage: prog [add]", []string{}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"add": false}) != true { - t.Error(err) - } - if v, err := Parse("Usage: prog [add]", []string{"add"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"add": true}) != true { - t.Error(err) - } - if v, err := Parse("Usage: prog (add|rm)", []string{"add"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"add": true, "rm": false}) != true { - t.Error(err) - } - if v, err := Parse("Usage: prog (add|rm)", []string{"rm"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"add": false, "rm": true}) != true { - t.Error(err) - } - if v, err := Parse("Usage: prog a b", []string{"a", "b"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"a": true, "b": true}) != true { - t.Error(err) - } - _, err := Parse("Usage: prog a b", []string{"b", "a"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - return -} - -func TestFormalUsage(t *testing.T) { - doc := ` - Usage: prog [-hv] ARG - prog N M - - prog is a program` - usage := parseSection("usage:", doc)[0] - if usage != "Usage: prog [-hv] ARG\n prog N M" { - t.FailNow() - } - formal, err := formalUsage(usage) - if err != nil { - t.Fatal(err) - } - if formal != "( [-hv] ARG ) | ( N M )" { - t.Fail() - } - return -} - -func TestParseArgv(t *testing.T) { - o := patternList{ - newOption("-h", "", 0, false), - newOption("-v", "--verbose", 0, false), - newOption("-f", "--file", 1, false), - } - - p, err := parseArgv(tokenListFromString(""), &o, false) - q := patternList{} - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h"), &o, false) - q = patternList{newOption("-h", "", 0, true)} - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h --verbose"), &o, false) - q = patternList{ - newOption("-h", "", 0, true), - newOption("-v", "--verbose", 0, true), - } - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h --file f.txt"), &o, false) - q = patternList{ - newOption("-h", "", 0, true), - newOption("-f", "--file", 1, "f.txt"), - } - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h --file f.txt arg"), &o, false) - q = patternList{ - newOption("-h", "", 0, true), - newOption("-f", "--file", 1, "f.txt"), - newArgument("", "arg"), - } - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h --file f.txt arg arg2"), &o, false) - q = patternList{ - newOption("-h", "", 0, true), - newOption("-f", "--file", 1, "f.txt"), - newArgument("", "arg"), - newArgument("", "arg2"), - } - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } - - p, err = parseArgv(tokenListFromString("-h arg -- -v"), &o, false) - q = patternList{ - newOption("-h", "", 0, 
true), - newArgument("", "arg"), - newArgument("", "--"), - newArgument("", "-v"), - } - if reflect.DeepEqual(p, q) != true { - t.Error(err) - } -} - -func TestParsePattern(t *testing.T) { - o := patternList{ - newOption("-h", "", 0, false), - newOption("-v", "--verbose", 0, false), - newOption("-f", "--file", 1, false), - } - - p, err := parsePattern("[ -h ]", &o) - q := newRequired(newOptional(newOption("-h", "", 0, false))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("[ ARG ... ]", &o) - q = newRequired(newOptional( - newOneOrMore( - newArgument("ARG", nil)))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("[ -h | -v ]", &o) - q = newRequired( - newOptional( - newEither( - newOption("-h", "", 0, false), - newOption("-v", "--verbose", 0, false)))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("( -h | -v [ --file ] )", &o) - q = newRequired( - newRequired( - newEither( - newOption("-h", "", 0, false), - newRequired( - newOption("-v", "--verbose", 0, false), - newOptional( - newOption("-f", "--file", 1, nil)))))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("(-h|-v[--file=]N...)", &o) - q = newRequired( - newRequired( - newEither( - newOption("-h", "", 0, false), - newRequired( - newOption("-v", "--verbose", 0, false), - newOptional( - newOption("-f", "--file", 1, nil)), - newOneOrMore( - newArgument("N", nil)))))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("(N [M | (K | L)] | O P)", &o) - q = newRequired( - newRequired( - newEither( - newRequired( - newArgument("N", nil), - newOptional( - newEither( - newArgument("M", nil), - newRequired( - newEither( - newArgument("K", nil), - newArgument("L", nil)))))), - newRequired( - newArgument("O", nil), - newArgument("P", nil))))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("[ -h ] [N]", &o) - q = newRequired( - newOptional( - newOption("-h", "", 0, false)), - newOptional( - newArgument("N", nil))) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("[options]", &o) - q = newRequired( - newOptional( - newOptionsShortcut())) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("[options] A", &o) - q = newRequired( - newOptional( - newOptionsShortcut()), - newArgument("A", nil)) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("-v [options]", &o) - q = newRequired( - newOption("-v", "--verbose", 0, false), - newOptional( - newOptionsShortcut())) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("ADD", &o) - q = newRequired(newArgument("ADD", nil)) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("", &o) - q = newRequired(newArgument("", nil)) - if p.eq(q) != true { - t.Error(err) - } - - p, err = parsePattern("add", &o) - q = newRequired(newCommand("add", false)) - if p.eq(q) != true { - t.Error(err) - } -} - -func TestOptionMatch(t *testing.T) { - v, w, x := newOption("-a", "", 0, false).match( - &patternList{newOption("-a", "", 0, true)}, nil) - y := patternList{newOption("-a", "", 0, true)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOption("-a", "", 0, false).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newOption("-a", "", 0, 
false).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - v, w, x = newOption("-a", "", 0, false).match( - &patternList{newArgument("N", nil)}, nil) - y = patternList{newArgument("N", nil)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newOption("-a", "", 0, false).match( - &patternList{ - newOption("-x", "", 0, false), - newOption("-a", "", 0, false), - newArgument("N", nil)}, nil) - y = patternList{ - newOption("-x", "", 0, false), - newArgument("N", nil)} - z := patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOption("-a", "", 0, false).match( - &patternList{ - newOption("-a", "", 0, true), - newOption("-a", "", 0, false)}, nil) - y = patternList{newOption("-a", "", 0, false)} - z = patternList{newOption("-a", "", 0, true)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestArgumentMatch(t *testing.T) { - v, w, x := newArgument("N", nil).match( - &patternList{newArgument("N", 9)}, nil) - y := patternList{newArgument("N", 9)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newArgument("N", nil).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newArgument("N", nil).match( - &patternList{newOption("-x", "", 0, false), - newOption("-a", "", 0, false), - newArgument("", 5)}, nil) - y = patternList{newOption("-x", "", 0, false), - newOption("-a", "", 0, false)} - z := patternList{newArgument("N", 5)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newArgument("N", nil).match( - &patternList{newArgument("", 9), - newArgument("", 0)}, nil) - y = patternList{newArgument("", 0)} - z = patternList{newArgument("N", 9)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestCommandMatch(t *testing.T) { - v, w, x := newCommand("c", false).match( - &patternList{newArgument("", "c")}, nil) - y := patternList{newCommand("c", true)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newCommand("c", false).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newCommand("c", false).match( - &patternList{ - newOption("-x", "", 0, false), - newOption("-a", "", 0, false), - newArgument("", "c")}, nil) - y = patternList{newOption("-x", "", 0, false), - newOption("-a", "", 0, false)} - z := patternList{newCommand("c", true)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newEither( - newCommand("add", false), - newCommand("rm", false)).match( - &patternList{newArgument("", "rm")}, nil) - y = 
patternList{newCommand("rm", true)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } -} - -func TestOptionalMatch(t *testing.T) { - v, w, x := newOptional(newOption("-a", "", 0, false)).match( - &patternList{newOption("-a", "", 0, false)}, nil) - y := patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false)).match( - &patternList{}, nil) - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false)).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match( - &patternList{newOption("-a", "", 0, false)}, nil) - y = patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match( - &patternList{newOption("-b", "", 0, false)}, nil) - y = patternList{newOption("-b", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newOptional(newArgument("N", nil)).match( - &patternList{newArgument("", 9)}, nil) - y = patternList{newArgument("N", 9)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOptional(newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match( - &patternList{newOption("-b", "", 0, false), - newOption("-x", "", 0, false), - newOption("-a", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z := patternList{newOption("-a", "", 0, false), - newOption("-b", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestRequiredMatch(t *testing.T) { - v, w, x := newRequired(newOption("-a", "", 0, false)).match( - &patternList{newOption("-a", "", 0, false)}, nil) - y := patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newRequired(newOption("-a", "", 0, false)).match(&patternList{}, nil) - if v != false || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - - v, w, x = newRequired(newOption("-a", "", 0, false)).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } - v, w, x = newRequired(newOption("-a", "", 0, false), - 
newOption("-b", "", 0, false)).match( - &patternList{newOption("-a", "", 0, false)}, nil) - y = patternList{newOption("-a", "", 0, false)} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, patternList{}) != true { - t.Fail() - } -} - -func TestEitherMatch(t *testing.T) { - v, w, x := newEither( - newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match( - &patternList{newOption("-a", "", 0, false)}, nil) - y := patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newEither( - newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match(&patternList{ - newOption("-a", "", 0, false), - newOption("-b", "", 0, false)}, nil) - y = patternList{newOption("-b", "", 0, false)} - z := patternList{newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newEither( - newOption("-a", "", 0, false), - newOption("-b", "", 0, false)).match(&patternList{ - newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z = patternList{} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newEither( - newOption("-a", "", 0, false), - newOption("-b", "", 0, false), - newOption("-c", "", 0, false)).match(&patternList{ - newOption("-x", "", 0, false), - newOption("-b", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z = patternList{newOption("-b", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - v, w, x = newEither( - newArgument("M", nil), - newRequired(newArgument("N", nil), - newArgument("M", nil))).match(&patternList{ - newArgument("", 1), - newArgument("", 2)}, nil) - y = patternList{} - z = patternList{newArgument("N", 1), newArgument("M", 2)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestOneOrMoreMatch(t *testing.T) { - v, w, x := newOneOrMore(newArgument("N", nil)).match( - &patternList{newArgument("", 9)}, nil) - y := patternList{newArgument("N", 9)} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newArgument("N", nil)).match( - &patternList{}, nil) - y = patternList{} - z := patternList{} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newArgument("N", nil)).match( - &patternList{newOption("-x", "", 0, false)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z = patternList{} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newArgument("N", nil)).match( - &patternList{newArgument("", 9), newArgument("", 8)}, nil) - y = patternList{} - z = patternList{newArgument("N", 9), newArgument("N", 8)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newArgument("N", nil)).match(&patternList{ - newArgument("", 9), - newOption("-x", "", 0, false), - newArgument("", 8)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z = patternList{newArgument("N", 9), newArgument("N", 8)} - if v != true || - 
reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newOption("-a", "", 0, false)).match(&patternList{ - newOption("-a", "", 0, false), - newArgument("", 8), - newOption("-a", "", 0, false)}, nil) - y = patternList{newArgument("", 8)} - z = patternList{newOption("-a", "", 0, false), newOption("-a", "", 0, false)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newOption("-a", "", 0, false)).match(&patternList{ - newArgument("", 8), - newOption("-x", "", 0, false)}, nil) - y = patternList{newArgument("", 8), newOption("-x", "", 0, false)} - z = patternList{} - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newRequired(newOption("-a", "", 0, false), - newArgument("N", nil))).match(&patternList{ - newOption("-a", "", 0, false), - newArgument("", 1), - newOption("-x", "", 0, false), - newOption("-a", "", 0, false), - newArgument("", 2)}, nil) - y = patternList{newOption("-x", "", 0, false)} - z = patternList{newOption("-a", "", 0, false), - newArgument("N", 1), - newOption("-a", "", 0, false), - newArgument("N", 2)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - v, w, x = newOneOrMore(newOptional(newArgument("N", nil))).match( - &patternList{newArgument("", 9)}, nil) - y = patternList{} - z = patternList{newArgument("N", 9)} - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestListArgumentMatch(t *testing.T) { - p := newRequired( - newArgument("N", nil), - newArgument("N", nil)) - p.fix() - v, w, x := p.match(&patternList{newArgument("", "1"), - newArgument("", "2")}, nil) - y := patternList{newArgument("N", []string{"1", "2"})} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - p = newOneOrMore(newArgument("N", nil)) - p.fix() - v, w, x = p.match(&patternList{newArgument("", "1"), - newArgument("", "2"), newArgument("", "3")}, nil) - y = patternList{newArgument("N", []string{"1", "2", "3"})} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - p = newRequired(newArgument("N", nil), - newOneOrMore(newArgument("N", nil))) - p.fix() - v, w, x = p.match(&patternList{ - newArgument("", "1"), - newArgument("", "2"), - newArgument("", "3")}, nil) - y = patternList{newArgument("N", []string{"1", "2", "3"})} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - p = newRequired(newArgument("N", nil), - newRequired(newArgument("N", nil))) - p.fix() - v, w, x = p.match(&patternList{ - newArgument("", "1"), - newArgument("", "2")}, nil) - y = patternList{newArgument("N", []string{"1", "2"})} - if v != true || - reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } -} - -func TestBasicPatternMatching(t *testing.T) { - // ( -a N [ -x Z ] ) - p := newRequired( - newOption("-a", "", 0, false), - newArgument("N", nil), - newOptional( - newOption("-x", "", 0, false), - newArgument("Z", nil))) - - // -a N - q := patternList{newOption("-a", "", 0, false), newArgument("", 9)} - y := patternList{newOption("-a", "", 0, false), newArgument("N", 9)} - v, w, x := p.match(&q, nil) - if v != true || - 
reflect.DeepEqual(*w, patternList{}) != true || - reflect.DeepEqual(*x, y) != true { - t.Fail() - } - - // -a -x N Z - q = patternList{newOption("-a", "", 0, false), - newOption("-x", "", 0, false), - newArgument("", 9), newArgument("", 5)} - y = patternList{} - z := patternList{newOption("-a", "", 0, false), newArgument("N", 9), - newOption("-x", "", 0, false), newArgument("Z", 5)} - v, w, x = p.match(&q, nil) - if v != true || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } - - // -x N Z # BZZ! - q = patternList{newOption("-x", "", 0, false), - newArgument("", 9), newArgument("", 5)} - y = patternList{newOption("-x", "", 0, false), - newArgument("", 9), newArgument("", 5)} - z = patternList{} - v, w, x = p.match(&q, nil) - if v != false || - reflect.DeepEqual(*w, y) != true || - reflect.DeepEqual(*x, z) != true { - t.Fail() - } -} - -func TestPatternEither(t *testing.T) { - p := newOption("-a", "", 0, false).transform() - q := newEither(newRequired( - newOption("-a", "", 0, false))) - if p.eq(q) != true { - t.Fail() - } - - p = newArgument("A", nil).transform() - q = newEither(newRequired( - newArgument("A", nil))) - if p.eq(q) != true { - t.Fail() - } - - p = newRequired( - newEither( - newOption("-a", "", 0, false), - newOption("-b", "", 0, false)), - newOption("-c", "", 0, false)).transform() - q = newEither( - newRequired( - newOption("-a", "", 0, false), - newOption("-c", "", 0, false)), - newRequired( - newOption("-b", "", 0, false), - newOption("-c", "", 0, false))) - if p.eq(q) != true { - t.Fail() - } - - p = newOptional(newOption("-a", "", 0, false), - newEither(newOption("-b", "", 0, false), - newOption("-c", "", 0, false))).transform() - q = newEither( - newRequired( - newOption("-b", "", 0, false), newOption("-a", "", 0, false)), - newRequired( - newOption("-c", "", 0, false), newOption("-a", "", 0, false))) - if p.eq(q) != true { - t.Fail() - } - - p = newEither(newOption("-x", "", 0, false), - newEither(newOption("-y", "", 0, false), - newOption("-z", "", 0, false))).transform() - q = newEither( - newRequired(newOption("-x", "", 0, false)), - newRequired(newOption("-y", "", 0, false)), - newRequired(newOption("-z", "", 0, false))) - if p.eq(q) != true { - t.Fail() - } - - p = newOneOrMore(newArgument("N", nil), - newArgument("M", nil)).transform() - q = newEither( - newRequired(newArgument("N", nil), newArgument("M", nil), - newArgument("N", nil), newArgument("M", nil))) - if p.eq(q) != true { - t.Fail() - } -} - -func TestPatternFixRepeatingArguments(t *testing.T) { - p := newOption("-a", "", 0, false) - p.fixRepeatingArguments() - if p.eq(newOption("-a", "", 0, false)) != true { - t.Fail() - } - - p = newArgument("N", nil) - p.fixRepeatingArguments() - if p.eq(newArgument("N", nil)) != true { - t.Fail() - } - - p = newRequired( - newArgument("N", nil), - newArgument("N", nil)) - q := newRequired( - newArgument("N", []string{}), - newArgument("N", []string{})) - p.fixRepeatingArguments() - if p.eq(q) != true { - t.Fail() - } - - p = newEither( - newArgument("N", nil), - newOneOrMore(newArgument("N", nil))) - q = newEither( - newArgument("N", []string{}), - newOneOrMore(newArgument("N", []string{}))) - p.fix() - if p.eq(q) != true { - t.Fail() - } -} - -func TestSet(t *testing.T) { - p := newArgument("N", nil) - q := newArgument("N", nil) - if reflect.DeepEqual(p, q) != true { - t.Fail() - } - pl := patternList{newArgument("N", nil), newArgument("N", nil)} - ql := patternList{newArgument("N", nil)} - if 
reflect.DeepEqual(pl.unique(), ql.unique()) != true { - t.Fail() - } -} - -func TestPatternFixIdentities1(t *testing.T) { - p := newRequired( - newArgument("N", nil), - newArgument("N", nil)) - if len(p.children) < 2 { - t.FailNow() - } - if p.children[0].eq(p.children[1]) != true { - t.Fail() - } - if p.children[0] == p.children[1] { - t.Fail() - } - p.fixIdentities(nil) - if p.children[0] != p.children[1] { - t.Fail() - } -} - -func TestPatternFixIdentities2(t *testing.T) { - p := newRequired( - newOptional( - newArgument("X", nil), - newArgument("N", nil)), - newArgument("N", nil)) - if len(p.children) < 2 { - t.FailNow() - } - if len(p.children[0].children) < 2 { - t.FailNow() - } - if p.children[0].children[1].eq(p.children[1]) != true { - t.Fail() - } - if p.children[0].children[1] == p.children[1] { - t.Fail() - } - p.fixIdentities(nil) - if p.children[0].children[1] != p.children[1] { - t.Fail() - } -} - -func TestLongOptionsErrorHandling(t *testing.T) { - _, err := Parse("Usage: prog", []string{"--non-existent"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(fmt.Sprintf("(%s) %s", reflect.TypeOf(err), err)) - } - _, err = Parse("Usage: prog [--version --verbose]\nOptions: --version\n --verbose", - []string{"--ver"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog --long\nOptions: --long ARG", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog --long ARG\nOptions: --long ARG", - []string{"--long"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(fmt.Sprintf("(%s) %s", reflect.TypeOf(err), err)) - } - _, err = Parse("Usage: prog --long=ARG\nOptions: --long", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog --long\nOptions: --long", - []string{}, true, "--long=ARG", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } -} - -func TestShortOptionsErrorHandling(t *testing.T) { - _, err := Parse("Usage: prog -x\nOptions: -x this\n -x that", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(fmt.Sprintf("(%s) %s", reflect.TypeOf(err), err)) - } - _, err = Parse("Usage: prog", []string{"-x"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog -o\nOptions: -o ARG", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog -o ARG\nOptions: -o ARG", []string{"-o"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } -} - -func TestMatchingParen(t *testing.T) { - _, err := Parse("Usage: prog [a [b]", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } - _, err = Parse("Usage: prog [a [b] ] c )", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } -} - -func TestAllowDoubleDash(t *testing.T) { - if v, err := Parse("usage: prog [-o] [--] \noptions: -o", []string{"--", "-o"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-o": false, "": "-o", "--": true}) != true { - t.Error(err) - } - if v, err := Parse("usage: prog [-o] [--] \noptions: -o", []string{"-o", "1"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-o": true, "": "1", "--": false}) != true { - t.Error(err) - } - _, err := 
Parse("usage: prog [-o] \noptions:-o", []string{"-o"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { //"--" is not allowed; FIXME? - t.Error(err) - } -} - -func TestDocopt(t *testing.T) { - doc := `Usage: prog [-v] A - - Options: -v Be verbose.` - if v, err := Parse(doc, []string{"arg"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": false, "A": "arg"}) != true { - t.Error(err) - } - if v, err := Parse(doc, []string{"-v", "arg"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": true, "A": "arg"}) != true { - t.Error(err) - } - - doc = `Usage: prog [-vqr] [FILE] - prog INPUT OUTPUT - prog --help - - Options: - -v print status messages - -q report only file names - -r show all occurrences of the same error - --help - - ` - if v, err := Parse(doc, []string{"-v", "file.py"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": true, "-q": false, "-r": false, "--help": false, "FILE": "file.py", "INPUT": nil, "OUTPUT": nil}) != true { - t.Error(err) - } - if v, err := Parse(doc, []string{"-v"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": true, "-q": false, "-r": false, "--help": false, "FILE": nil, "INPUT": nil, "OUTPUT": nil}) != true { - t.Error(err) - } - - _, err := Parse(doc, []string{"-v", "input.py", "output.py"}, true, "", false, false) // does not match - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - _, err = Parse(doc, []string{"--fake"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - _, output, err := parseOutput(doc, []string{"--hel"}, true, "", false) - if err != nil || len(output) == 0 { - t.Error(err) - } -} - -func TestLanguageErrors(t *testing.T) { - _, err := Parse("no usage with colon here", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } - _, err = Parse("usage: here \n\n and again usage: here", []string{}, true, "", false, false) - if _, ok := err.(*LanguageError); !ok { - t.Error(err) - } -} - -func TestIssue40(t *testing.T) { - _, output, err := parseOutput("usage: prog --help-commands | --help", []string{"--help"}, true, "", false) - if err != nil || len(output) == 0 { - t.Error(err) - } - if v, err := Parse("usage: prog --aabb | --aa", []string{"--aa"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--aabb": false, "--aa": true}) != true { - t.Error(err) - } -} - -func TestIssue34UnicodeStrings(t *testing.T) { - // TODO: see if applicable -} - -func TestCountMultipleFlags(t *testing.T) { - if v, err := Parse("usage: prog [-v]", []string{"-v"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": true}) != true { - t.Error(err) - } - if v, err := Parse("usage: prog [-vv]", []string{}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": 0}) != true { - t.Error(err) - } - if v, err := Parse("usage: prog [-vv]", []string{"-v"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": 1}) != true { - t.Error(err) - } - if v, err := Parse("usage: prog [-vv]", []string{"-vv"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": 2}) != true { - t.Error(err) - } - _, err := Parse("usage: prog [-vv]", []string{"-vvv"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Error(err) - } - if v, err := Parse("usage: prog [-v | -vv | -vvv]", []string{"-vvv"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": 3}) != true { - 
t.Error(err) - } - if v, err := Parse("usage: prog [-v...]", []string{"-vvvvvv"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-v": 6}) != true { - t.Error(err) - } - if v, err := Parse("usage: prog [--ver --ver]", []string{"--ver", "--ver"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--ver": 2}) != true { - t.Error(err) - } -} - -func TestAnyOptionsParameter(t *testing.T) { - _, err := Parse("usage: prog [options]", - []string{"-foo", "--bar", "--spam=eggs"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } - - _, err = Parse("usage: prog [options]", - []string{"--foo", "--bar", "--bar"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } - _, err = Parse("usage: prog [options]", - []string{"--bar", "--bar", "--bar", "-ffff"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } - _, err = Parse("usage: prog [options]", - []string{"--long=arg", "--long=another"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } -} - -func TestDefaultValueForPositionalArguments(t *testing.T) { - doc := "Usage: prog [--data=...]\nOptions:\n\t-d --data= Input data [default: x]" - if v, err := Parse(doc, []string{}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--data": []string{"x"}}) != true { - t.Error(err) - } - - doc = "Usage: prog [--data=...]\nOptions:\n\t-d --data= Input data [default: x y]" - if v, err := Parse(doc, []string{}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--data": []string{"x", "y"}}) != true { - t.Error(err) - } - - doc = "Usage: prog [--data=...]\nOptions:\n\t-d --data= Input data [default: x y]" - if v, err := Parse(doc, []string{"--data=this"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--data": []string{"this"}}) != true { - t.Error(err) - } -} - -func TestIssue59(t *testing.T) { - if v, err := Parse("usage: prog --long=", []string{"--long="}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--long": ""}) != true { - t.Error(err) - } - - if v, err := Parse("usage: prog -l \noptions: -l ", []string{"-l", ""}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"-l": ""}) != true { - t.Error(err) - } -} - -func TestOptionsFirst(t *testing.T) { - if v, err := Parse("usage: prog [--opt] [...]", []string{"--opt", "this", "that"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--opt": true, "": []string{"this", "that"}}) != true { - t.Error(err) - } - - if v, err := Parse("usage: prog [--opt] [...]", []string{"this", "that", "--opt"}, true, "", false, false); reflect.DeepEqual(v, map[string]interface{}{"--opt": true, "": []string{"this", "that"}}) != true { - t.Error(err) - } - - if v, err := Parse("usage: prog [--opt] [...]", []string{"this", "that", "--opt"}, true, "", true, false); reflect.DeepEqual(v, map[string]interface{}{"--opt": false, "": []string{"this", "that", "--opt"}}) != true { - t.Error(err) - } -} - -func TestIssue68OptionsShortcutDoesNotIncludeOptionsInUsagePattern(t *testing.T) { - args, err := Parse("usage: prog [-ab] [options]\noptions: -x\n -y", []string{"-ax"}, true, "", false, false) - - if args["-a"] != true { - t.Error(err) - } - if args["-b"] != false { - t.Error(err) - } - if args["-x"] != true { - t.Error(err) - } - if args["-y"] != false { - t.Error(err) - } -} - -func TestIssue65EvaluateArgvWhenCalledNotWhenImported(t *testing.T) { - os.Args = strings.Fields("prog 
-a") - v, err := Parse("usage: prog [-ab]", nil, true, "", false, false) - w := map[string]interface{}{"-a": true, "-b": false} - if reflect.DeepEqual(v, w) != true { - t.Error(err) - } - - os.Args = strings.Fields("prog -b") - v, err = Parse("usage: prog [-ab]", nil, true, "", false, false) - w = map[string]interface{}{"-a": false, "-b": true} - if reflect.DeepEqual(v, w) != true { - t.Error(err) - } -} - -func TestIssue71DoubleDashIsNotAValidOptionArgument(t *testing.T) { - _, err := Parse("usage: prog [--log=LEVEL] [--] ...", - []string{"--log", "--", "1", "2"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } - - _, err = Parse(`usage: prog [-l LEVEL] [--] ... - options: -l LEVEL`, []string{"-l", "--", "1", "2"}, true, "", false, false) - if _, ok := err.(*UserError); !ok { - t.Fail() - } -} - -func TestParseSection(t *testing.T) { - v := parseSection("usage:", "foo bar fizz buzz") - w := []string{} - if reflect.DeepEqual(v, w) != true { - t.Fail() - } - - v = parseSection("usage:", "usage: prog") - w = []string{"usage: prog"} - if reflect.DeepEqual(v, w) != true { - t.Fail() - } - - v = parseSection("usage:", "usage: -x\n -y") - w = []string{"usage: -x\n -y"} - if reflect.DeepEqual(v, w) != true { - t.Fail() - } - - usage := `usage: this - -usage:hai -usage: this that - -usage: foo - bar - -PROGRAM USAGE: - foo - bar -usage: -` + "\t" + `too -` + "\t" + `tar -Usage: eggs spam -BAZZ -usage: pit stop` - - v = parseSection("usage:", usage) - w = []string{"usage: this", - "usage:hai", - "usage: this that", - "usage: foo\n bar", - "PROGRAM USAGE:\n foo\n bar", - "usage:\n\ttoo\n\ttar", - "Usage: eggs spam", - "usage: pit stop", - } - if reflect.DeepEqual(v, w) != true { - t.Fail() - } -} - -func TestIssue126DefaultsNotParsedCorrectlyWhenTabs(t *testing.T) { - section := "Options:\n\t--foo= [default: bar]" - v := patternList{newOption("", "--foo", 1, "bar")} - if reflect.DeepEqual(parseDefaults(section), v) != true { - t.Fail() - } -} - -// conf file based test cases -func TestFileTestcases(t *testing.T) { - filenames := []string{"testcases.docopt", "test_golang.docopt"} - for _, filename := range filenames { - raw, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - - tests, err := parseTest(raw) - if err != nil { - t.Fatal(err) - } - for _, c := range tests { - result, err := Parse(c.doc, c.argv, true, "", false, false) - if _, ok := err.(*UserError); c.userError && !ok { - // expected a user-error - t.Error("testcase:", c.id, "result:", result) - } else if _, ok := err.(*UserError); !c.userError && ok { - // unexpected user-error - t.Error("testcase:", c.id, "error:", err, "result:", result) - } else if reflect.DeepEqual(c.expect, result) != true { - t.Error("testcase:", c.id, "result:", result, "expect:", c.expect) - } - } - } -} - -type testcase struct { - id int - doc string - prog string - argv []string - expect map[string]interface{} - userError bool -} - -func parseTest(raw []byte) ([]testcase, error) { - var res []testcase - commentPattern := regexp.MustCompile("#.*") - raw = commentPattern.ReplaceAll(raw, []byte("")) - raw = bytes.TrimSpace(raw) - if bytes.HasPrefix(raw, []byte(`"""`)) { - raw = raw[3:] - } - - id := 0 - for _, fixture := range bytes.Split(raw, []byte(`r"""`)) { - doc, _, body := stringPartition(string(fixture), `"""`) - for _, cas := range strings.Split(body, "$")[1:] { - argvString, _, expectString := stringPartition(strings.TrimSpace(cas), "\n") - prog, _, argvString := 
stringPartition(strings.TrimSpace(argvString), " ") - argv := []string{} - if len(argvString) > 0 { - argv = strings.Fields(argvString) - } - var expectUntyped interface{} - err := json.Unmarshal([]byte(expectString), &expectUntyped) - if err != nil { - return nil, err - } - switch expect := expectUntyped.(type) { - case string: // user-error - res = append(res, testcase{id, doc, prog, argv, nil, true}) - case map[string]interface{}: - // convert []interface{} values to []string - // convert float64 values to int - for k, vUntyped := range expect { - switch v := vUntyped.(type) { - case []interface{}: - itemList := make([]string, len(v)) - for i, itemUntyped := range v { - if item, ok := itemUntyped.(string); ok { - itemList[i] = item - } - } - expect[k] = itemList - case float64: - expect[k] = int(v) - } - } - res = append(res, testcase{id, doc, prog, argv, expect, false}) - default: - return nil, fmt.Errorf("unhandled json data type") - } - id++ - } - } - return res, nil -} - -// parseOutput wraps the Parse() function to also return stdout -func parseOutput(doc string, argv []string, help bool, version string, - optionsFirst bool) (map[string]interface{}, string, error) { - stdout := os.Stdout - r, w, _ := os.Pipe() - os.Stdout = w - - args, err := Parse(doc, argv, help, version, optionsFirst, false) - - outChan := make(chan string) - go func() { - var buf bytes.Buffer - io.Copy(&buf, r) - outChan <- buf.String() - }() - - w.Close() - os.Stdout = stdout - output := <-outChan - - return args, output, err -} - -var debugEnabled = false - -func debugOn(l ...interface{}) { - debugEnabled = true - debug(l...) -} -func debugOff(l ...interface{}) { - debug(l...) - debugEnabled = false -} - -func debug(l ...interface{}) { - if debugEnabled { - fmt.Println(l...) - } -} diff --git a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/example_test.go b/client/Godeps/_workspace/src/github.com/docopt/docopt-go/example_test.go deleted file mode 100644 index b87a149a..00000000 --- a/client/Godeps/_workspace/src/github.com/docopt/docopt-go/example_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package docopt - -import ( - "fmt" - "sort" -) - -func ExampleParse() { - usage := `Usage: - config_example tcp [] [--force] [--timeout=] - config_example serial [--baud=] [--timeout=] - config_example -h | --help | --version` - // parse the command line `comfig_example tcp 127.0.0.1 --force` - argv := []string{"tcp", "127.0.0.1", "--force"} - arguments, _ := Parse(usage, argv, true, "0.1.1rc", false) - // sort the keys of the arguments map - var keys []string - for k := range arguments { - keys = append(keys, k) - } - sort.Strings(keys) - // print the argument keys and values - for _, k := range keys { - fmt.Printf("%9s %v\n", k, arguments[k]) - } - // output: - // --baud - // --force true - // --help false - // --timeout - // --version false - // -h false - // 127.0.0.1 - // - // serial false - // tcp true -} diff --git a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext.go b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext.go index 7bef46f0..c0de8b7f 100644 --- a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext.go +++ b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext.go @@ -7,12 +7,18 @@ package osext import "path/filepath" +var cx, ce = executableClean() + +func executableClean() (string, error) { + p, err := executable() + return filepath.Clean(p), err +} + // Executable returns an absolute path that can be used to // re-invoke the current program. 
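+// The path is resolved once at package load time (see executableClean above)
+// and the cached result is returned on every call.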
// It may not be valid after the current program exits. func Executable() (string, error) { - p, err := executable() - return filepath.Clean(p), err + return cx, ce } // Returns same path as Executable, returns just the folder diff --git a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go index b2598bc7..d59847ee 100644 --- a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go +++ b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux netbsd openbsd solaris dragonfly +// +build linux netbsd solaris dragonfly package osext @@ -27,7 +27,7 @@ func executable() (string, error) { return execpath, nil case "netbsd": return os.Readlink("/proc/curproc/exe") - case "openbsd", "dragonfly": + case "dragonfly": return os.Readlink("/proc/curproc/file") case "solaris": return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) diff --git a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go index b66cac87..66da0bcf 100644 --- a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go +++ b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin freebsd +// +build darwin freebsd openbsd package osext import ( "os" + "os/exec" "path/filepath" "runtime" "syscall" @@ -23,6 +24,8 @@ func executable() (string, error) { mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} case "darwin": mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + case "openbsd": + mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} } n := uintptr(0) @@ -42,14 +45,58 @@ func executable() (string, error) { if n == 0 { // This shouldn't happen. return "", nil } - for i, v := range buf { - if v == 0 { - buf = buf[:i] - break + + var execPath string + switch runtime.GOOS { + case "openbsd": + // buf now contains **argv, with pointers to each of the C-style + // NULL terminated arguments. + var args []string + argv := uintptr(unsafe.Pointer(&buf[0])) + Loop: + for { + argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) + if argp == nil { + break + } + for i := 0; uintptr(i) < n; i++ { + // we don't want the full arguments list + if string(argp[i]) == " " { + break Loop + } + if argp[i] != 0 { + continue + } + args = append(args, string(argp[:i])) + n -= uintptr(i) + break + } + if n < unsafe.Sizeof(argv) { + break + } + argv += unsafe.Sizeof(argv) + n -= unsafe.Sizeof(argv) + } + execPath = args[0] + // There is no canonical way to get an executable path on + // OpenBSD, so check PATH in case we are called directly + if execPath[0] != '/' && execPath[0] != '.' { + execIsInPath, err := exec.LookPath(execPath) + if err == nil { + execPath = execIsInPath + } } + default: + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + execPath = string(buf) } + var err error - execPath := string(buf) // execPath will not be empty due to above checks. // Try to get the absolute path if the execPath is not rooted. 
if execPath[0] != '/' { diff --git a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go b/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go deleted file mode 100644 index 77ccc28e..00000000 --- a/client/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin linux freebsd netbsd windows - -package osext - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "testing" -) - -const ( - executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE" - - executableEnvValueMatch = "match" - executableEnvValueDelete = "delete" -) - -func TestPrintExecutable(t *testing.T) { - ef, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - t.Log("Executable:", ef) -} -func TestPrintExecutableFolder(t *testing.T) { - ef, err := ExecutableFolder() - if err != nil { - t.Fatalf("ExecutableFolder failed: %v", err) - } - t.Log("Executable Folder:", ef) -} -func TestExecutableFolder(t *testing.T) { - ef, err := ExecutableFolder() - if err != nil { - t.Fatalf("ExecutableFolder failed: %v", err) - } - if ef[len(ef)-1] == filepath.Separator { - t.Fatal("ExecutableFolder ends with a trailing slash.") - } -} -func TestExecutableMatch(t *testing.T) { - ep, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - - // fullpath to be of the form "dir/prog". - dir := filepath.Dir(filepath.Dir(ep)) - fullpath, err := filepath.Rel(dir, ep) - if err != nil { - t.Fatalf("filepath.Rel: %v", err) - } - // Make child start with a relative program path. - // Alter argv[0] for child to verify getting real path without argv[0]. 
- cmd := &exec.Cmd{ - Dir: dir, - Path: fullpath, - Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)}, - } - out, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("exec(self) failed: %v", err) - } - outs := string(out) - if !filepath.IsAbs(outs) { - t.Fatalf("Child returned %q, want an absolute path", out) - } - if !sameFile(outs, ep) { - t.Fatalf("Child returned %q, not the same file as %q", out, ep) - } -} - -func TestExecutableDelete(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip() - } - fpath, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - - r, w := io.Pipe() - stderrBuff := &bytes.Buffer{} - stdoutBuff := &bytes.Buffer{} - cmd := &exec.Cmd{ - Path: fpath, - Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)}, - Stdin: r, - Stderr: stderrBuff, - Stdout: stdoutBuff, - } - err = cmd.Start() - if err != nil { - t.Fatalf("exec(self) start failed: %v", err) - } - - tempPath := fpath + "_copy" - _ = os.Remove(tempPath) - - err = copyFile(tempPath, fpath) - if err != nil { - t.Fatalf("copy file failed: %v", err) - } - err = os.Remove(fpath) - if err != nil { - t.Fatalf("remove running test file failed: %v", err) - } - err = os.Rename(tempPath, fpath) - if err != nil { - t.Fatalf("rename copy to previous name failed: %v", err) - } - - w.Write([]byte{0}) - w.Close() - - err = cmd.Wait() - if err != nil { - t.Fatalf("exec wait failed: %v", err) - } - - childPath := stderrBuff.String() - if !filepath.IsAbs(childPath) { - t.Fatalf("Child returned %q, want an absolute path", childPath) - } - if !sameFile(childPath, fpath) { - t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath) - } -} - -func sameFile(fn1, fn2 string) bool { - fi1, err := os.Stat(fn1) - if err != nil { - return false - } - fi2, err := os.Stat(fn2) - if err != nil { - return false - } - return os.SameFile(fi1, fi2) -} -func copyFile(dest, src string) error { - df, err := os.Create(dest) - if err != nil { - return err - } - defer df.Close() - - sf, err := os.Open(src) - if err != nil { - return err - } - defer sf.Close() - - _, err = io.Copy(df, sf) - return err -} - -func TestMain(m *testing.M) { - env := os.Getenv(executableEnvVar) - switch env { - case "": - os.Exit(m.Run()) - case executableEnvValueMatch: - // First chdir to another path. 
- dir := "/" - if runtime.GOOS == "windows" { - dir = filepath.VolumeName(".") - } - os.Chdir(dir) - if ep, err := Executable(); err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - } else { - fmt.Fprint(os.Stderr, ep) - } - case executableEnvValueDelete: - bb := make([]byte, 1) - var err error - n, err := os.Stdin.Read(bb) - if err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - os.Exit(2) - } - if n != 1 { - fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n) - os.Exit(2) - } - if ep, err := Executable(); err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - } else { - fmt.Fprint(os.Stderr, ep) - } - } - os.Exit(0) -} diff --git a/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go b/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go index b6d35027..051f1116 100644 --- a/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go +++ b/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go @@ -5,6 +5,7 @@ import ( "errors" "os" "os/exec" + "path/filepath" "runtime" "strings" ) @@ -43,7 +44,7 @@ func Expand(path string) (string, error) { return "", err } - return dir + path[1:], nil + return filepath.Join(dir, path[1:]), nil } func dirUnix() (string, error) { diff --git a/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go b/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go deleted file mode 100644 index 89e74c37..00000000 --- a/client/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package homedir - -import ( - "fmt" - "os/user" - "testing" -) - -func TestDir(t *testing.T) { - u, err := user.Current() - if err != nil { - t.Fatalf("err: %s", err) - } - - dir, err := Dir() - if err != nil { - t.Fatalf("err: %s", err) - } - - if u.HomeDir != dir { - t.Fatalf("%#v != %#v", u.HomeDir, dir) - } -} - -func TestExpand(t *testing.T) { - u, err := user.Current() - if err != nil { - t.Fatalf("err: %s", err) - } - - cases := []struct { - Input string - Output string - Err bool - }{ - { - "/foo", - "/foo", - false, - }, - - { - "~/foo", - fmt.Sprintf("%s/foo", u.HomeDir), - false, - }, - - { - "", - "", - false, - }, - - { - "~", - u.HomeDir, - false, - }, - - { - "~foo/foo", - "", - true, - }, - } - - for _, tc := range cases { - actual, err := Expand(tc.Input) - if (err != nil) != tc.Err { - t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err) - } - - if actual != tc.Output { - t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual) - } - } -} diff --git a/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore new file mode 100644 index 00000000..f9d9cd8a --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore @@ -0,0 +1,11 @@ +_obj +_test +*.6 +*.out +_testmain.go +\#* +.\#* +*.log +_cgo* +*.o +*.a diff --git a/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING new file mode 100644 index 00000000..d7849fd8 --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2011 by Krzysztof Kowalik + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md
new file mode 100644
index 00000000..e3d025d5
--- /dev/null
+++ b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md
@@ -0,0 +1,21 @@
+# Pure Go UUID implementation
+
+This package provides immutable UUID structs and the functions
+NewV3, NewV4, NewV5 and Parse() for generating version 3, 4
+and 5 UUIDs as specified in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt).
+
+## Installation
+
+Use the `go` tool:
+
+    $ go get github.com/nu7hatch/gouuid
+
+## Usage
+
+See [documentation and examples](http://godoc.org/github.com/nu7hatch/gouuid)
+for more information.
+
+## Copyright
+
+Copyright (C) 2011 by Krzysztof Kowalik . See [COPYING](https://github.com/nu7hatch/gouuid/tree/master/COPYING)
+file for details.
diff --git a/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go
new file mode 100644
index 00000000..ac9623b7
--- /dev/null
+++ b/client/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go
@@ -0,0 +1,173 @@
+// This package provides immutable UUID structs and the functions
+// NewV3, NewV4, NewV5 and Parse() for generating version 3, 4
+// and 5 UUIDs as specified in RFC 4122.
+//
+// Copyright (C) 2011 by Krzysztof Kowalik 
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"regexp"
+)
+
+// The UUID reserved variants.
+const (
+	ReservedNCS       byte = 0x80
+	ReservedRFC4122   byte = 0x40
+	ReservedMicrosoft byte = 0x20
+	ReservedFuture    byte = 0x00
+)
+
+// The following standard UUIDs are for use with NewV3() or NewV5().
+var (
+	NamespaceDNS, _  = ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceURL, _  = ParseHex("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceOID, _  = ParseHex("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceX500, _ = ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// Pattern used to parse the hex string representation of the UUID.
+// FIXME: do something to consider both brackets at one time; the
+// current pattern allows parsing a string with only one opening
+// or closing bracket.
+const hexPattern = "^(urn\\:uuid\\:)?\\{?([a-z0-9]{8})-([a-z0-9]{4})-" +
+	"([1-5][a-z0-9]{3})-([a-z0-9]{4})-([a-z0-9]{12})\\}?$"
+
+var re = regexp.MustCompile(hexPattern)
+
+// A UUID representation compliant with the specification in the
+// RFC 4122 document.
+type UUID [16]byte
+
+// ParseHex creates a UUID object from the given hex string
+// representation. It accepts UUID strings in the following
+// formats:
+//
+//	uuid.ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+//	uuid.ParseHex("{6ba7b814-9dad-11d1-80b4-00c04fd430c8}")
+//	uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+//
+func ParseHex(s string) (u *UUID, err error) {
+	md := re.FindStringSubmatch(s)
+	if md == nil {
+		err = errors.New("Invalid UUID string")
+		return
+	}
+	hash := md[2] + md[3] + md[4] + md[5] + md[6]
+	b, err := hex.DecodeString(hash)
+	if err != nil {
+		return
+	}
+	u = new(UUID)
+	copy(u[:], b)
+	return
+}
+
+// Parse creates a UUID object from the given byte slice.
+func Parse(b []byte) (u *UUID, err error) {
+	if len(b) != 16 {
+		err = errors.New("Given slice is not valid UUID sequence")
+		return
+	}
+	u = new(UUID)
+	copy(u[:], b)
+	return
+}
+
+// NewV3 generates a UUID based on the MD5 hash of a namespace identifier
+// and a name.
+func NewV3(ns *UUID, name []byte) (u *UUID, err error) {
+	if ns == nil {
+		err = errors.New("Invalid namespace UUID")
+		return
+	}
+	u = new(UUID)
+	// Set all bits to MD5 hash generated from namespace and name.
+	u.setBytesFromHash(md5.New(), ns[:], name)
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(3)
+	return
+}
+
+// NewV4 generates a random UUID.
+func NewV4() (u *UUID, err error) {
+	u = new(UUID)
+	// Set all bits to randomly (or pseudo-randomly) chosen values.
+	_, err = rand.Read(u[:])
+	if err != nil {
+		return
+	}
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(4)
+	return
+}
+
+// NewV5 generates a UUID based on the SHA-1 hash of a namespace identifier
+// and a name.
+func NewV5(ns *UUID, name []byte) (u *UUID, err error) {
+	u = new(UUID)
+	// Set all bits to truncated SHA1 hash generated from namespace
+	// and name.
+	u.setBytesFromHash(sha1.New(), ns[:], name)
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(5)
+	return
+}
+
+// Generate a hash of the namespace and name with the given hash function,
+// and copy it to the UUID slice.
+func (u *UUID) setBytesFromHash(hash hash.Hash, ns, name []byte) {
+	hash.Write(ns[:])
+	hash.Write(name)
+	copy(u[:], hash.Sum([]byte{})[:16])
+}
+
+// Set the two most significant bits (bits 6 and 7) of the
+// clock_seq_hi_and_reserved to zero and one, respectively.
+func (u *UUID) setVariant(v byte) {
+	switch v {
+	case ReservedNCS:
+		u[8] = (u[8] | ReservedNCS) & 0xBF
+	case ReservedRFC4122:
+		u[8] = (u[8] | ReservedRFC4122) & 0x7F
+	case ReservedMicrosoft:
+		u[8] = (u[8] | ReservedMicrosoft) & 0x3F
+	}
+}
+
+// Variant returns the UUID Variant, which determines the internal
+// layout of the UUID. This will be one of the constants: ReservedNCS,
+// ReservedRFC4122, ReservedMicrosoft, ReservedFuture.
+func (u *UUID) Variant() byte {
+	if u[8]&ReservedNCS == ReservedNCS {
+		return ReservedNCS
+	} else if u[8]&ReservedRFC4122 == ReservedRFC4122 {
+		return ReservedRFC4122
+	} else if u[8]&ReservedMicrosoft == ReservedMicrosoft {
+		return ReservedMicrosoft
+	}
+	return ReservedFuture
+}
+
+// Set the four most significant bits (bits 12 through 15) of the
+// time_hi_and_version field to the 4-bit version number.
+func (u *UUID) setVersion(v byte) {
+	u[6] = (u[6] & 0xF) | (v << 4)
+}
+
+// Version returns the version number of the algorithm used to
+// generate the UUID sequence.
+func (u *UUID) Version() uint {
+	return uint(u[6] >> 4)
+}
+
+// String returns the unparsed (canonical textual) version of the UUID sequence.
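+// For example (an illustrative, randomly generated value):
+//
+//	u, _ := uuid.NewV4()
+//	fmt.Println(u) // e.g. "3e6e6a58-62e9-4b5f-9f1a-0d4e2f7c8a1b"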
+func (u *UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/client/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go b/client/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go deleted file mode 100644 index 4998e7c0..00000000 --- a/client/Godeps/_workspace/src/github.com/olekukonko/ts/ts_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Terminal API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package ts - -import ( - "fmt" - "testing" -) - -func ExampleGetSize() { - size, _ := GetSize() - fmt.Println(size.Col()) // Get Width - fmt.Println(size.Row()) // Get Height - fmt.Println(size.PosX()) // Get X position - fmt.Println(size.PosY()) // Get Y position -} - -func TestSize(t *testing.T) { - size, err := GetSize() - - if err != nil { - t.Fatal(err) - } - if size.Col() == 0 || size.Row() == 0 { - t.Fatalf("Screen Size Failed") - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/context/.gitignore b/client/Godeps/_workspace/src/github.com/root-gg/context/.gitignore deleted file mode 100644 index 485dee64..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/context/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea diff --git a/client/Godeps/_workspace/src/github.com/root-gg/context/README.md b/client/Godeps/_workspace/src/github.com/root-gg/context/README.md deleted file mode 100644 index f69d9101..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/context/README.md +++ /dev/null @@ -1,2 +0,0 @@ -ROOT-GG Application context -=========================== diff --git a/client/Godeps/_workspace/src/github.com/root-gg/context/context.go b/client/Godeps/_workspace/src/github.com/root-gg/context/context.go deleted file mode 100644 index abe5890b..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/context/context.go +++ /dev/null @@ -1,269 +0,0 @@ -package context - -import ( - "bytes" - "errors" - "fmt" - "github.com/root-gg/plik/client/Godeps/_workspace/src/github.com/root-gg/utils" - "sync" - "time" -) - -var Running = errors.New("running") -var Success = errors.New("success") -var Canceled = errors.New("canceled") -var Timedout = errors.New("timedout") - -type Context struct { - parent *Context - name string - elapsed utils.SplitTime - splits []*utils.SplitTime - done chan struct{} - children []*Context - timeout time.Duration - timer *time.Timer - status error - lock sync.RWMutex - values map[interface{}]interface{} -} - -func NewContext(name string) (ctx *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - ctx = new(Context) - ctx.status = Running - ctx.elapsed = *utils.NewSplitTime("") - ctx.elapsed.Start() - ctx.name = name - ctx.done = make(chan struct{}) - ctx.children = make([]*Context, 0) - ctx.values = make(map[interface{}]interface{}) - return -} - -func NewContextWithTimeout(name string, timeout time.Duration) (ctx *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - ctx = NewContext(name) - ctx.timeout = timeout - ctx.timer = time.NewTimer(timeout) - go func() { - select { - case <-ctx.timer.C: - ctx.Finalize(Timedout) - case <-ctx.Done(): - ctx.timer.Stop() - } - }() - return -} - -func (ctx *Context) Fork(name 
string) (fork *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - fork = NewContext(name) - fork.parent = ctx - ctx.children = append(ctx.children, fork) - return -} - -func (ctx *Context) ForkWithTimeout(name string, timeout time.Duration) (fork *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - fork = NewContextWithTimeout(name, timeout) - fork.parent = ctx - ctx.children = append(ctx.children, fork) - return -} - -func (ctx *Context) Name() string { - return ctx.name -} - -func (ctx *Context) Done() (done <-chan struct{}) { - done = ctx.done - return -} - -func (ctx *Context) Wait() { - if ctx.status == Running { - <-ctx.done - } -} - -func (ctx *Context) waitAllChildren(root bool) { - for _, child := range ctx.children { - child.waitAllChildren(false) - } - if !root { - ctx.Wait() - } -} - -func (ctx *Context) WaitAllChildren() { - ctx.waitAllChildren(true) - return -} - -func (ctx *Context) Status() (status error) { - if ctx.status == nil { - status = Success - } else { - status = ctx.status - } - return ctx.status -} - -func (ctx *Context) Finalize(err error) { - ctx.lock.Lock() - defer ctx.lock.Unlock() - if ctx.status != Running { - return - } - ctx.status = err - ctx.elapsed.Stop() - close(ctx.done) -} - -func (ctx *Context) Cancel() { - ctx.Finalize(Canceled) - for _, child := range ctx.Children() { - child.Cancel() - } -} - -func (ctx *Context) AutoCancel() *Context { - go func() { - <-ctx.Done() - ctx.Cancel() - }() - return ctx -} - -func (ctx *Context) DetachChild(child *Context) { - for i := 0; i < len(ctx.children); i++ { - if ctx.children[i] == child { - ctx.children = append(ctx.children[:i], ctx.children[i+1:]...) - } - } -} - -func (ctx *Context) AutoDetach() *Context { - go func() { - <-ctx.Done() - if ctx.parent != nil { - ctx.parent.DetachChild(ctx) - } - }() - return ctx -} - -func (ctx *Context) AutoDetachChild(child *Context) { - go func() { - <-child.Done() - ctx.DetachChild(child) - }() -} - -func (ctx *Context) allChildren(children []*Context) []*Context { - children = append(children, ctx.children...) 
- for _, child := range ctx.children { - children = child.allChildren(children) - } - return children -} - -func (ctx *Context) AllChildren() []*Context { - return ctx.allChildren([]*Context{}) -} - -func (ctx *Context) Children() []*Context { - return ctx.children -} - -func (ctx *Context) Set(key interface{}, value interface{}) { - ctx.values[key] = value -} - -func (ctx *Context) Get(key interface{}) (interface{}, bool) { - if value, ok := ctx.values[key]; ok { - return value, true - } else { - if ctx.parent != nil { - return ctx.parent.Get(key) - } - } - return nil, false -} - -func (ctx *Context) StartDate() *time.Time { - return ctx.elapsed.StartDate() -} - -func (ctx *Context) EndDate() *time.Time { - return ctx.elapsed.StopDate() -} - -func (ctx *Context) Elapsed() time.Duration { - return ctx.elapsed.Elapsed() -} - -func (ctx *Context) Deadline() time.Time { - return ctx.StartDate().Add(ctx.timeout) -} - -func (ctx *Context) Remaining() time.Duration { - return ctx.Deadline().Sub(time.Now()) -} - -func (ctx *Context) Time(name string) (split *utils.SplitTime) { - if ctx.splits == nil { - ctx.splits = make([]*utils.SplitTime, 0) - } - split = utils.NewSplitTime(name) - ctx.splits = append(ctx.splits, split) - split.Start() - return -} - -func (ctx *Context) Timers() []*utils.SplitTime { - return ctx.splits -} - -func (ctx *Context) string(depth int) string { - str := bytes.NewBufferString("") - var pad string - for i := 0; i < depth; i++ { - pad += " " - } - str.WriteString(pad) - if depth > 0 { - str.WriteString("`->") - } - str.WriteString(fmt.Sprintf("%s : status %s, elapsed %s\n", ctx.name, ctx.Status(), ctx.Elapsed().String())) - if ctx.splits != nil { - for _, split := range ctx.splits { - str.WriteString(pad) - str.WriteString(" - ") - str.WriteString(split.String()) - str.WriteString("\n") - } - } - for _, child := range ctx.Children() { - str.WriteString(child.string(depth + 1)) - } - return str.String() -} - -func (ctx *Context) String() string { - return ctx.string(0) -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/context/context_test.go b/client/Godeps/_workspace/src/github.com/root-gg/context/context_test.go deleted file mode 100644 index d5fc2879..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/context/context_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package context - -import ( - "errors" - "fmt" - "github.com/root-gg/plik/client/Godeps/_workspace/src/github.com/root-gg/utils" - "testing" - "time" -) - -func TestMain(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - child.Fork("fork2") - if child.Status() != Running { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Running) - } - child.Finalize(nil) - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } - children := root.AllChildren() - if len(children) != 2 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 2) - } -} - -func TestDefaultName(t *testing.T) { - root := NewContext("") - defaultName := "TestDefaultName" - if root.Name() != defaultName { - t.Errorf("Invalid child default name %s instead of %s", root.Name, defaultName) - } - child := root.Fork("") - if child.Name() != defaultName { - t.Errorf("Invalid child default name %s instead of %s", child.Name, defaultName) - } -} - -func TestDates(t *testing.T) { - root := NewContext("ROOT") - fmt.Printf("StartDate : %s\n", root.StartDate().String()) - fmt.Printf("Running since : %s\n", root.Elapsed().String()) 
- if root.EndDate() != nil { - t.Error("EndDate on running context") - } - root.Finalize(Success) - fmt.Printf("EndDate : %s\n", root.StartDate().String()) - fmt.Printf("Has run : %s\n", root.Elapsed().String()) -} - -func TestTimers(t *testing.T) { - root := NewContext("ROOT") - root.Time("t1").Stop() - root.Time("t2") - timers := root.Timers() - if len(timers) != 2 { - t.Errorf("Invalid timer count %d instead of %d", len(root.Timers()), 2) - } - if timers[0].Status() != utils.Stopped { - t.Errorf("Invalid timer %s status %s instead of %s", timers[0].Name(), timers[0].Status(), utils.Stopped) - } - if timers[1].Status() != utils.Running { - t.Errorf("Invalid timer %s status %s instead of %s", timers[1].Name(), timers[1].Status(), utils.Running) - } -} - -func TestFinalize(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - go func() { child.Finalize(Success) }() - child.Wait() - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } -} - -func TestWaitAllChildren(t *testing.T) { - root := NewContext("ROOT") - child1 := root.Fork("fork1") - child2 := child1.Fork("fork2") - child3 := child2.Fork("fork3") - go func() { - time.Sleep(100 * time.Millisecond) - child1.Finalize(Success) - time.Sleep(100 * time.Millisecond) - child3.Finalize(Success) - time.Sleep(100 * time.Millisecond) - child2.Finalize(Success) - }() - root.WaitAllChildren() - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } - } -} - -func TestStatusOverride(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - var err = errors.New("error") - go func() { child.Finalize(err) }() - child.Wait() - child.Finalize(Success) - if child.Status() != err { - t.Errorf("Invalid child status %s instead of %s", child.Status(), err) - } -} - -func TestTimeoutOk(t *testing.T) { - root := NewContext("ROOT") - child := root.ForkWithTimeout("", 200*time.Millisecond) - go func() { - time.Sleep(100 * time.Millisecond) - child.Finalize(Success) - }() - <-child.Done() - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } -} - -func TestTimeoutKo(t *testing.T) { - root := NewContext("ROOT") - child := root.ForkWithTimeout("", 100*time.Millisecond) - go func() { - time.Sleep(200 * time.Millisecond) - child.Finalize(Success) - }() - <-child.Done() - if child.Status() != Timedout { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } -} - -func TestTimeoutDates(t *testing.T) { - root := NewContextWithTimeout("", 100*time.Millisecond) - fmt.Printf("Deadline is : %s\n", root.Deadline().String()) - fmt.Printf("Remaining time : %s\n", root.Remaining().String()) - child := root.Fork("") - if child.Deadline() != *child.StartDate() { - t.Errorf("Invalid deadline for non timed context : %s\n", child.Deadline().String()) - } - if child.Remaining().Seconds() > 0 { - t.Errorf("Invalid remaining for non timed context : %s\n", child.Remaining().String()) - } -} - -func TestCancel(t *testing.T) { - root := NewContext("ROOT") - root.Fork("").Fork("").Fork("") - root.Cancel() - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if 
child.Status() != Canceled { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } - } -} - -func TestAutoCancel(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1").AutoCancel() - child.Fork("").Fork("").Fork("") - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children := child.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if child.Status() != Canceled { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Canceled) - } - } -} - -func TestDetach(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("") - child.Fork("").Fork("") - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - root.DetachChild(child) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } -} - -func TestAutoDetach(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1").AutoDetach() - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestDetachChild(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - root.DetachChild(child) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestAutoDetachChild(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - root.AutoDetachChild(child) - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestValue(t *testing.T) { - root := NewContext("ROOT") - root.Set("foo", "bar") - value, ok := root.Get("foo") - if !ok { - t.Error("Missing value for key \"foo\"") - } - if value.(string) != "bar" { - t.Error("Invalid value \"%s\" for key \"foo\" sould be \"bar\"", value) - } - child := root.Fork("fork1") - value, ok = child.Get("foo") - if !ok { - t.Error("Missing value for key \"foo\" in child context") - } - if value.(string) != "bar" { - t.Error("Invalid value \"%s\" for key \"foo\" child context sould be \"bar\"", value) - } -} - -func TestMissingValue(t *testing.T) { - root := NewContext("ROOT") - root.Set("go", "lang") - child := root.Fork("scala") - child.Set("sca", "la") - child2 := child.Fork("java") - child2.Get("ja") - value, ok := child.Get("foo") - if ok { - t.Error("Missing key \"ja\" should be missing") - } - if value != nil { - t.Error("Missing value \"%s\" for key \"foo\" should be missing", value) - } -} - -func TestValueOverride(t *testing.T) { - root := NewContext("ROOT") - root.Set("foo", "bar") - child := root.Fork("") - child.Set("foo", "baz") - value, ok := root.Get("foo") - if !ok { - t.Error("Missing value for key foo") - } - if value.(string) != "bar" { - 
t.Error("Invalid value \"%s\" for key foo sould be \"bar\"", value) - } - value, ok = child.Get("foo") - if !ok { - t.Error("Missing value for key foo in child context") - } - if value.(string) != "baz" { - t.Error("Invalid value \"%s\" for key foo child context sould be \"baz\"", value) - } -} - -func TestDisplay(t *testing.T) { - root := NewContext("ROOT") - fork1 := root.Fork("fork1") - fork1.Fork("fork11") - fork1.Fork("fork12").Fork("fork121") - fork1.Finalize(Success) - fork1.Cancel() - fork2 := root.Fork("fork2") - fork2.Fork("fork21") - fork2.Fork("fork22").Fork("fork221") - fork2.Time("t1").Stop() - fork2.Time("t2").Stop() - fork2.Time("t3") - fmt.Println(root.String()) -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore b/client/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore new file mode 100644 index 00000000..a388e694 --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore @@ -0,0 +1,2 @@ +.idea +juliet diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml b/client/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml new file mode 100644 index 00000000..70eed362 --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip \ No newline at end of file diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE b/client/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE new file mode 100644 index 00000000..6c01cb3e --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) <2015> + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/README.md b/client/Godeps/_workspace/src/github.com/root-gg/juliet/README.md new file mode 100644 index 00000000..7eb7f11f --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/README.md @@ -0,0 +1,60 @@ + +Juliet is a lightweight middleware chaining helper that pass a Context (map) object +from a middleware to the next one. + +This is a fork of [Stack](https://github.com/alexedwards/stack) by Alex Edwards +witch is inspired by [Alice](https://github.com/justinas/alice) by Justinas Stankevicius. 
+ +### Write a ContextMiddleware +``` + // Write a ContextMiddleware + func middleware(ctx *juliet.Context,w next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + // Play with the context + ctx.Set("key", "value") + + // Pass the request to the next middleware / handler + next.ServeHTTP(resp, req) + }) + } + + // To create a new chain + chain := juliet.NewChain(middleware1,middleware2) + + // To append a middleware at the end of the chain + chain = chain.Append(middleware3,middleware4) + + // To append a middleware at the beginning of a chain + chain = juliet.NewChain(firstMiddleware).AppendChain(chain) + + // Classic middleware without context can be added to the chain using the Adapt function + func middlewareWithoutContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // middleware logic + next.ServeHTTP(w, r) + }) + } + + chain = chain.Append(juliet.Adapt(middlewareWithoutContext)) + + // Write a ContextHandler + func handler(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + // play with context + value, _ := ctx.Get("key") + + // write http response + resp.Write([]byte(fmt.Sprintf("value is %v\n", value))) + } + + // Execute a middleware chain + http.Handle("/", chain.Then(handler)) + + // Classic http.Handler without context + http.Handle("/404", chain.ThenHandler(ttp.NotFoundHandler)) + + // Classic http.HandlerFunc without context + func pingHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("pong")) + } + http.Handle("/ping", chain.ThenHandlerFunc(pingHandler)) +``` \ No newline at end of file diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/context.go b/client/Godeps/_workspace/src/github.com/root-gg/juliet/context.go new file mode 100644 index 00000000..15cb1320 --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/context.go @@ -0,0 +1,57 @@ +package juliet + +import ( + "fmt" +) + +// Context hold a map[interface{}]interface{} to pass along the middleware chain. +type Context struct { + values map[interface{}]interface{} +} + +// NewContext creates a new context instance. +func NewContext() (ctx *Context) { + ctx = new(Context) + ctx.values = make(map[interface{}]interface{}) + return +} + +// Get returns the value matching the key from the context. +func (ctx *Context) Get(key interface{}) (value interface{}, ok bool) { + value, ok = ctx.values[key] + return +} + +// Set adds a value to the context or overrides a parent value. +func (ctx *Context) Set(key interface{}, val interface{}) { + ctx.values[key] = val +} + +// Delete remove a value from the context. +func (ctx *Context) Delete(key interface{}) { + delete(ctx.values, key) +} + +// Clear remove all values from the context. +func (ctx *Context) Clear() { + for key := range ctx.values { + delete(ctx.values, key) + } +} + +// Copy creates a new copy of the context. +func (ctx *Context) Copy() *Context { + nc := NewContext() + for key, value := range ctx.values { + nc.values[key] = value + } + return nc +} + +// String returns a string representation of the context values. 
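+// Each entry is rendered on its own line as "key => value"; note that map
+// iteration order is not deterministic.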
+func (ctx *Context) String() (str string) {
+	for key, value := range ctx.values {
+		str += fmt.Sprintf("%v => %v\n", key, value)
+	}
+	return
+}
diff --git a/client/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go b/client/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go
new file mode 100644
index 00000000..589f3e36
--- /dev/null
+++ b/client/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go
@@ -0,0 +1,164 @@
+package juliet
+
+import (
+	"net/http"
+)
+
+// ContextMiddleware is a constructor to close a Context into a middleware.
+type ContextMiddleware func(ctx *Context, next http.Handler) http.Handler
+
+// ContextHandler is a constructor to close a Context into an http.Handler.
+type ContextHandler func(ctx *Context) http.Handler
+
+// ContextHandlerFunc is a constructor to close a Context into an http.HandlerFunc.
+type ContextHandlerFunc func(ctx *Context, resp http.ResponseWriter, req *http.Request)
+
+// Chain is a wrapper for a ContextMiddleware instance,
+// linking to the previous middleware.
+type Chain struct {
+	parent     *Chain
+	middleware ContextMiddleware
+}
+
+// NewChain creates a new ContextMiddleware chain.
+func NewChain(cm ...ContextMiddleware) (chain *Chain) {
+	chain = new(Chain)
+	if len(cm) > 0 {
+		chain.middleware = cm[0]
+		if len(cm) > 1 {
+			chain = chain.Append(cm[1:]...)
+		}
+	}
+	return
+}
+
+// append adds a single ContextMiddleware to the chain.
+func (chain *Chain) append(cm ContextMiddleware) (newChain *Chain) {
+	newChain = NewChain(cm)
+	newChain.parent = chain
+	return newChain
+}
+
+// Append adds ContextMiddleware(s) to the chain.
+func (chain *Chain) Append(cms ...ContextMiddleware) (newChain *Chain) {
+	newChain = chain
+	for _, cm := range cms {
+		newChain = newChain.append(cm)
+	}
+
+	return newChain
+}
+
+// Adapt adds context to a middleware so it can be added to the chain.
+func Adapt(fn func(http.Handler) http.Handler) ContextMiddleware {
+	return func(ctx *Context, h http.Handler) http.Handler {
+		return fn(h)
+	}
+}
+
+// head returns the top/first middleware of the Chain.
+func (chain *Chain) head() (head *Chain) {
+	// Find the head of the chain
+	head = chain
+	for head.parent != nil {
+		head = head.parent
+	}
+	return
+}
+
+// copy duplicates the whole chain of ContextMiddleware.
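+// Parent links are duplicated recursively, so the copy shares no *Chain
+// nodes with the original.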
+func (chain *Chain) copy() (newChain *Chain) {
+ newChain = NewChain(chain.middleware)
+ if chain.parent != nil {
+ newChain.parent = chain.parent.copy()
+ }
+ return
+}
+
+// AppendChain duplicates a chain and links it to the current chain.
+// An append to the old chain doesn't alter the new one.
+func (chain *Chain) AppendChain(tail *Chain) (newChain *Chain) {
+ // Copy the chain to attach
+ newChain = tail.copy()
+
+ // Attach the chain to extend to the new tail
+ newChain.head().parent = chain
+
+ // Return the new tail
+ return
+}
+
+// Then adds a ContextHandlerFunc to the end of the chain
+// and returns an http.Handler compliant ChainHandler
+func (chain *Chain) Then(fn ContextHandlerFunc) (ch *ChainHandler) {
+ ch = newHandler(chain, adaptContextHandlerFunc(fn))
+ return
+}
+
+// ThenHandler adds an http.Handler to the end of the chain
+// and returns an http.Handler compliant ChainHandler
+func (chain *Chain) ThenHandler(handler http.Handler) (ch *ChainHandler) {
+ ch = newHandler(chain, adaptHandler(handler))
+ return
+}
+
+// ThenHandlerFunc adds an http.HandlerFunc to the end of the chain
+// and returns an http.Handler compliant ChainHandler
+func (chain *Chain) ThenHandlerFunc(fn func(http.ResponseWriter, *http.Request)) (ch *ChainHandler) {
+ ch = newHandler(chain, adaptHandlerFunc(fn))
+ return
+}
+
+// ChainHandler holds a chain and a final handler.
+// It satisfies the http.Handler interface and can be
+// served directly by a net/http server.
+type ChainHandler struct {
+ chain *Chain
+ handler ContextHandler
+}
+
+// newHandler creates a new ChainHandler from a chain and a final handler.
+func newHandler(chain *Chain, handler ContextHandler) (ch *ChainHandler) {
+ ch = new(ChainHandler)
+ ch.chain = chain
+ ch.handler = handler
+ return
+}
+
+// ServeHTTP builds the chain of handlers in order, closing the context along the way, and executes it.
+func (ch *ChainHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ ctx := NewContext()
+
+ // Build the context handler chain
+ handler := ch.handler(ctx)
+ chain := ch.chain
+ for chain != nil {
+ if chain.middleware != nil {
+ handler = chain.middleware(ctx, handler)
+ }
+ chain = chain.parent
+ }
+
+ handler.ServeHTTP(resp, req)
+}
+
+// adaptContextHandlerFunc adapts a ContextHandlerFunc into a ContextHandler
+func adaptContextHandlerFunc(fn ContextHandlerFunc) ContextHandler {
+ return func(ctx *Context) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fn(ctx, w, r)
+ })
+ }
+}
+
+// adaptHandler adapts an http.Handler into a ContextHandler
+func adaptHandler(h http.Handler) ContextHandler {
+ return func(ctx *Context) http.Handler {
+ return h
+ }
+}
+
+// adaptHandlerFunc adapts an http.HandlerFunc into a ContextHandler
+func adaptHandlerFunc(fn func(w http.ResponseWriter, r *http.Request)) ContextHandler {
+ return adaptHandler(http.HandlerFunc(fn))
+}
diff --git a/client/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go b/client/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go
deleted file mode 100644
index bd426567..00000000
--- a/client/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-package logger
-
-import (
- "bytes"
- "fmt"
- "github.com/root-gg/plik/client/Godeps/_workspace/src/github.com/root-gg/utils"
- "io/ioutil"
- "os"
- "path"
- "testing"
- "time"
-)
-
-var logMessage string = "This is a log message\n"
-
-func TestNew(t *testing.T) {
- logger := NewLogger()
- if logger.MinLevel != MinLevel {
- t.Errorf("Invalid timer default level %s instead of %s", logger.MinLevel, MinLevel)
- }
- logger.Log(INFO, logMessage)
-}
-
-func TestLogger(t *testing.T) {
- buffer := bytes.NewBuffer([]byte{})
- logger := NewLogger().SetOutput(buffer).SetFlags(0)
- logger.Log(INFO, logMessage)
- output, err := ioutil.ReadAll(buffer)
- if err != nil {
- t.Errorf("Unable to read logger output : %s", err)
- }
- if string(output) != logMessage {
- t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), logMessage)
- }
-}
-
-func TestAutoNewLine(t *testing.T) {
- buffer := bytes.NewBuffer([]byte{})
- logger := NewLogger().SetOutput(buffer).SetFlags(0)
- logger.Log(INFO, "This is a log message")
- output, err := ioutil.ReadAll(buffer)
- if err != nil {
- t.Errorf("Unable to read logger output : %s", err)
- }
- if string(output) != logMessage {
- t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), logMessage)
- }
-}
-
-func TestPrefix(t *testing.T) {
- buffer := bytes.NewBuffer([]byte{})
- prefix := "prefix"
- logger := NewLogger().SetOutput(buffer).SetFlags(0).SetPrefix(prefix)
- expected := fmt.Sprintf("[%s] %s", prefix, logMessage)
- logger.Info(logMessage)
- output, err := ioutil.ReadAll(buffer)
- if err != nil {
- t.Errorf("Unable to read logger output : %s", err)
- }
- if string(output) != expected {
- t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected)
- }
-}
-
-func TestDateFormat(t *testing.T) {
- buffer := bytes.NewBuffer([]byte{})
- logger := NewLogger().SetOutput(buffer).SetFlags(Fdate).SetDateFormat("01/02/2006")
- expected := fmt.Sprintf("[%s] %s", time.Now().Format("01/02/2006"), logMessage)
- logger.Info(logMessage)
- output, err := ioutil.ReadAll(buffer)
- if err != nil {
- t.Errorf("Unable to read logger output : %s", err)
- }
- if string(output) != expected {
- t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), 
expected) - } -} - -func TestShortFile(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFile) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d] %s", path.Base(file), line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestLongFile(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FlongFile) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d] %s", file, line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestShortFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFunction) - expected := fmt.Sprintf("[%s] %s", "TestShortFunction", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestLongFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FlongFunction) - expected := fmt.Sprintf("[%s] %s", "github.com/root-gg/logger.TestLongFunction", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestFileAndFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFile | FshortFunction) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d TestFileAndFunction] %s", path.Base(file), line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestCallDepth(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFunction).SetCallDepth(1) - expected := fmt.Sprintf("[%s] %s", "Log", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestDebug(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(DEBUG) - expected := fmt.Sprintf("[%s] %s", levels[DEBUG], logMessage) - logger.Debug(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), 
expected) - } - buffer.Reset() - logger.Debugf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(DEBUG) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestInfo(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(INFO) - expected := fmt.Sprintf("[%s] %s", levels[INFO], logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Infof("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(INFO) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestWarning(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(WARNING) - expected := fmt.Sprintf("[%s] %s", levels[WARNING], logMessage) - logger.Warning(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Warningf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(WARNING) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestCritical(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(CRITICAL) - expected := fmt.Sprintf("[%s] %s", levels[CRITICAL], logMessage) - logger.Critical(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Criticalf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(CRITICAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestFatal(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(FATAL) - expected := fmt.Sprintf("[%s] %s", levels[FATAL], logMessage) - var exitcode int = 0 - exiter = func(code int) { - exitcode = code - } - logger.Fatal(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != 
expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - if exitcode != 1 { - t.Errorf("Invalid exit code %d instead %d", exitcode, 1) - } - exitcode = 0 - buffer.Reset() - logger.Fatalf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - if exitcode != 1 { - t.Errorf("Invalid exit code %d instead %d", exitcode, 1) - } - logIf := logger.LogIf(FATAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestFixedSizeLevel(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel | FfixedSizeLevel) - expected := fmt.Sprintf("[%-8s] %s", levels[INFO], logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestMinLevel(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetMinLevel(FATAL) - buffer.Reset() - logger.Debug(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf := logger.LogIf(DEBUG) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Info(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(INFO) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Warning(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(WARNING) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Critical(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(CRITICAL) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - logIf = logger.LogIf(FATAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestMinLevelFromString(t *testing.T) { - logger := NewLogger() - logger.SetMinLevelFromString("DEBUG") - if logger.MinLevel != DEBUG { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, DEBUG) - } - logger.SetMinLevelFromString("INVALID") - if logger.MinLevel != DEBUG { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, DEBUG) - } - logger.SetMinLevelFromString("INFO") - if logger.MinLevel != INFO { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, INFO) - } - logger.SetMinLevelFromString("WARNING") - if logger.MinLevel != WARNING { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, WARNING) - } - 
logger.SetMinLevelFromString("CRITICAL") - if logger.MinLevel != CRITICAL { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, CRITICAL) - } - logger.SetMinLevelFromString("FATAL") - if logger.MinLevel != FATAL { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, FATAL) - } -} - -func TestError(t *testing.T) { - devNull, err := os.Open(os.DevNull) - if err != nil { - t.Errorf("Unable to open %s : %s", os.DevNull, err) - } - logger := NewLogger().SetOutput(devNull) - err = logger.EWarning("Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.EWarningf("Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } - err = logger.ECritical("Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.ECriticalf("Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } - err = logger.Error(DEBUG, "Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.Errorf(DEBUG, "Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } -} - -func TestCopy(t *testing.T) { - logger1 := NewLogger().SetPrefix("logger1") - logger2 := logger1.Copy().SetPrefix("logger2") - if logger1.Prefix != "logger1" { - t.Errorf("Invalid logger prefix %t instead of %t", logger1.Prefix, "logger1") - } - if logger2.Prefix != "logger2" { - t.Errorf("Invalid logger prefix %t instead of %t", logger2.Prefix, "logger2") - } -} - -type TestData struct { - Foo string -} - -func TestDump(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(0) - logger.Dump(INFO, TestData{"bar"}) - expected := "{\n \"Foo\": \"bar\"\n}\n" - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go deleted file mode 100644 index 80fd9622..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestBytesToString(t *testing.T) { - - // Test for all units - testBytes := BytesToString(123) // Should get : 123 B - testKiloBytes := BytesToString(4755) // Should get : 4.64 KB - testMegaBytes := BytesToString(6541615) // Should get : 6.24 MB - testGigaBytes := BytesToString(2571257332) // Should get : 2.39 GB - - if testBytes != "123 B" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(123)", testBytes, "123 B") - } else if testKiloBytes != "4.64 KB" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(4755)", testBytes, "4.64 KB") - } else if testMegaBytes != "6.24 MB" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(6541615)", testBytes, "6.24 MB") - } else if testGigaBytes != "2.39 GB" { - 
t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(2571257332)", testBytes, "2.39 GB") - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go deleted file mode 100644 index e2f1020a..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package utils - -import ( - "fmt" - "path" - "testing" -) - -func TestGetCaller(t *testing.T) { - file, line, function := GetCaller(1) - filename := path.Base(file) - if filename != "caller_test.go" { - t.Errorf("Invalid file name %s instead of %s", filename, "caller_test.go") - } - if line != 10 { - t.Errorf("Invalid line %d instead of %d", line, 10) - } - if function != "github.com/root-gg/utils.TestGetCaller" { - t.Errorf("Invalid function %s instead of %s", function, "github.com/root-gg/utils.TestGetCaller") - } - fmt.Printf("%s:%d : %s\n", file, line, function) - return -} - -func TestParseFunction(t *testing.T) { - _, _, fct := GetCaller(1) - pkg, function := ParseFunction(fct) - if pkg != "github.com/root-gg/utils" { - t.Errorf("Invalid package name %s instead of %s", pkg, "github.com/root-gg/utils") - } - if function != "TestParseFunction" { - t.Errorf("Invalid package name %s instead of %s", function, "TestParseFunction") - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go deleted file mode 100644 index bfec4ee3..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import ( - "testing" -) - -type TestDumper struct { - Foo string -} - -func TestDump(t *testing.T) { - Dump(TestDumper{"bar"}) -} - -func TestSdump(t *testing.T) { - dump := Sdump(TestDumper{"bar"}) - expected := "{\n \"Foo\": \"bar\"\n}" - if dump != expected { - t.Errorf("Invalid dump got %s instead of %s", dump, expected) - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go deleted file mode 100644 index ddbdb727..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package utils - -import "testing" - -type TestJson struct { - Foo string -} - -func TestToJson(t *testing.T) { - data := TestJson{"bar"} - json, err := ToJson(data) - if err != nil { - t.Errorf("Unable to serialize %v to json : %s", data, err) - } - expected := "{\"Foo\":\"bar\"}" - if string(json) != expected { - t.Errorf("Invalid dump got %s instead of %s", string(json), expected) - } -} - -func TestToJsonString(t *testing.T) { - data := TestJson{"bar"} - json, err := ToJsonString(data) - if err != nil { - t.Errorf("Unable to serialize %v to json : %s", data, err) - } - expected := "{\"Foo\":\"bar\"}" - if json != expected { - t.Errorf("Invalid dump got %s instead of %s", json, expected) - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go deleted file mode 100644 index 12568f17..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package utils - -import ( - "testing" - "os" -) - -func TestMd5sum(t *testing.T) { - md5sum, err := Md5sum("Lorem ipsum dolor sit amet") - if err != nil { - t.Errorf("Unable to compute md5sum : 
%s", err) - } - sum := "fea80f2db003d4ebc4536023814aa885" - if md5sum != sum { - t.Errorf("Invalid md5sum got %s instead of %s", md5sum, sum) - } - return -} - -func TestFileMd5sum(t *testing.T) { - path := os.TempDir() + "/" + "testFileMd5Sum" - f, err := os.Create(path) - if err != nil { - t.Errorf("Unable to open test file %s : %s", path, err) - } - _, err = f.Write([]byte("Lorem ipsum dolor sit amet")) - if err != nil { - t.Errorf("Unable to write test file %s : %s", path, err) - } - err = f.Close() - if err != nil { - t.Errorf("Unable to close test file %s : %s", path, err) - } - md5sum, err := FileMd5sum(path) - if err != nil { - t.Errorf("Unable to compute md5sum : %s", err) - } - sum := "fea80f2db003d4ebc4536023814aa885" - if md5sum != sum { - t.Errorf("Invalid md5sum got %s instead of %s", md5sum, sum) - } - err = os.Remove(path) - return -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/net.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/net.go new file mode 100644 index 00000000..5617559f --- /dev/null +++ b/client/Godeps/_workspace/src/github.com/root-gg/utils/net.go @@ -0,0 +1,23 @@ +package utils + +import ( + "net" +) + +func NtoI(ip net.IP) (ipInt uint32) { + ip = ip.To4() + ipInt |= uint32(ip[0]) << 24 + ipInt |= uint32(ip[1]) << 16 + ipInt |= uint32(ip[2]) << 8 + ipInt |= uint32(ip[3]) + return +} + +func ItoN(ipInt uint32) net.IP { + bytes := make([]byte, 4) + bytes[0] = byte(ipInt >> 24 & 0xFF) + bytes[1] = byte(ipInt >> 16 & 0xFF) + bytes[2] = byte(ipInt >> 8 & 0xFF) + bytes[3] = byte(ipInt & 0xFF) + return net.IP(bytes) +} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go deleted file mode 100644 index fe7198e4..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package utils - -import ( - "testing" -) - -type TestReflect struct { - Foo string - Map map[string]string -} - -func TestAssign(t *testing.T) { - values := make(map[string]interface{}) - values["Foo"] = "bar" - values["Map"] = map[string]string{"go": "pher"} - values["Ja"] = "va" - test := new(TestReflect) - Assign(test, values) - if test.Foo != "bar" { - t.Errorf("Invalid dume got %s instead of %s", test.Foo, "bar") - } - if test.Map == nil { - t.Error("Missing value for Map") - } - if v, ok := test.Map["go"]; ok { - if v != "pher" { - t.Errorf("Invalid dume got %s instead of %s", v, "pher") - } - } else { - t.Error("Missing value for map key \"go\"") - } - return -} - -func TestToInterfaceArray(t *testing.T) { - ToInterfaceArray([]int{1, 2, 3, 4, 5, 6}) -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/strings_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/strings_test.go deleted file mode 100644 index 1114a3e9..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/strings_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestChomp(t *testing.T) { - str := "foo\n" - result := Chomp(str) - if result != "foo" { - t.Errorf("Invalid string chomp got %s instead of %s", result, "foo") - } - str = "bar" - result = Chomp(str) - if result != "bar" { - t.Errorf("Invalid string chomp got %s instead of %s", result, "bar") - } -} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/time.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/time.go new file mode 100644 index 00000000..1eafc53c --- /dev/null +++ 
b/client/Godeps/_workspace/src/github.com/root-gg/utils/time.go @@ -0,0 +1,12 @@ +package utils + +import "time" + +func TruncateDuration(d time.Duration, precision time.Duration) time.Duration { + if d == 0 { + return time.Duration(0) + } + p := float64(precision) + n := float64(int(float64(d)/p)) * p + return time.Duration(n) +} diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/timer.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/timer.go index 91eaf7c0..80f97891 100644 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/timer.go +++ b/client/Godeps/_workspace/src/github.com/root-gg/utils/timer.go @@ -13,6 +13,7 @@ var Uninitalized = errors.New("uninitalized") type SplitTime struct { name string start *time.Time + split *time.Time stop *time.Time } @@ -37,6 +38,22 @@ func (split *SplitTime) StartDate() *time.Time { return split.start } +func (split *SplitTime) Split() (elapsed time.Duration) { + if split.start != nil { + if split.stop == nil { + now := time.Now() + if split.split == nil { + elapsed = now.Sub(*split.start) + } else { + elapsed = now.Sub(*split.split) + } + split.split = &now + return + } + } + return +} + func (split *SplitTime) Stop() { if split.stop == nil { now := time.Now() diff --git a/client/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go b/client/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go deleted file mode 100644 index 0ad05cf9..00000000 --- a/client/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package utils - -import ( - "fmt" - "testing" - "time" -) - -func TestNewTimer(t *testing.T) { - timer := NewSplitTime("main") - if timer.Name() != "main" { - t.Errorf("Invalid timer name %s instead of %s", timer.Name(), "main") - } -} - -func TestTimerStatus(t *testing.T) { - timer := NewSplitTime("timer") - if timer.Status() != Uninitalized { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Uninitalized) - } - timer.Start() - if timer.Status() != Running { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Running) - } - timer.Stop() - if timer.Status() != Stopped { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Stopped) - } -} - -func TestTimerDates(t *testing.T) { - timer := NewSplitTime("timer") - if timer.StartDate() != nil { - t.Error("Start date on uninitalized timer : %s", timer.StartDate().String()) - } - if timer.StopDate() != nil { - t.Error("Stop date on uninitalized timer : %s", timer.StopDate().String()) - } - timer.Start() - if timer.StartDate() == nil { - t.Error("Missing start date on running timer") - } - if timer.StopDate() != nil { - t.Error("Stop date on running timer : %s", timer.StopDate().String()) - } - timer.Stop() - if timer.StartDate() == nil { - t.Error("Missing start date on stopped timer") - } - if timer.StopDate() == nil { - t.Error("Missing stop date on stopped timer") - } -} - -func TestTimerImmutability(t *testing.T) { - timer := NewSplitTime("timer") - timer.Start() - startDate1 := timer.StartDate() - timer.Start() - startDate2 := timer.StartDate() - if startDate1 != startDate2 { - t.Errorf("Non immutable start date : %s != %s", startDate1.String(), startDate2.String()) - } - timer.Stop() - stopDate1 := timer.StopDate() - timer.Stop() - stopDate2 := timer.StopDate() - if stopDate1 != stopDate2 { - t.Errorf("Non immutable stop date : %s != %s", stopDate1.String(), stopDate2.String()) - } - timer.Start() - if timer.Status() != Stopped { - t.Errorf("Non immutable timer 
status %s instead of %s", timer.Status(), Stopped) - } - startDate3 := timer.StartDate() - if startDate1 != startDate3 { - t.Errorf("Non immutable start date : %s != %s", startDate1.String(), startDate3.String()) - } -} - -func TestTimerElapsed(t *testing.T) { - timer := NewSplitTime("timer") - if timer.Elapsed() != time.Duration(0) { - t.Errorf("Invalid uninitialized timer elapsed time %s", timer.Elapsed().String()) - } - timer.Start() - if timer.Elapsed() <= time.Duration(0) { - t.Errorf("Invalid running timer elapsed time %s", timer.Elapsed().String()) - } - timer.Stop() - if timer.Elapsed() <= time.Duration(0) { - t.Errorf("Invalid stopped timer elapsed time %s", timer.Elapsed().String()) - } -} - -func TestTimerStopUninitalizedTimer(t *testing.T) { - timer := NewSplitTime("timer") - timer.Stop() - if timer.Status() != Stopped { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Stopped) - } - if timer.Elapsed() != time.Duration(0) { - t.Errorf("Invalid uninitialized stopped timer elapsed time %s", timer.Elapsed().String()) - } -} - -func TestTimerString(t *testing.T) { - timer := NewSplitTime("timer") - fmt.Println(timer.String()) - timer.Start() - fmt.Println(timer.String()) - timer.Stop() - fmt.Println(timer.String()) -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/LICENSE b/client/Godeps/_workspace/src/golang.org/x/crypto/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/client/Godeps/_workspace/src/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/PATENTS b/client/Godeps/_workspace/src/golang.org/x/crypto/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/client/Godeps/_workspace/src/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go deleted file mode 100644 index 778b272a..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cast5 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -// This test vector is taken from RFC 2144, App B.1. -// Since the other two test vectors are for reduced-round variants, we can't -// use them. -var basicTests = []struct { - key, plainText, cipherText string -}{ - { - "0123456712345678234567893456789a", - "0123456789abcdef", - "238b4fe5847e44b2", - }, -} - -func TestBasic(t *testing.T) { - for i, test := range basicTests { - key, _ := hex.DecodeString(test.key) - plainText, _ := hex.DecodeString(test.plainText) - expected, _ := hex.DecodeString(test.cipherText) - - c, err := NewCipher(key) - if err != nil { - t.Errorf("#%d: failed to create Cipher: %s", i, err) - continue - } - var cipherText [BlockSize]byte - c.Encrypt(cipherText[:], plainText) - if !bytes.Equal(cipherText[:], expected) { - t.Errorf("#%d: got:%x want:%x", i, cipherText, expected) - } - - var plainTextAgain [BlockSize]byte - c.Decrypt(plainTextAgain[:], cipherText[:]) - if !bytes.Equal(plainTextAgain[:], plainText) { - t.Errorf("#%d: got:%x want:%x", i, plainTextAgain, plainText) - } - } -} - -// TestFull performs the test specified in RFC 2144, App B.2. -// However, due to the length of time taken, it's disabled here and a more -// limited version is included, below. 
-func TestFull(t *testing.T) { - if testing.Short() { - // This is too slow for normal testing - return - } - - a, b := iterate(1000000) - - const expectedA = "eea9d0a249fd3ba6b3436fb89d6dca92" - const expectedB = "b2c95eb00c31ad7180ac05b8e83d696e" - - if hex.EncodeToString(a) != expectedA { - t.Errorf("a: got:%x want:%s", a, expectedA) - } - if hex.EncodeToString(b) != expectedB { - t.Errorf("b: got:%x want:%s", b, expectedB) - } -} - -func iterate(iterations int) ([]byte, []byte) { - const initValueHex = "0123456712345678234567893456789a" - - initValue, _ := hex.DecodeString(initValueHex) - - var a, b [16]byte - copy(a[:], initValue) - copy(b[:], initValue) - - for i := 0; i < iterations; i++ { - c, _ := NewCipher(b[:]) - c.Encrypt(a[:8], a[:8]) - c.Encrypt(a[8:], a[8:]) - c, _ = NewCipher(a[:]) - c.Encrypt(b[:8], b[:8]) - c.Encrypt(b[8:], b[8:]) - } - - return a[:], b[:] -} - -func TestLimited(t *testing.T) { - a, b := iterate(1000) - - const expectedA = "23f73b14b02a2ad7dfb9f2c35644798d" - const expectedB = "e5bf37eff14c456a40b21ce369370a9f" - - if hex.EncodeToString(a) != expectedA { - t.Errorf("a: got:%x want:%s", a, expectedA) - } - if hex.EncodeToString(b) != expectedB { - t.Errorf("b: got:%x want:%s", b, expectedB) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go deleted file mode 100644 index 9334e94e..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "bytes" - "hash/adler32" - "io/ioutil" - "testing" -) - -func TestDecodeEncode(t *testing.T) { - buf := bytes.NewBuffer([]byte(armorExample1)) - result, err := Decode(buf) - if err != nil { - t.Error(err) - } - expectedType := "PGP SIGNATURE" - if result.Type != expectedType { - t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType) - } - if len(result.Header) != 1 { - t.Errorf("len(result.Header): got:%d want:1", len(result.Header)) - } - v, ok := result.Header["Version"] - if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" { - t.Errorf("result.Header: got:%#v", result.Header) - } - - contents, err := ioutil.ReadAll(result.Body) - if err != nil { - t.Error(err) - } - - if adler32.Checksum(contents) != 0x27b144be { - t.Errorf("contents: got: %x", contents) - } - - buf = bytes.NewBuffer(nil) - w, err := Encode(buf, result.Type, result.Header) - if err != nil { - t.Error(err) - } - _, err = w.Write(contents) - if err != nil { - t.Error(err) - } - w.Close() - - if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) { - t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1) - } -} - -func TestLongHeader(t *testing.T) { - buf := bytes.NewBuffer([]byte(armorLongLine)) - result, err := Decode(buf) - if err != nil { - t.Error(err) - return - } - value, ok := result.Header["Version"] - if !ok { - t.Errorf("missing Version header") - } - if value != longValueExpected { - t.Errorf("got: %s want: %s", value, longValueExpected) - } -} - -const armorExample1 = `-----BEGIN PGP SIGNATURE----- -Version: GnuPG v1.4.10 (GNU/Linux) - -iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm -4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt -p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW -TxRjs+fJCIFuo71xb1g= 
-=/teI ------END PGP SIGNATURE-----` - -const armorLongLine = `-----BEGIN PGP SIGNATURE----- -Version: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz - -iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8 -kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp -cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA -byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3 -WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv -okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4= -=wfQG ------END PGP SIGNATURE-----` - -const longValueExpected = "0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz" diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go deleted file mode 100644 index 8f3ba2a8..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "bytes" - "testing" -) - -type recordingHash struct { - buf *bytes.Buffer -} - -func (r recordingHash) Write(b []byte) (n int, err error) { - return r.buf.Write(b) -} - -func (r recordingHash) Sum(in []byte) []byte { - return append(in, r.buf.Bytes()...) -} - -func (r recordingHash) Reset() { - panic("shouldn't be called") -} - -func (r recordingHash) Size() int { - panic("shouldn't be called") -} - -func (r recordingHash) BlockSize() int { - panic("shouldn't be called") -} - -func testCanonicalText(t *testing.T, input, expected string) { - r := recordingHash{bytes.NewBuffer(nil)} - c := NewCanonicalTextHash(r) - c.Write([]byte(input)) - result := c.Sum(nil) - if expected != string(result) { - t.Errorf("input: %x got: %x want: %x", input, result, expected) - } -} - -func TestCanonicalText(t *testing.T) { - testCanonicalText(t, "foo\n", "foo\r\n") - testCanonicalText(t, "foo", "foo") - testCanonicalText(t, "foo\r\n", "foo\r\n") - testCanonicalText(t, "foo\r\nbar", "foo\r\nbar") - testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n") -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go deleted file mode 100644 index 7cabd33a..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package clearsign - -import ( - "bytes" - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp" - "testing" -) - -func testParse(t *testing.T, input []byte, expected, expectedPlaintext string) { - b, rest := Decode(input) - if b == nil { - t.Fatal("failed to decode clearsign message") - } - if !bytes.Equal(rest, []byte("trailing")) { - t.Errorf("unexpected remaining bytes returned: %s", string(rest)) - } - if b.ArmoredSignature.Type != "PGP SIGNATURE" { - t.Errorf("bad armor type, got:%s, want:PGP SIGNATURE", b.ArmoredSignature.Type) - } - if !bytes.Equal(b.Bytes, []byte(expected)) { - t.Errorf("bad body, got:%x want:%x", b.Bytes, expected) - } - - if !bytes.Equal(b.Plaintext, []byte(expectedPlaintext)) { - t.Errorf("bad plaintext, got:%x want:%x", b.Plaintext, expectedPlaintext) - } - - keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey)) - if err != nil { - t.Errorf("failed to parse public key: %s", err) - } - - if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil { - t.Errorf("failed to check signature: %s", err) - } -} - -func TestParse(t *testing.T) { - testParse(t, clearsignInput, "Hello world\r\nline 2", "Hello world\nline 2\n") - testParse(t, clearsignInput2, "\r\n\r\n(This message has a couple of blank lines at the start and end.)\r\n\r\n", "\n\n(This message has a couple of blank lines at the start and end.)\n\n\n") -} - -func TestParseWithNoNewlineAtEnd(t *testing.T) { - input := clearsignInput - input = input[:len(input)-len("trailing")-1] - b, rest := Decode(input) - if b == nil { - t.Fatal("failed to decode clearsign message") - } - if len(rest) > 0 { - t.Errorf("unexpected remaining bytes returned: %s", string(rest)) - } -} - -var signingTests = []struct { - in, signed, plaintext string -}{ - {"", "", ""}, - {"a", "a", "a\n"}, - {"a\n", "a", "a\n"}, - {"-a\n", "-a", "-a\n"}, - {"--a\nb", "--a\r\nb", "--a\nb\n"}, -} - -func TestSigning(t *testing.T) { - keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey)) - if err != nil { - t.Errorf("failed to parse public key: %s", err) - } - - for i, test := range signingTests { - var buf bytes.Buffer - - plaintext, err := Encode(&buf, keyring[0].PrivateKey, nil) - if err != nil { - t.Errorf("#%d: error from Encode: %s", i, err) - continue - } - if _, err := plaintext.Write([]byte(test.in)); err != nil { - t.Errorf("#%d: error from Write: %s", i, err) - continue - } - if err := plaintext.Close(); err != nil { - t.Fatalf("#%d: error from Close: %s", i, err) - continue - } - - b, _ := Decode(buf.Bytes()) - if b == nil { - t.Errorf("#%d: failed to decode clearsign message", i) - continue - } - if !bytes.Equal(b.Bytes, []byte(test.signed)) { - t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Bytes, test.signed) - continue - } - if !bytes.Equal(b.Plaintext, []byte(test.plaintext)) { - t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Plaintext, test.plaintext) - continue - } - - if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil { - t.Errorf("#%d: failed to check signature: %s", i, err) - } - } -} - -var clearsignInput = []byte(` -;lasjlkfdsa - ------BEGIN PGP SIGNED MESSAGE----- -Hash: SHA1 - -Hello world -line 2 ------BEGIN PGP SIGNATURE----- -Version: GnuPG v1.4.10 (GNU/Linux) - -iJwEAQECAAYFAk8kMuEACgkQO9o98PRieSpMsAQAhmY/vwmNpflrPgmfWsYhk5O8 -pjnBUzZwqTDoDeINjZEoPDSpQAHGhjFjgaDx/Gj4fAl0dM4D0wuUEBb6QOrwflog 
-2A2k9kfSOMOtk0IH/H5VuFN1Mie9L/erYXjTQIptv9t9J7NoRBMU0QOOaFU0JaO9 -MyTpno24AjIAGb+mH1U= -=hIJ6 ------END PGP SIGNATURE----- -trailing`) - -var clearsignInput2 = []byte(` -asdlfkjasdlkfjsadf - ------BEGIN PGP SIGNED MESSAGE----- -Hash: SHA256 - - - -(This message has a couple of blank lines at the start and end.) - - ------BEGIN PGP SIGNATURE----- -Version: GnuPG v1.4.11 (GNU/Linux) - -iJwEAQEIAAYFAlPpSREACgkQO9o98PRieSpZTAP+M8QUoCt/7Rf3YbXPcdzIL32v -pt1I+cMNeopzfLy0u4ioEFi8s5VkwpL1AFmirvgViCwlf82inoRxzZRiW05JQ5LI -ESEzeCoy2LIdRCQ2hcrG8pIUPzUO4TqO5D/dMbdHwNH4h5nNmGJUAEG6FpURlPm+ -qZg6BaTvOxepqOxnhVU= -=e+C6 ------END PGP SIGNATURE----- - -trailing`) - -var signingKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1.4.10 (GNU/Linux) - -lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp -idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn -vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB -AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X -0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL -IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk -VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn -gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 -TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx -q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz -dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA -CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 -ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ -eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid -AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV -bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK -/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA -A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX -TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc -lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 -rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN -oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 -QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU -nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC -AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp -BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad -AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL -VrM0m72/jnpKo04= -=zNCn ------END PGP PRIVATE KEY BLOCK----- -` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go deleted file mode 100644 index c4f99f5c..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package elgamal - -import ( - "bytes" - "crypto/rand" - "math/big" - "testing" -) - -// This is the 1024-bit MODP group from RFC 5114, section 2.1: -const primeHex = "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371" - -const generatorHex = "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5" - -func fromHex(hex string) *big.Int { - n, ok := new(big.Int).SetString(hex, 16) - if !ok { - panic("failed to parse hex number") - } - return n -} - -func TestEncryptDecrypt(t *testing.T) { - priv := &PrivateKey{ - PublicKey: PublicKey{ - G: fromHex(generatorHex), - P: fromHex(primeHex), - }, - X: fromHex("42"), - } - priv.Y = new(big.Int).Exp(priv.G, priv.X, priv.P) - - message := []byte("hello world") - c1, c2, err := Encrypt(rand.Reader, &priv.PublicKey, message) - if err != nil { - t.Errorf("error encrypting: %s", err) - } - message2, err := Decrypt(priv, c1, c2) - if err != nil { - t.Errorf("error decrypting: %s", err) - } - if !bytes.Equal(message2, message) { - t.Errorf("decryption failed, got: %x, want: %x", message2, message) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go deleted file mode 100644 index 794a478e..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package openpgp - -import ( - "testing" - "time" - - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet" -) - -func TestKeyExpiry(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(expiringKeyHex)) - entity := kring[0] - - const timeFormat = "2006-01-02" - time1, _ := time.Parse(timeFormat, "2013-07-01") - // The expiringKeyHex key is structured as: - // - // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC - // sub 1024R/1ABB25A0 created: 2013-07-01 expires: 2013-07-08 usage: E - // sub 1024R/96A672F5 created: 2013-07-01 expires: 2013-07-31 usage: E - // - // So this should select the first, non-expired encryption key. - key, _ := entity.encryptionKey(time1) - if id := key.PublicKey.KeyIdShortString(); id != "1ABB25A0" { - t.Errorf("Expected key 1ABB25A0 at time %s, but got key %s", time1.Format(timeFormat), id) - } - - // Once the first encryption subkey has expired, the second should be - // selected. - time2, _ := time.Parse(timeFormat, "2013-07-09") - key, _ = entity.encryptionKey(time2) - if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" { - t.Errorf("Expected key 96A672F5 at time %s, but got key %s", time2.Format(timeFormat), id) - } - - // Once all the keys have expired, nothing should be returned. - time3, _ := time.Parse(timeFormat, "2013-08-01") - if key, ok := entity.encryptionKey(time3); ok { - t.Errorf("Expected no key at time %s, but got key %s", time3.Format(timeFormat), key.PublicKey.KeyIdShortString()) - } -} - -// TestExternallyRevokableKey attempts to load and parse a key with a third party revocation permission. 
-func TestExternallyRevocableKey(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex)) - - // The 0xA42704B92866382A key can be revoked by 0xBE3893CB843D0FE70C - // according to this signature that appears within the key: - // :signature packet: algo 1, keyid A42704B92866382A - // version 4, created 1396409682, md5len 0, sigclass 0x1f - // digest algo 2, begin of digest a9 84 - // hashed subpkt 2 len 4 (sig created 2014-04-02) - // hashed subpkt 12 len 22 (revocation key: c=80 a=1 f=CE094AA433F7040BB2DDF0BE3893CB843D0FE70C) - // hashed subpkt 7 len 1 (not revocable) - // subpkt 16 len 8 (issuer key ID A42704B92866382A) - // data: [1024 bits] - - id := uint64(0xA42704B92866382A) - keys := kring.KeysById(id) - if len(keys) != 1 { - t.Errorf("Expected to find key id %X, but got %d matches", id, len(keys)) - } -} - -func TestKeyRevocation(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(revokedKeyHex)) - - // revokedKeyHex contains these keys: - // pub 1024R/9A34F7C0 2014-03-25 [revoked: 2014-03-25] - // sub 1024R/1BA3CD60 2014-03-25 [revoked: 2014-03-25] - ids := []uint64{0xA401D9F09A34F7C0, 0x5CD3BE0A1BA3CD60} - - for _, id := range ids { - keys := kring.KeysById(id) - if len(keys) != 1 { - t.Errorf("Expected KeysById to find revoked key %X, but got %d matches", id, len(keys)) - } - keys = kring.KeysByIdUsage(id, 0) - if len(keys) != 0 { - t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", id, len(keys)) - } - } -} - -func TestSubkeyRevocation(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(revokedSubkeyHex)) - - // revokedSubkeyHex contains these keys: - // pub 1024R/4EF7E4BECCDE97F0 2014-03-25 - // sub 1024R/D63636E2B96AE423 2014-03-25 - // sub 1024D/DBCE4EE19529437F 2014-03-25 - // sub 1024R/677815E371C2FD23 2014-03-25 [revoked: 2014-03-25] - validKeys := []uint64{0x4EF7E4BECCDE97F0, 0xD63636E2B96AE423, 0xDBCE4EE19529437F} - revokedKey := uint64(0x677815E371C2FD23) - - for _, id := range validKeys { - keys := kring.KeysById(id) - if len(keys) != 1 { - t.Errorf("Expected KeysById to find key %X, but got %d matches", id, len(keys)) - } - keys = kring.KeysByIdUsage(id, 0) - if len(keys) != 1 { - t.Errorf("Expected KeysByIdUsage to find key %X, but got %d matches", id, len(keys)) - } - } - - keys := kring.KeysById(revokedKey) - if len(keys) != 1 { - t.Errorf("Expected KeysById to find key %X, but got %d matches", revokedKey, len(keys)) - } - - keys = kring.KeysByIdUsage(revokedKey, 0) - if len(keys) != 0 { - t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", revokedKey, len(keys)) - } -} - -func TestKeyUsage(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex)) - - // subkeyUsageHex contains these keys: - // pub 1024R/2866382A created: 2014-04-01 expires: never usage: SC - // sub 1024R/936C9153 created: 2014-04-01 expires: never usage: E - // sub 1024R/64D5F5BB created: 2014-04-02 expires: never usage: E - // sub 1024D/BC0BA992 created: 2014-04-02 expires: never usage: S - certifiers := []uint64{0xA42704B92866382A} - signers := []uint64{0xA42704B92866382A, 0x42CE2C64BC0BA992} - encrypters := []uint64{0x09C0C7D9936C9153, 0xC104E98664D5F5BB} - - for _, id := range certifiers { - keys := kring.KeysByIdUsage(id, packet.KeyFlagCertify) - if len(keys) == 1 { - if keys[0].PublicKey.KeyId != id { - t.Errorf("Expected to find certifier key id %X, but got %X", id, keys[0].PublicKey.KeyId) - } - } else { - t.Errorf("Expected one match for certifier key id %X, but got %d matches", id, 
len(keys)) - } - } - - for _, id := range signers { - keys := kring.KeysByIdUsage(id, packet.KeyFlagSign) - if len(keys) == 1 { - if keys[0].PublicKey.KeyId != id { - t.Errorf("Expected to find signing key id %X, but got %X", id, keys[0].PublicKey.KeyId) - } - } else { - t.Errorf("Expected one match for signing key id %X, but got %d matches", id, len(keys)) - } - - // This keyring contains no encryption keys that are also good for signing. - keys = kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications) - if len(keys) != 0 { - t.Errorf("Unexpected match for encryption key id %X", id) - } - } - - for _, id := range encrypters { - keys := kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications) - if len(keys) == 1 { - if keys[0].PublicKey.KeyId != id { - t.Errorf("Expected to find encryption key id %X, but got %X", id, keys[0].PublicKey.KeyId) - } - } else { - t.Errorf("Expected one match for encryption key id %X, but got %d matches", id, len(keys)) - } - - // This keyring contains no encryption keys that are also good for signing. - keys = kring.KeysByIdUsage(id, packet.KeyFlagSign) - if len(keys) != 0 { - t.Errorf("Unexpected match for signing key id %X", id) - } - } -} - -func TestIdVerification(t *testing.T) { - kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) - if err != nil { - t.Fatal(err) - } - if err := kring[1].PrivateKey.Decrypt([]byte("passphrase")); err != nil { - t.Fatal(err) - } - - const identity = "Test Key 1 (RSA)" - if err := kring[0].SignIdentity(identity, kring[1], nil); err != nil { - t.Fatal(err) - } - - ident, ok := kring[0].Identities[identity] - if !ok { - t.Fatal("identity missing from key after signing") - } - - checked := false - for _, sig := range ident.Signatures { - if sig.IssuerKeyId == nil || *sig.IssuerKeyId != kring[1].PrimaryKey.KeyId { - continue - } - - if err := kring[1].PrimaryKey.VerifyUserIdSignature(identity, kring[0].PrimaryKey, sig); err != nil { - t.Fatalf("error verifying new identity signature: %s", err) - } - checked = true - break - } - - if !checked { - t.Fatal("didn't find identity signature in Entity") - } -} - -const expiringKeyHex = 
"988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e" -const subkeyUsageHex = 
"988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7ce
f3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" -const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" -const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go deleted file mode 100644 index cb2d70bd..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "encoding/hex" - "io" - "io/ioutil" - "testing" -) - -func TestCompressed(t *testing.T) { - packet, err := Read(readerFromHex(compressedHex)) - if err != nil { - t.Errorf("failed to read Compressed: %s", err) - return - } - - c, ok := packet.(*Compressed) - if !ok { - t.Error("didn't find Compressed packet") - return - } - - contents, err := ioutil.ReadAll(c.Body) - if err != nil && err != io.EOF { - t.Error(err) - return - } - - expected, _ := hex.DecodeString(compressedExpectedHex) - if !bytes.Equal(expected, contents) { - t.Errorf("got:%x want:%x", contents, expected) - } -} - -const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700" -const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a" diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go deleted file mode 100644 index 0a8dcc6d..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
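The encrypted_key tests removed in the hunk below reduce, at bottom, to RSA PKCS#1 v1.5 encryption of a short session key (RFC 4880, section 13.1). A minimal stdlib-only sketch of that primitive, with a freshly generated key standing in for the fixed fixture key (an assumption of the sketch, not something the deleted tests do):

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// Throwaway 1024-bit key; the deleted tests pin a fixed key instead.
	priv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	sessionKey := []byte{1, 2, 3, 4} // same toy session key as TestEncryptingEncryptedKey
	ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, sessionKey)
	if err != nil {
		panic(err)
	}
	plaintext, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ciphertext)
	if err != nil {
		panic(err)
	}
	fmt.Println("session key round trip ok:", bytes.Equal(plaintext, sessionKey))
}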
- -package packet - -import ( - "bytes" - "crypto/rsa" - "fmt" - "math/big" - "testing" -) - -func bigFromBase10(s string) *big.Int { - b, ok := new(big.Int).SetString(s, 10) - if !ok { - panic("bigFromBase10 failed") - } - return b -} - -var encryptedKeyPub = rsa.PublicKey{ - E: 65537, - N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"), -} - -var encryptedKeyRSAPriv = &rsa.PrivateKey{ - PublicKey: encryptedKeyPub, - D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"), -} - -var encryptedKeyPriv = &PrivateKey{ - PublicKey: PublicKey{ - PubKeyAlgo: PubKeyAlgoRSA, - }, - PrivateKey: encryptedKeyRSAPriv, -} - -func TestDecryptingEncryptedKey(t *testing.T) { - const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8" - const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b" - - p, err := Read(readerFromHex(encryptedKeyHex)) - if err != nil { - t.Errorf("error from Read: %s", err) - return - } - ek, ok := p.(*EncryptedKey) - if !ok { - t.Errorf("didn't parse an EncryptedKey, got %#v", p) - return - } - - if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } - - err = ek.Decrypt(encryptedKeyPriv, nil) - if err != nil { - t.Errorf("error from Decrypt: %s", err) - return - } - - if ek.CipherFunc != CipherAES256 { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } - - keyHex := fmt.Sprintf("%x", ek.Key) - if keyHex != expectedKeyHex { - t.Errorf("bad key, got %s want %s", keyHex, expectedKeyHex) - } -} - -func TestEncryptingEncryptedKey(t *testing.T) { - key := []byte{1, 2, 3, 4} - const expectedKeyHex = "01020304" - const keyId = 42 - - pub := &PublicKey{ - PublicKey: &encryptedKeyPub, - KeyId: keyId, - PubKeyAlgo: PubKeyAlgoRSAEncryptOnly, - } - - buf := new(bytes.Buffer) - err := SerializeEncryptedKey(buf, pub, CipherAES128, key, nil) - if err != nil { - t.Errorf("error writing encrypted key packet: %s", err) - } - - p, err := Read(buf) - if err != nil { - t.Errorf("error from Read: %s", err) - return - } - ek, ok := p.(*EncryptedKey) - if !ok { - t.Errorf("didn't parse an EncryptedKey, got %#v", p) - return - } - - if ek.KeyId != keyId || ek.Algo != PubKeyAlgoRSAEncryptOnly { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } - - err = ek.Decrypt(encryptedKeyPriv, nil) - if err != nil { - t.Errorf("error from Decrypt: %s", err) - return - } - - if ek.CipherFunc != CipherAES128 { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } - - keyHex := fmt.Sprintf("%x", ek.Key) - if keyHex != expectedKeyHex { - t.Errorf("bad key, got %s want %s", keyHex, expectedKeyHex) - } -} diff --git
a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go deleted file mode 100644 index 91022c04..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/aes" - "crypto/rand" - "testing" -) - -var commonKey128 = []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c} - -func testOCFB(t *testing.T, resync OCFBResyncOption) { - block, err := aes.NewCipher(commonKey128) - if err != nil { - t.Error(err) - return - } - - plaintext := []byte("this is the plaintext, which is long enough to span several blocks.") - randData := make([]byte, block.BlockSize()) - rand.Reader.Read(randData) - ocfb, prefix := NewOCFBEncrypter(block, randData, resync) - ciphertext := make([]byte, len(plaintext)) - ocfb.XORKeyStream(ciphertext, plaintext) - - ocfbdec := NewOCFBDecrypter(block, prefix, resync) - if ocfbdec == nil { - t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync) - return - } - plaintextCopy := make([]byte, len(plaintext)) - ocfbdec.XORKeyStream(plaintextCopy, ciphertext) - - if !bytes.Equal(plaintextCopy, plaintext) { - t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync) - } -} - -func TestOCFB(t *testing.T) { - testOCFB(t, OCFBNoResync) - testOCFB(t, OCFBResync) -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go deleted file mode 100644 index f27bbfe0..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "encoding/hex" - "io" - "testing" -) - -// Test packet.Read error handling in OpaquePacket.Parse, -// which attempts to re-read an OpaquePacket as a supported -// Packet type. -func TestOpaqueParseReason(t *testing.T) { - buf, err := hex.DecodeString(UnsupportedKeyHex) - if err != nil { - t.Fatal(err) - } - or := NewOpaqueReader(bytes.NewBuffer(buf)) - count := 0 - badPackets := 0 - var uid *UserId - for { - op, err := or.Next() - if err == io.EOF { - break - } else if err != nil { - t.Errorf("#%d: opaque read error: %v", count, err) - break - } - // try to parse opaque packet - p, err := op.Parse() - switch pkt := p.(type) { - case *UserId: - uid = pkt - case *OpaquePacket: - // If an OpaquePacket can't re-parse, packet.Read - // certainly had its reasons. - if pkt.Reason == nil { - t.Errorf("#%d: opaque packet, no reason", count) - } else { - badPackets++ - } - } - count++ - } - - const expectedBad = 3 - // Test post-conditions, make sure we actually parsed packets as expected. - if badPackets != expectedBad { - t.Errorf("unexpected # unparseable packets: %d (want %d)", badPackets, expectedBad) - } - if uid == nil { - t.Errorf("failed to find expected UID in unsupported keyring") - } else if uid.Id != "Armin M. Warda <warda@nephilim.ruhr.de>" { - t.Errorf("unexpected UID: %v", uid.Id) - } -} -
-// This key material has public key and signature packet versions modified to -// an unsupported value (1), so that trying to parse the OpaquePacket to -// a typed packet will get an error. It also contains a GnuPG trust packet. -// (Created with: od -An -t x1 pubring.gpg | xargs | sed 's/ //g') -const UnsupportedKeyHex = `988d012e7a18a20000010400d6ac00d92b89c1f4396c243abb9b76d2e9673ad63483291fed88e22b82e255e441c078c6abbbf7d2d195e50b62eeaa915b85b0ec20c225ce2c64c167cacb6e711daf2e45da4a8356a059b8160e3b3628ac0dd8437b31f06d53d6e8ea4214d4a26406a6b63e1001406ef23e0bb3069fac9a99a91f77dfafd5de0f188a5da5e3c9000511b42741726d696e204d2e205761726461203c7761726461406e657068696c696d2e727568722e64653e8900950105102e8936c705d1eb399e58489901013f0e03ff5a0c4f421e34fcfa388129166420c08cd76987bcdec6f01bd0271459a85cc22048820dd4e44ac2c7d23908d540f54facf1b36b0d9c20488781ce9dca856531e76e2e846826e9951338020a03a09b57aa5faa82e9267458bd76105399885ac35af7dc1cbb6aaed7c39e1039f3b5beda2c0e916bd38560509bab81235d1a0ead83b0020000` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go deleted file mode 100644 index e84a3af9..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "encoding/hex" - "fmt" - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors" - "io" - "io/ioutil" - "testing" -) - -func TestReadFull(t *testing.T) { - var out [4]byte - - b := bytes.NewBufferString("foo") - n, err := readFull(b, out[:3]) - if n != 3 || err != nil { - t.Errorf("full read failed n:%d err:%s", n, err) - } - - b = bytes.NewBufferString("foo") - n, err = readFull(b, out[:4]) - if n != 3 || err != io.ErrUnexpectedEOF { - t.Errorf("partial read failed n:%d err:%s", n, err) - } - - b = bytes.NewBuffer(nil) - n, err = readFull(b, out[:3]) - if n != 0 || err != io.ErrUnexpectedEOF { - t.Errorf("empty read failed n:%d err:%s", n, err) - } -} - -func readerFromHex(s string) io.Reader { - data, err := hex.DecodeString(s) - if err != nil { - panic("readerFromHex: bad input") - } - return bytes.NewBuffer(data) -} - -var readLengthTests = []struct { - hexInput string - length int64 - isPartial bool - err error -}{ - {"", 0, false, io.ErrUnexpectedEOF}, - {"1f", 31, false, nil}, - {"c0", 0, false, io.ErrUnexpectedEOF}, - {"c101", 256 + 1 + 192, false, nil}, - {"e0", 1, true, nil}, - {"e1", 2, true, nil}, - {"e2", 4, true, nil}, - {"ff", 0, false, io.ErrUnexpectedEOF}, - {"ff00", 0, false, io.ErrUnexpectedEOF}, - {"ff0000", 0, false, io.ErrUnexpectedEOF}, - {"ff000000", 0, false, io.ErrUnexpectedEOF}, - {"ff00000000", 0, false, nil}, - {"ff01020304", 16909060, false, nil}, -} - -func TestReadLength(t *testing.T) { - for i, test := range readLengthTests { - length, isPartial, err := readLength(readerFromHex(test.hexInput)) - if test.err != nil { - if err != test.err { - t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) - } - continue - } - if err != nil { - t.Errorf("%d: unexpected error: %s", i, err) - continue - } - if length != test.length || isPartial != test.isPartial { - t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial,
test.length, test.isPartial) - } - } -} - -var partialLengthReaderTests = []struct { - hexInput string - err error - hexOutput string -}{ - {"e0", io.ErrUnexpectedEOF, ""}, - {"e001", io.ErrUnexpectedEOF, ""}, - {"e0010102", nil, "0102"}, - {"ff00000000", nil, ""}, - {"e10102e1030400", nil, "01020304"}, - {"e101", io.ErrUnexpectedEOF, ""}, -} - -func TestPartialLengthReader(t *testing.T) { - for i, test := range partialLengthReaderTests { - r := &partialLengthReader{readerFromHex(test.hexInput), 0, true} - out, err := ioutil.ReadAll(r) - if test.err != nil { - if err != test.err { - t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) - } - continue - } - if err != nil { - t.Errorf("%d: unexpected error: %s", i, err) - continue - } - - got := fmt.Sprintf("%x", out) - if got != test.hexOutput { - t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput) - } - } -} - -var readHeaderTests = []struct { - hexInput string - structuralError bool - unexpectedEOF bool - tag int - length int64 - hexOutput string -}{ - {"", false, false, 0, 0, ""}, - {"7f", true, false, 0, 0, ""}, - - // Old format headers - {"80", false, true, 0, 0, ""}, - {"8001", false, true, 0, 1, ""}, - {"800102", false, false, 0, 1, "02"}, - {"81000102", false, false, 0, 1, "02"}, - {"820000000102", false, false, 0, 1, "02"}, - {"860000000102", false, false, 1, 1, "02"}, - {"83010203", false, false, 0, -1, "010203"}, - - // New format headers - {"c0", false, true, 0, 0, ""}, - {"c000", false, false, 0, 0, ""}, - {"c00102", false, false, 0, 1, "02"}, - {"c0020203", false, false, 0, 2, "0203"}, - {"c00202", false, true, 0, 2, ""}, - {"c3020203", false, false, 3, 2, "0203"}, -} - -func TestReadHeader(t *testing.T) { - for i, test := range readHeaderTests { - tag, length, contents, err := readHeader(readerFromHex(test.hexInput)) - if test.structuralError { - if _, ok := err.(errors.StructuralError); ok { - continue - } - t.Errorf("%d: expected StructuralError, got:%s", i, err) - continue - } - if err != nil { - if len(test.hexInput) == 0 && err == io.EOF { - continue - } - if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { - t.Errorf("%d: unexpected error from readHeader: %s", i, err) - } - continue - } - if int(tag) != test.tag || length != test.length { - t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length) - continue - } - - body, err := ioutil.ReadAll(contents) - if err != nil { - if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { - t.Errorf("%d: unexpected error from contents: %s", i, err) - } - continue - } - if test.unexpectedEOF { - t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i) - continue - } - got := fmt.Sprintf("%x", body) - if got != test.hexOutput { - t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput) - } - } -} - -func TestSerializeHeader(t *testing.T) { - tag := packetTypePublicKey - lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000} - - for _, length := range lengths { - buf := bytes.NewBuffer(nil) - serializeHeader(buf, tag, length) - tag2, length2, _, err := readHeader(buf) - if err != nil { - t.Errorf("length %d, err: %s", length, err) - } - if tag2 != tag { - t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag) - } - if int(length2) != length { - t.Errorf("length %d, length incorrect (got %d)", length, length2) - } - } -} - -func TestPartialLengths(t *testing.T) { - buf := bytes.NewBuffer(nil) - w := new(partialLengthWriter) - w.w = noOpCloser{buf} - - const maxChunkSize = 64 - - var b
[maxChunkSize]byte - var n uint8 - for l := 1; l <= maxChunkSize; l++ { - for i := 0; i < l; i++ { - b[i] = n - n++ - } - m, err := w.Write(b[:l]) - if m != l { - t.Errorf("short write got: %d want: %d", m, l) - } - if err != nil { - t.Errorf("error from write: %s", err) - } - } - w.Close() - - want := (maxChunkSize * (maxChunkSize + 1)) / 2 - copyBuf := bytes.NewBuffer(nil) - r := &partialLengthReader{buf, 0, true} - m, err := io.Copy(copyBuf, r) - if m != int64(want) { - t.Errorf("short copy got: %d want: %d", m, want) - } - if err != nil { - t.Errorf("error from copy: %s", err) - } - - copyBytes := copyBuf.Bytes() - for i := 0; i < want; i++ { - if copyBytes[i] != uint8(i) { - t.Errorf("bad pattern in copy at %d", i) - break - } - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go deleted file mode 100644 index 6a6197ae..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "testing" - "time" -) - -var privateKeyTests = []struct { - privateKeyHex string - creationTime time.Time -}{ - { - privKeyRSAHex, - time.Unix(0x4cc349a8, 0), - }, - { - privKeyElGamalHex, - time.Unix(0x4df9ee1a, 0), - }, -} - -func TestPrivateKeyRead(t *testing.T) { - for i, test := range privateKeyTests { - packet, err := Read(readerFromHex(test.privateKeyHex)) - if err != nil { - t.Errorf("#%d: failed to parse: %s", i, err) - continue - } - - privKey := packet.(*PrivateKey) - - if !privKey.Encrypted { - t.Errorf("#%d: private key isn't encrypted", i) - continue - } - - err = privKey.Decrypt([]byte("wrong password")) - if err == nil { - t.Errorf("#%d: decrypted with incorrect key", i) - continue - } - - err = privKey.Decrypt([]byte("testing")) - if err != nil { - t.Errorf("#%d: failed to decrypt: %s", i, err) - continue - } - - if !privKey.CreationTime.Equal(test.creationTime) || privKey.Encrypted { - t.Errorf("#%d: bad result, got: %#v", i, privKey) - } - } -} - -// Generated with `gpg --export-secret-keys "Test Key 2"` -const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" - -// Generated by `gpg --export-secret-keys` followed by a manual extraction of -// the ElGamal subkey from the packets. 
-const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go deleted file mode 100644 index 7ad7d918..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "encoding/hex" - "testing" - "time" -) - -var pubKeyTests = []struct { - hexData string - hexFingerprint string - creationTime time.Time - pubKeyAlgo PublicKeyAlgorithm - keyId uint64 - keyIdString string - keyIdShort string -}{ - {rsaPkDataHex, rsaFingerprintHex, time.Unix(0x4d3c5c10, 0), PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"}, - {dsaPkDataHex, dsaFingerprintHex, time.Unix(0x4d432f89, 0), PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"}, - {ecdsaPkDataHex, ecdsaFingerprintHex, time.Unix(0x5071c294, 0), PubKeyAlgoECDSA, 0x43fe956c542ca00b, "43FE956C542CA00B", "542CA00B"}, -} - -func TestPublicKeyRead(t *testing.T) { - for i, test := range pubKeyTests { - packet, err := Read(readerFromHex(test.hexData)) - if err != nil { - t.Errorf("#%d: Read error: %s", i, err) - continue - } - pk, ok := packet.(*PublicKey) - if !ok { - t.Errorf("#%d: failed to parse, got: %#v", i, packet) - continue - } - if pk.PubKeyAlgo != test.pubKeyAlgo { - t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo) - } - if !pk.CreationTime.Equal(test.creationTime) { - t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime) - } - expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint) - if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) { - t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint) - } - if pk.KeyId != test.keyId { - t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) - } - if g, e := pk.KeyIdString(), test.keyIdString; g != e { - t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e) - } - if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e { - t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e) - } - } -} - -func TestPublicKeySerialize(t *testing.T) { - for i, test := range pubKeyTests { - packet, err := Read(readerFromHex(test.hexData)) - if err != nil { - t.Errorf("#%d: Read error: %s", i, err) - continue - } - pk, ok := packet.(*PublicKey) - if !ok { - t.Errorf("#%d: failed to parse, got: %#v", i, packet) - continue - } - serializeBuf := bytes.NewBuffer(nil) - err = pk.Serialize(serializeBuf) - if err != nil { - t.Errorf("#%d: failed to serialize: %s", i, 
err) - continue - } - - packet, err = Read(serializeBuf) - if err != nil { - t.Errorf("#%d: Read error (from serialized data): %s", i, err) - continue - } - pk, ok = packet.(*PublicKey) - if !ok { - t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet) - continue - } - } -} - -func TestEcc384Serialize(t *testing.T) { - r := readerFromHex(ecc384PubHex) - var w bytes.Buffer - for i := 0; i < 2; i++ { - // Public key - p, err := Read(r) - if err != nil { - t.Error(err) - } - pubkey := p.(*PublicKey) - if !bytes.Equal(pubkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) { - t.Errorf("Unexpected pubkey OID: %x", pubkey.ec.oid) - } - if !bytes.Equal(pubkey.ec.p.bytes[:5], []byte{0x04, 0xf6, 0xb8, 0xc5, 0xac}) { - t.Errorf("Unexpected pubkey P[:5]: %x", pubkey.ec.p.bytes) - } - if pubkey.KeyId != 0x098033880F54719F { - t.Errorf("Unexpected pubkey ID: %x", pubkey.KeyId) - } - err = pubkey.Serialize(&w) - if err != nil { - t.Error(err) - } - // User ID - p, err = Read(r) - if err != nil { - t.Error(err) - } - uid := p.(*UserId) - if uid.Id != "ec_dsa_dh_384 <openpgp@brainhub.org>" { - t.Error("Unexpected UID:", uid.Id) - } - err = uid.Serialize(&w) - if err != nil { - t.Error(err) - } - // User ID Sig - p, err = Read(r) - if err != nil { - t.Error(err) - } - uidSig := p.(*Signature) - err = pubkey.VerifyUserIdSignature(uid.Id, pubkey, uidSig) - if err != nil { - t.Error(err, ": UID") - } - err = uidSig.Serialize(&w) - if err != nil { - t.Error(err) - } - // Subkey - p, err = Read(r) - if err != nil { - t.Error(err) - } - subkey := p.(*PublicKey) - if !bytes.Equal(subkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) { - t.Errorf("Unexpected subkey OID: %x", subkey.ec.oid) - } - if !bytes.Equal(subkey.ec.p.bytes[:5], []byte{0x04, 0x2f, 0xaa, 0x84, 0x02}) { - t.Errorf("Unexpected subkey P[:5]: %x", subkey.ec.p.bytes) - } - if subkey.ecdh.KdfHash != 0x09 { - t.Error("Expected KDF hash function SHA384 (0x09), got", subkey.ecdh.KdfHash) - } - if subkey.ecdh.KdfAlgo != 0x09 { - t.Error("Expected KDF symmetric alg AES256 (0x09), got", subkey.ecdh.KdfAlgo) - } - if subkey.KeyId != 0xAA8B938F9A201946 { - t.Errorf("Unexpected subkey ID: %x", subkey.KeyId) - } - err = subkey.Serialize(&w) - if err != nil { - t.Error(err) - } - // Subkey Sig - p, err = Read(r) - if err != nil { - t.Error(err) - } - subkeySig := p.(*Signature) - err = pubkey.VerifyKeySignature(subkey, subkeySig) - if err != nil { - t.Error(err) - } - err = subkeySig.Serialize(&w) - if err != nil { - t.Error(err) - } - // Now read back what we've written again - r = bytes.NewBuffer(w.Bytes()) - w.Reset() - } -} - -const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" - -const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" - -const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" - -const dsaPkDataHex =
"9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" - -const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" - -const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" - -// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key -const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go deleted file mode 100644 index e0640590..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "encoding/hex" - "testing" - "time" -) - -var pubKeyV3Test = struct { - hexFingerprint string - creationTime time.Time - pubKeyAlgo PublicKeyAlgorithm - keyId uint64 - keyIdString string - keyIdShort string -}{ - "103BECF5BD1E837C89D19E98487767F7", - time.Unix(779753634, 0), - PubKeyAlgoRSA, - 0xDE0F188A5DA5E3C9, - "DE0F188A5DA5E3C9", - "5DA5E3C9"} - -func TestPublicKeyV3Read(t *testing.T) { - i, test := 0, pubKeyV3Test - packet, err := Read(v3KeyReader(t)) - if err != nil { - t.Fatalf("#%d: Read error: %s", i, err) - } - pk, ok := packet.(*PublicKeyV3) - if !ok { - t.Fatalf("#%d: failed to parse, got: %#v", i, packet) - } - if pk.PubKeyAlgo != test.pubKeyAlgo { - t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo) - } - if !pk.CreationTime.Equal(test.creationTime) { - t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime) - } - expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint) - if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) { - t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint) - } - if pk.KeyId != test.keyId { - t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) - } - if g, e := pk.KeyIdString(), test.keyIdString; g != e { - t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e) - } - if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e { - t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e) - } -} - -func TestPublicKeyV3Serialize(t *testing.T) { - //for i, test := range pubKeyV3Tests { - i := 0 - packet, err := Read(v3KeyReader(t)) - if err != nil { - t.Fatalf("#%d: Read error: %s", i, err) - } - pk, ok := packet.(*PublicKeyV3) - if !ok { - t.Fatalf("#%d: failed to parse, got: %#v", i, packet) - } - var serializeBuf bytes.Buffer - if err = pk.Serialize(&serializeBuf); err != nil { - t.Fatalf("#%d: failed to serialize: %s", i, err) - } - - if packet, err = Read(bytes.NewBuffer(serializeBuf.Bytes())); err != nil { - t.Fatalf("#%d: Read error (from serialized data): %s", i, err) - } - if pk, ok = packet.(*PublicKeyV3); !ok { - t.Fatalf("#%d: failed to parse serialized data, got: %#v", i, packet) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go deleted file mode 100644 index c1bbde8b..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "crypto" - "encoding/hex" - "testing" -) - -func TestSignatureRead(t *testing.T) { - packet, err := Read(readerFromHex(signatureDataHex)) - if err != nil { - t.Error(err) - return - } - sig, ok := packet.(*Signature) - if !ok || sig.SigType != SigTypeBinary || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.SHA1 { - t.Errorf("failed to parse, got: %#v", packet) - } -} - -func TestSignatureReserialize(t *testing.T) { - packet, _ := Read(readerFromHex(signatureDataHex)) - sig := packet.(*Signature) - out := new(bytes.Buffer) - err := sig.Serialize(out) - if err != nil { - t.Errorf("error reserializing: %s", err) - return - } - - expected, _ := hex.DecodeString(signatureDataHex) - if !bytes.Equal(expected, out.Bytes()) { - t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected)) - } -} - -const signatureDataHex = "c2c05c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e" diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go deleted file mode 100644 index 8e0cc150..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
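The signature tests deleted above and the V3 variants that follow share one pattern: parse a hex fixture, call Serialize, and require byte-identical output. A generic stdlib-only version of that check, assuming only that the value under test exposes a Serialize(io.Writer) error method, as the openpgp packet types here do; rawPacket is a hypothetical stand-in so the sketch compiles on its own:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
)

type serializer interface {
	Serialize(w io.Writer) error
}

// checkRoundTrip reserializes v and compares against the original fixture,
// returning hex dumps of both sides on mismatch, as the deleted tests do.
func checkRoundTrip(fixtureHex string, v serializer) error {
	want, err := hex.DecodeString(fixtureHex)
	if err != nil {
		return err
	}
	var out bytes.Buffer
	if err := v.Serialize(&out); err != nil {
		return err
	}
	if !bytes.Equal(out.Bytes(), want) {
		return fmt.Errorf("output doesn't match input (got vs expected):\n%s\n%s",
			hex.Dump(out.Bytes()), hex.Dump(want))
	}
	return nil
}

// rawPacket writes itself back verbatim, so its round trip always succeeds.
type rawPacket []byte

func (r rawPacket) Serialize(w io.Writer) error { _, err := w.Write(r); return err }

func main() {
	fmt.Println(checkRoundTrip("010203", rawPacket{1, 2, 3})) // <nil>
}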
- -package packet - -import ( - "bytes" - "crypto" - "encoding/hex" - "io" - "io/ioutil" - "testing" - - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor" -) - -func TestSignatureV3Read(t *testing.T) { - r := v3KeyReader(t) - Read(r) // Skip public key - Read(r) // Skip uid - packet, err := Read(r) // Signature - if err != nil { - t.Error(err) - return - } - sig, ok := packet.(*SignatureV3) - if !ok || sig.SigType != SigTypeGenericCert || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.MD5 { - t.Errorf("failed to parse, got: %#v", packet) - } -} - -func TestSignatureV3Reserialize(t *testing.T) { - r := v3KeyReader(t) - Read(r) // Skip public key - Read(r) // Skip uid - packet, err := Read(r) - if err != nil { - t.Error(err) - return - } - sig := packet.(*SignatureV3) - out := new(bytes.Buffer) - if err = sig.Serialize(out); err != nil { - t.Errorf("error reserializing: %s", err) - return - } - expected, err := ioutil.ReadAll(v3KeyReader(t)) - if err != nil { - t.Error(err) - return - } - expected = expected[4+141+4+39:] // See pgpdump offsets below, this is where the sig starts - if !bytes.Equal(expected, out.Bytes()) { - t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected)) - } -} - -func v3KeyReader(t *testing.T) io.Reader { - armorBlock, err := armor.Decode(bytes.NewBufferString(keySigV3Armor)) - if err != nil { - t.Fatalf("armor Decode failed: %v", err) - } - return armorBlock.Body -} - -// keySigV3Armor is some V3 public key I found in an SKS dump. -// Old: Public Key Packet(tag 6)(141 bytes) -// Ver 4 - new -// Public key creation time - Fri Sep 16 17:13:54 CDT 1994 -// Pub alg - unknown(pub 0) -// Unknown public key(pub 0) -// Old: User ID Packet(tag 13)(39 bytes) -// User ID - Armin M. Warda <warda@nephilim.ruhr.de> -// Old: Signature Packet(tag 2)(149 bytes) -// Ver 4 - new -// Sig type - unknown(05) -// Pub alg - ElGamal Encrypt-Only(pub 16) -// Hash alg - unknown(hash 46) -// Hashed Sub: unknown(sub 81, critical)(1988 bytes) -const keySigV3Armor = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: SKS 1.0.10 - -mI0CLnoYogAAAQQA1qwA2SuJwfQ5bCQ6u5t20ulnOtY0gykf7YjiK4LiVeRBwHjGq7v30tGV -5Qti7qqRW4Ww7CDCJc4sZMFnystucR2vLkXaSoNWoFm4Fg47NiisDdhDezHwbVPW6OpCFNSi -ZAamtj4QAUBu8j4LswafrJqZqR9336/V3g8Yil2l48kABRG0J0FybWluIE0uIFdhcmRhIDx3 -YXJkYUBuZXBoaWxpbS5ydWhyLmRlPoiVAgUQLok2xwXR6zmeWEiZAQE/DgP/WgxPQh40/Po4 -gSkWZCDAjNdph7zexvAb0CcUWahcwiBIgg3U5ErCx9I5CNVA9U+s8bNrDZwgSIeBzp3KhWUx -524uhGgm6ZUTOAIKA6CbV6pfqoLpJnRYvXYQU5mIWsNa99wcu2qu18OeEDnztb7aLA6Ra9OF -YFCbq4EjXRoOrYM= -=LPjs ------END PGP PUBLIC KEY BLOCK-----` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go deleted file mode 100644 index dd983cb3..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
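The symmetric-key tests deleted below pair an SKESK packet (a passphrase-derived key) with a symmetrically encrypted data packet. The shape of that round trip, sketched with stdlib AES in CFB mode; note that OpenPGP's OCFB resync variant and its iterated, salted S2K differ from the plain CFB and single SHA-1 used here, so this is an analogy, not the packet algorithm:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha1"
	"fmt"
)

func main() {
	// Toy passphrase-to-key step; real OpenPGP runs an iterated, salted S2K.
	sum := sha1.Sum([]byte("testing"))
	key := sum[:16] // AES-128, matching CipherAES128 in the deleted test

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	iv := make([]byte, aes.BlockSize)
	if _, err := rand.Read(iv); err != nil {
		panic(err)
	}

	plaintext := []byte("contents.\n")
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(ciphertext, plaintext)

	decrypted := make([]byte, len(ciphertext))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(decrypted, ciphertext)
	fmt.Println("round trip ok:", bytes.Equal(decrypted, plaintext))
}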
- -package packet - -import ( - "bytes" - "encoding/hex" - "io" - "io/ioutil" - "testing" -) - -func TestSymmetricKeyEncrypted(t *testing.T) { - buf := readerFromHex(symmetricallyEncryptedHex) - packet, err := Read(buf) - if err != nil { - t.Errorf("failed to read SymmetricKeyEncrypted: %s", err) - return - } - ske, ok := packet.(*SymmetricKeyEncrypted) - if !ok { - t.Error("didn't find SymmetricKeyEncrypted packet") - return - } - err = ske.Decrypt([]byte("password")) - if err != nil { - t.Error(err) - return - } - - packet, err = Read(buf) - if err != nil { - t.Errorf("failed to read SymmetricallyEncrypted: %s", err) - return - } - se, ok := packet.(*SymmetricallyEncrypted) - if !ok { - t.Error("didn't find SymmetricallyEncrypted packet") - return - } - r, err := se.Decrypt(ske.CipherFunc, ske.Key) - if err != nil { - t.Error(err) - return - } - - contents, err := ioutil.ReadAll(r) - if err != nil && err != io.EOF { - t.Error(err) - return - } - - expectedContents, _ := hex.DecodeString(symmetricallyEncryptedContentsHex) - if !bytes.Equal(expectedContents, contents) { - t.Errorf("bad contents got:%x want:%x", contents, expectedContents) - } -} - -const symmetricallyEncryptedHex = "8c0d04030302371a0b38d884f02060c91cf97c9973b8e58e028e9501708ccfe618fb92afef7fa2d80ddadd93cf" -const symmetricallyEncryptedContentsHex = "cb1062004d14c4df636f6e74656e74732e0a" - -func TestSerializeSymmetricKeyEncrypted(t *testing.T) { - buf := bytes.NewBuffer(nil) - passphrase := []byte("testing") - config := &Config{ - DefaultCipher: CipherAES128, - } - - key, err := SerializeSymmetricKeyEncrypted(buf, passphrase, config) - if err != nil { - t.Errorf("failed to serialize: %s", err) - return - } - - p, err := Read(buf) - if err != nil { - t.Errorf("failed to reparse: %s", err) - return - } - ske, ok := p.(*SymmetricKeyEncrypted) - if !ok { - t.Errorf("parsed a different packet type: %#v", p) - return - } - - if !ske.Encrypted { - t.Errorf("SKE not encrypted but should be") - } - if ske.CipherFunc != config.DefaultCipher { - t.Errorf("SKE cipher function is %d (expected %d)", ske.CipherFunc, config.DefaultCipher) - } - err = ske.Decrypt(passphrase) - if err != nil { - t.Errorf("failed to decrypt reparsed SKE: %s", err) - return - } - if !bytes.Equal(key, ske.Key) { - t.Errorf("keys don't match after Decrypt: %x (original) vs %x (parsed)", key, ske.Key) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go deleted file mode 100644 index 33f01751..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/sha1" - "encoding/hex" - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors" - "io" - "io/ioutil" - "testing" -) - -// testReader wraps a []byte and returns reads of a specific length.
-type testReader struct { - data []byte - stride int -} - -func (t *testReader) Read(buf []byte) (n int, err error) { - n = t.stride - if n > len(t.data) { - n = len(t.data) - } - if n > len(buf) { - n = len(buf) - } - copy(buf, t.data) - t.data = t.data[n:] - if len(t.data) == 0 { - err = io.EOF - } - return -} - -func TestMDCReader(t *testing.T) { - mdcPlaintext, _ := hex.DecodeString(mdcPlaintextHex) - - for stride := 1; stride < len(mdcPlaintext)/2; stride++ { - r := &testReader{data: mdcPlaintext, stride: stride} - mdcReader := &seMDCReader{in: r, h: sha1.New()} - body, err := ioutil.ReadAll(mdcReader) - if err != nil { - t.Errorf("stride: %d, error: %s", stride, err) - continue - } - if !bytes.Equal(body, mdcPlaintext[:len(mdcPlaintext)-22]) { - t.Errorf("stride: %d: bad contents %x", stride, body) - continue - } - - err = mdcReader.Close() - if err != nil { - t.Errorf("stride: %d, error on Close: %s", stride, err) - } - } - - mdcPlaintext[15] ^= 80 - - r := &testReader{data: mdcPlaintext, stride: 2} - mdcReader := &seMDCReader{in: r, h: sha1.New()} - _, err := ioutil.ReadAll(mdcReader) - if err != nil { - t.Errorf("corruption test, error: %s", err) - return - } - err = mdcReader.Close() - if err == nil { - t.Error("corruption: no error") - } else if _, ok := err.(*errors.SignatureError); !ok { - t.Errorf("corruption: expected SignatureError, got: %s", err) - } -} - -const mdcPlaintextHex = "a302789c3b2d93c4e0eb9aba22283539b3203335af44a134afb800c849cb4c4de10200aff40b45d31432c80cb384299a0655966d6939dfdeed1dddf980" - -func TestSerialize(t *testing.T) { - buf := bytes.NewBuffer(nil) - c := CipherAES128 - key := make([]byte, c.KeySize()) - - w, err := SerializeSymmetricallyEncrypted(buf, c, key, nil) - if err != nil { - t.Errorf("error from SerializeSymmetricallyEncrypted: %s", err) - return - } - - contents := []byte("hello world\n") - - w.Write(contents) - w.Close() - - p, err := Read(buf) - if err != nil { - t.Errorf("error from Read: %s", err) - return - } - - se, ok := p.(*SymmetricallyEncrypted) - if !ok { - t.Errorf("didn't read a *SymmetricallyEncrypted") - return - } - - r, err := se.Decrypt(c, key) - if err != nil { - t.Errorf("error from Decrypt: %s", err) - return - } - - contentsCopy := bytes.NewBuffer(nil) - _, err = io.Copy(contentsCopy, r) - if err != nil { - t.Errorf("error from io.Copy: %s", err) - return - } - if !bytes.Equal(contentsCopy.Bytes(), contents) { - t.Errorf("contents not equal got: %x want: %x", contentsCopy.Bytes(), contents) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go deleted file mode 100644 index 13ca5143..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
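The testReader type in the hunk above feeds a fixed buffer back in stride-sized pieces so the MDC reader is exercised under every possible read chunking; the same trick generalizes to any io.Reader wrapper. A standalone sketch (strideReader is a renamed copy of the deleted helper, hypothetical outside that file):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// strideReader returns at most stride bytes per Read call, like the deleted
// testReader: short reads flush out buffering bugs in io.Reader wrappers.
type strideReader struct {
	data   []byte
	stride int
}

func (s *strideReader) Read(p []byte) (n int, err error) {
	n = s.stride
	if n > len(s.data) {
		n = len(s.data)
	}
	if n > len(p) {
		n = len(p)
	}
	copy(p, s.data[:n])
	s.data = s.data[n:]
	if len(s.data) == 0 {
		err = io.EOF
	}
	return
}

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")
	for stride := 1; stride <= len(data); stride++ {
		got, err := ioutil.ReadAll(&strideReader{append([]byte(nil), data...), stride})
		if err != nil || !bytes.Equal(got, data) {
			fmt.Println("mismatch at stride", stride, err)
			return
		}
	}
	fmt.Println("all strides ok")
}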
- -package packet - -import ( - "bytes" - "encoding/base64" - "image/color" - "image/jpeg" - "testing" -) - -func TestParseUserAttribute(t *testing.T) { - r := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(userAttributePacket)) - for i := 0; i < 2; i++ { - p, err := Read(r) - if err != nil { - t.Fatal(err) - } - uat := p.(*UserAttribute) - imgs := uat.ImageData() - if len(imgs) != 1 { - t.Errorf("Unexpected number of images in user attribute packet: %d", len(imgs)) - } - if len(imgs[0]) != 3395 { - t.Errorf("Unexpected JPEG image size: %d", len(imgs[0])) - } - img, err := jpeg.Decode(bytes.NewBuffer(imgs[0])) - if err != nil { - t.Errorf("Error decoding JPEG image: %v", err) - } - // A pixel in my right eye. - pixel := color.NRGBAModel.Convert(img.At(56, 36)) - ref := color.NRGBA{R: 157, G: 128, B: 124, A: 255} - if pixel != ref { - t.Errorf("Unexpected pixel color: %v", pixel) - } - w := bytes.NewBuffer(nil) - err = uat.Serialize(w) - if err != nil { - t.Errorf("Error writing user attribute: %v", err) - } - r = bytes.NewBuffer(w.Bytes()) - } -} - -const userAttributePacket = ` -0cyWzJQBEAABAQAAAAAAAAAAAAAAAP/Y/+AAEEpGSUYAAQIAAAEAAQAA/9sAQwAFAwQEBAMFBAQE -BQUFBgcMCAcHBwcPCgsJDBEPEhIRDxEQExYcFxMUGhUQERghGBocHR8fHxMXIiQiHiQcHh8e/9sA -QwEFBQUHBgcOCAgOHhQRFB4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4e -Hh4eHh4eHh4e/8AAEQgAZABkAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYH -CAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHw -JDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6 -g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk -5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIB -AgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEX -GBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKT -lJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX2 -9/j5+v/aAAwDAQACEQMRAD8A5uGP06VehQ4pIox04q5EnHSvAep+hIIl4zVuMHGPWmRrUWtalaaN -pU2oXsgSGJSxPr6ClvoitErs0Itqjc7BQOpPAFYmrfEnwjojtHNqaXEynBjtx5hH4jj9a8B8d+Od -W8UXZjWR4LJT+7t0Jwfc+prnIdO1CWZEW2mZ3HyDactXXDB3V5s8evm1namj6r0H4weCLtxG+ova -ueP30RA/MV6not1bX0Ed1ZzxzwyDKvGwZSPqK+Ff+ES8R8t/ZV2oHUmM10Hgbxp4m8BatEfNnWBH -/eWshOxx9Kmpg4te49RUM1kn+8Wh9zQ4P1FaMC7l465rjPh14y0fxnoseoaXOpfaPOgJ+eI98j09 -67W19M15bi4uzPSqTU480WXkjZkAyAR61DPE6OCSOalWRRgZxjvTb598sfU4FBwx5uY4T4feIm8P -TeJbAgc65NIM+8cX+FFeLfF3Vr3SfiNrMFrMypJMJcDPUqP8KK+kpVFyLU+ar037SXqX4hxVpMY7 -1UhPpVlT2rybKx9smWYz3NeH/EDVLzxt40j8O6bITaQybPlbKkjq39K9O8fasdH8IahfKxWQRFIy -Ou9uB/OuE/Z/0y3j1d9TuyoZCMs5xjuea1pLli5nn46q240l13PcfhN8EvDNtpcEl/CklyVBLuMk -mvU/Dfwo0BL/AO13FjEDD/qyV7Vn+CvGPg8zRpJrVm8ikLtEg6+1ew2dxZ3EQaJgysuQPasH7eXW -1zzsbVhT92kk/PsYieEND+zlPs6c/wCyAPyryH4wfCPRtW0u6j+xRLOxLxSoADkDpXY+MPjJ4c0S -9k082d3O8ZKkxw5XI96ytK+IGk+IpFjRpod+Qq3C7QT6A1E6NenaXbqRg6rlLlqS0fRnxjpd1r/w -w8afa7GWRPKbZLGeBKmeVNfZngLxNaeKfDdprVjxHcLlkJ5Vh1H5185/tDad9h8XOsqAw3Cb0cjq -CfX61P8AsveKf7L8T3fhe5nxa3g324YniQdh9R/KuivTdSmp9TXB1/Z1nRlsfU249QBx1pWfcwI7 -Cq6u2Ovamb9rYz16V5x7Psz5q/aJhZfibcupIElvE3H+7j+lFbXx9szP45jlUfeso8/99OKK9elL -3EeNVopzZVharCtxVRGGMk02S5JyFOB69zWTieypnL/GksfB+0cr9oQt69awPhPpD69Y3Ky3DWth -CWluGU4LAdq3vibGs/g68BJygVxjrwRW5+ztoRv/AAs8EeCZnO/J/hzz/Kumi4wp3kePjlOdZKPY -ml8Mvo6WM9ppi7J0EkQYMzkb1X0wW+bJHGACa+ivg14huZPCkjXUO6SImIYOQAP6UQ2sGneHmiWF -CYoSAAuM8etXfhBpMr+EZ3SSNRcMx6ZxWdes6ytBGSwkMNFuo7pnP614Ut9Zn1C4uLySKcwObGFA -Qnm4+XcR71h+CfDHiKCQWuv2YWFtw+bBZQD8rcE8n2Ney+GbGGQSM6I7xvtI681rXdp8hKRRp6t3 
-FYPE1VDlsY1nQjWdl+J8w/tOeDZZ/AMd/EGefTHyxxyYjwfyODXg3waRh8UtEcFh+8Jb8FNfZPxh -Ak8J6nbPIsiyW7LnseK+Ofh99ptPHFnf2lu0y2twGcKuSEPB/Q1WHk50miq1o14TXU+xop+On61H -NMC6Nis1LgsAcUTSt1APFcXJZn0EqmhyvxA037friTYziBV6f7Tf40Vr3k4aXLx5OMZIzRXZB2ik -efJXbPHJJcnaD9aN2R1qoGO8/WkuLlIV+YjdjpXSonQ5lTxfiTwzqCnkeQxx9BWx+zPrQsrBFYja -zEfrXL6lfie3khcjY6lSPUGud+G3iA6FrY0uQ/KJsA9gCa0jSvFpnBi6tpKSPu++nsIfDFxeXciR -qIicscY4rxTwB8RUkn1axsPEf2LTYx85kTGzqCUP8VcJ47+JOs+I0Hhq1njjt/ufIeSvq1VtE+Gs -eoaUbSHUrkHdu3WtuX5Ix81XRh7OL5jirVpV5Whdn0F8C/iX4auVn0i612T7bASoe8wjTAd89K9g -vtSt5NMa4t5lkRhgOh3Dn6V8aaz8KZrIR3OlQ6r56LySmSxxz06Vo/CHx34h0rxBP4XvJ5AjK2RP -nEbAEj6ZxjPrWM6fMmoswqJxqJ1VZnqHxn1NLPwveqWHmNC2BnnNcD8DfDkGi+CH1m+ijN1qMzNA -4GSIiAMf+hVxPxU8Tapc3c0F9MGCn5GU5BX0Pau3+HmrT3XgXSIJCBHDGdgAx1NYSpezha52Yauq -1dya2Wh2onAIwTj1p0lxxWWLkhRyCKWa5O3ORXOos9KVQluZm83j0oqi84JyWH50Vdmc7ep43d3I -t1Z2Iz2FYdxeSTsxyRnvTdVuDNcNluM9KrKcg817NOnZGNbEXdkNckjrXGeIIprPxFFdRHAlIwem -COtdmxrG8Q2cd/ZNExw45RvQ1bVjim+dWNzw7eaTD4mN3dndCQCo6hmI5zXpj/Ea/wBHjkh0kwRW -xXEfl4yTxXzXZalJDL9nuWKMmRnHcV2Hh3WreCyYXW2SWQhd5P3F6n+lS43d2cTm6d7Ox9EWPxH1 -ODQxPqWpCaSU/ukUc4z3/WvKW8UhviAdaMewYZG98gj9c1ymoa8LyWOJHwkTDaVPb0qpr+q2m6Nb -cfvNo349az9mou9iZVXNWbub3jm98/Vza2ReV7lsJg/e3dsV654UR9N0K0sZP9ZDGFbHr3rzL4P+ -H7rXfEEWr3I3W1qf3IYdW9fwqDxf4k8UeH/G95p08kscHmk25dPlZT0we9YTj7SXKjpw1aNG8mj3 -FLv5ccU959ycnmvKPDnxB82YQarGsZPAlTp+IrvIr1ZIgySKwIyCOhFYTpyg9T0qWIhVV4svzPvf -IdhgY4orPachj81FRdmtzxqdiZmJ9aQEgdqZcPtmbJ71DJcAZ5r20kkeXJtsfPIQDwPzrG1a+S3i -LyHAHvmp7y7HOD1rlNdm+1T7Acovf3o+J2RMpezjzMvrob67pX9o2ShZlYgg/wAWKxZLLWLZ/Ke3 -mVh14yK9M+BMC3dre2ko3LHKCB7EV7EngeGQJdQ7HyBkMKS0djgq1W3c+XtK03U522RwzsTwNiEk -ntXoHgf4calql9El/G8UZbLfLyfr7V9FeGvh+s+0Lbxxcglu2K1NW1nwN4Gk/wBLuI57tV5jjwzE -/QVNS+0dWYRqNvXRFv4eeCodKsY1ggVIY1G3K4z714h+1Jqul3GpwaXYeXJLbzgyyrg4b+6D+HNb -vjz436zq9m+naHF/ZdkeGfOZXH17V4Vqt2b29K+ZuOc5bnce5zWdPBShL2lTfojSeJhy+zp/NjVz -1Bwa6DSfFGq6fbJFDKrov8DjPFcu97ZxsUe4jVhwVJ5Bpp1mwQiLewJPXacVq6fNpYyjOUXdHoKf -EG8VQHsInbuVcgflRXnt5fIs2FYHgcgUVi8LG+xusdW/mN7U2KgEVkTzPt60UVfQ9eHxGHrV1MGi -iD4V25x1qvdgLAMd6KK0pbHm4x++dp8FtUubLxJ5EIjMc+A4Za+qfD8pe1JZVOBmiinW3RyRPMfi -R8QPE638+k2l6LK0Hylbddhb6nOa80mlkcmWR2kcnlnOSaKK7qCXKcNdu5narcSrAoBxvODWJIga -VckjDdqKKwq/EaQ0gUdbjQ6mr7QGBUcd6tPBC6gtGpOOuKKKie5qn7qIpEXd0HSiiimSf//Z` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go deleted file mode 100644 index 29681938..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "testing" -) - -var userIdTests = []struct { - id string - name, comment, email string -}{ - {"", "", "", ""}, - {"John Smith", "John Smith", "", ""}, - {"John Smith ()", "John Smith", "", ""}, - {"John Smith () <>", "John Smith", "", ""}, - {"(comment", "", "comment", ""}, - {"(comment)", "", "comment", ""}, - {" sdfk", "", "", "email"}, - {" John Smith ( Comment ) asdkflj < email > lksdfj", "John Smith", "Comment", "email"}, - {" John Smith < email > lksdfj", "John Smith", "", "email"}, - {"("}, - {"foo", "bar", "", "foo (bar)"}, - {"foo", "", "baz", "foo "}, - {"", "bar", "baz", "(bar) "}, - {"foo", "bar", "baz", "foo (bar) "}, -} - -func TestNewUserId(t *testing.T) { - for i, test := range newUserIdTests { - uid := NewUserId(test.name, test.comment, test.email) - if uid == nil { - t.Errorf("#%d: returned nil", i) - continue - } - if uid.Id != test.id { - t.Errorf("#%d: got '%s', want '%s'", i, uid.Id, test.id) - } - } -} - -var invalidNewUserIdTests = []struct { - name, comment, email string -}{ - {"foo(", "", ""}, - {"foo<", "", ""}, - {"", "bar)", ""}, - {"", "bar<", ""}, - {"", "", "baz>"}, - {"", "", "baz)"}, - {"", "", "baz\x00"}, -} - -func TestNewUserIdWithInvalidInput(t *testing.T) { - for i, test := range invalidNewUserIdTests { - if uid := NewUserId(test.name, test.comment, test.email); uid != nil { - t.Errorf("#%d: returned non-nil value: %#v", i, uid) - } - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go deleted file mode 100644 index 93ea278d..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package openpgp - -import ( - "bytes" - _ "crypto/sha512" - "encoding/hex" - "github.com/root-gg/plik/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -func readerFromHex(s string) io.Reader { - data, err := hex.DecodeString(s) - if err != nil { - panic("readerFromHex: bad input") - } - return bytes.NewBuffer(data) -} - -func TestReadKeyRing(t *testing.T) { - kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - if err != nil { - t.Error(err) - return - } - if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B { - t.Errorf("bad keyring: %#v", kring) - } -} - -func TestRereadKeyRing(t *testing.T) { - kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - if err != nil { - t.Errorf("error in initial parse: %s", err) - return - } - out := new(bytes.Buffer) - err = kring[0].Serialize(out) - if err != nil { - t.Errorf("error in serialization: %s", err) - return - } - kring, err = ReadKeyRing(out) - if err != nil { - t.Errorf("error in second parse: %s", err) - return - } - - if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB { - t.Errorf("bad keyring: %#v", kring) - } -} - -func TestReadPrivateKeyRing(t *testing.T) { - kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) - if err != nil { - t.Error(err) - return - } - if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B || kring[0].PrimaryKey == nil { - t.Errorf("bad keyring: %#v", kring) - } -} - -func TestReadDSAKey(t *testing.T) { - kring, err := ReadKeyRing(readerFromHex(dsaTestKeyHex)) - if err != nil { - t.Error(err) - return - } - if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x0CCC0360 { - t.Errorf("bad parse: %#v", kring) - } -} - -func TestDSAHashTruncatation(t *testing.T) { - // dsaKeyWithSHA512 was generated with GnuPG and --cert-digest-algo - // SHA512 in order to require DSA hash truncation to verify correctly. 
- _, err := ReadKeyRing(readerFromHex(dsaKeyWithSHA512)) - if err != nil { - t.Error(err) - } -} - -func TestGetKeyById(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - - keys := kring.KeysById(0xa34d7e18c20c31bb) - if len(keys) != 1 || keys[0].Entity != kring[0] { - t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys) - } - - keys = kring.KeysById(0xfd94408d4543314f) - if len(keys) != 1 || keys[0].Entity != kring[0] { - t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys) - } -} - -func checkSignedMessage(t *testing.T, signedHex, expected string) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - - md, err := ReadMessage(readerFromHex(signedHex), kring, nil, nil) - if err != nil { - t.Error(err) - return - } - - if !md.IsSigned || md.SignedByKeyId != 0xa34d7e18c20c31bb || md.SignedBy == nil || md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) != 0 || md.IsSymmetricallyEncrypted { - t.Errorf("bad MessageDetails: %#v", md) - } - - contents, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - t.Errorf("error reading UnverifiedBody: %s", err) - } - if string(contents) != expected { - t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected) - } - if md.SignatureError != nil || md.Signature == nil { - t.Errorf("failed to validate: %s", md.SignatureError) - } -} - -func TestSignedMessage(t *testing.T) { - checkSignedMessage(t, signedMessageHex, signedInput) -} - -func TestTextSignedMessage(t *testing.T) { - checkSignedMessage(t, signedTextMessageHex, signedTextInput) -} - -var signedEncryptedMessageTests = []struct { - keyRingHex string - messageHex string - signedByKeyId uint64 - encryptedToKeyId uint64 -}{ - { - testKeys1And2PrivateHex, - signedEncryptedMessageHex, - 0xa34d7e18c20c31bb, - 0x2a67d68660df41c7, - }, - { - dsaElGamalTestKeysHex, - signedEncryptedMessage2Hex, - 0x33af447ccd759b09, - 0xcf6a7abcd43e3673, - }, -} - -func TestSignedEncryptedMessage(t *testing.T) { - for i, test := range signedEncryptedMessageTests { - expected := "Signed and encrypted message\n" - kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex)) - prompt := func(keys []Key, symmetric bool) ([]byte, error) { - if symmetric { - t.Errorf("prompt: message was marked as symmetrically encrypted") - return nil, errors.ErrKeyIncorrect - } - - if len(keys) == 0 { - t.Error("prompt: no keys requested") - return nil, errors.ErrKeyIncorrect - } - - err := keys[0].PrivateKey.Decrypt([]byte("passphrase")) - if err != nil { - t.Errorf("prompt: error decrypting key: %s", err) - return nil, errors.ErrKeyIncorrect - } - - return nil, nil - } - - md, err := ReadMessage(readerFromHex(test.messageHex), kring, prompt, nil) - if err != nil { - t.Errorf("#%d: error reading message: %s", i, err) - return - } - - if !md.IsSigned || md.SignedByKeyId != test.signedByKeyId || md.SignedBy == nil || !md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) == 0 || md.EncryptedToKeyIds[0] != test.encryptedToKeyId { - t.Errorf("#%d: bad MessageDetails: %#v", i, md) - } - - contents, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - t.Errorf("#%d: error reading UnverifiedBody: %s", i, err) - } - if string(contents) != expected { - t.Errorf("#%d: bad UnverifiedBody got:%s want:%s", i, string(contents), expected) - } - - if md.SignatureError != nil || md.Signature == nil { - t.Errorf("#%d: failed to validate: %s", i, md.SignatureError) - } - } -} - -func TestUnspecifiedRecipient(t *testing.T) { - expected := "Recipient 
unspecified\n" - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) - - md, err := ReadMessage(readerFromHex(recipientUnspecifiedHex), kring, nil, nil) - if err != nil { - t.Errorf("error reading message: %s", err) - return - } - - contents, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - t.Errorf("error reading UnverifiedBody: %s", err) - } - if string(contents) != expected { - t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected) - } -} - -func TestSymmetricallyEncrypted(t *testing.T) { - expected := "Symmetrically encrypted.\n" - - prompt := func(keys []Key, symmetric bool) ([]byte, error) { - if len(keys) != 0 { - t.Errorf("prompt: len(keys) = %d (want 0)", len(keys)) - } - - if !symmetric { - t.Errorf("symmetric is not set") - } - - return []byte("password"), nil - } - - md, err := ReadMessage(readerFromHex(symmetricallyEncryptedCompressedHex), nil, prompt, nil) - if err != nil { - t.Errorf("ReadMessage: %s", err) - return - } - - contents, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - t.Errorf("ReadAll: %s", err) - } - - expectedCreationTime := uint32(1295992998) - if md.LiteralData.Time != expectedCreationTime { - t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime) - } - - if string(contents) != expected { - t.Errorf("contents got: %s want: %s", string(contents), expected) - } -} - -func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string, expectedSignerKeyId uint64) { - signed := bytes.NewBufferString(sigInput) - signer, err := CheckDetachedSignature(kring, signed, signature) - if err != nil { - t.Errorf("%s: signature error: %s", tag, err) - return - } - if signer == nil { - t.Errorf("%s: signer is nil", tag) - return - } - if signer.PrimaryKey.KeyId != expectedSignerKeyId { - t.Errorf("%s: wrong signer got:%x want:%x", tag, signer.PrimaryKey.KeyId, expectedSignerKeyId) - } -} - -func TestDetachedSignature(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary", testKey1KeyId) - testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text", testKey1KeyId) - testDetachedSignature(t, kring, readerFromHex(detachedSignatureV3TextHex), signedInput, "v3", testKey1KeyId) - - incorrectSignedInput := signedInput + "X" - _, err := CheckDetachedSignature(kring, bytes.NewBufferString(incorrectSignedInput), readerFromHex(detachedSignatureHex)) - if err == nil { - t.Fatal("CheckDetachedSignature returned without error for bad signature") - } - if err == errors.ErrUnknownIssuer { - t.Fatal("CheckDetachedSignature returned ErrUnknownIssuer when the signer was known, but the signature invalid") - } -} - -func TestDetachedSignatureDSA(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex)) - testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) -} - -func testHashFunctionError(t *testing.T, signatureHex string) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex)) - if err == nil { - t.Fatal("Packet with bad hash type was correctly parsed") - } - unsupported, ok := err.(errors.UnsupportedError) - if !ok { - t.Fatalf("Unexpected class of error: %s", err) - } - if !strings.Contains(string(unsupported), "hash ") { - t.Fatalf("Unexpected error: %s", err) - } -} - -func 
TestUnknownHashFunction(t *testing.T) { - // unknownHashFunctionHex contains a signature packet with hash - // function type 153 (which isn't a real hash function id). - testHashFunctionError(t, unknownHashFunctionHex) -} - -func TestMissingHashFunction(t *testing.T) { - // missingHashFunctionHex contains a signature packet that uses - // RIPEMD160, which isn't compiled in. - testHashFunctionError(t, missingHashFunctionHex) -} - -func TestReadingArmoredPrivateKey(t *testing.T) { - el, err := ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKeyBlock)) - if err != nil { - t.Error(err) - } - if len(el) != 1 { - t.Errorf("got %d entities, wanted 1\n", len(el)) - } -} - -func TestReadingArmoredPublicKey(t *testing.T) { - el, err := ReadArmoredKeyRing(bytes.NewBufferString(e2ePublicKey)) - if err != nil { - t.Error(err) - } - if len(el) != 1 { - t.Errorf("didn't get a valid entity") - } -} - -func TestNoArmoredData(t *testing.T) { - _, err := ReadArmoredKeyRing(bytes.NewBufferString("foo")) - if _, ok := err.(errors.InvalidArgumentError); !ok { - t.Errorf("error was not an InvalidArgumentError: %s", err) - } -} - -const testKey1KeyId = 0xA34D7E18C20C31BB -const testKey3KeyId = 0x338934250CCC0360 - -const signedInput = "Signed message\nline 2\nline 3\n" -const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" - -const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" - -const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" - -const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" - -const detachedSignatureV3TextHex = "8900950305005255c25ca34d7e18c20c31bb0102bb3f04009f6589ef8a028d6e54f6eaf25432e590d31c3a41f4710897585e10c31e5e332c7f9f409af8512adceaff24d0da1474ab07aa7bce4f674610b010fccc5b579ae5eb00a127f272fb799f988ab8e4574c141da6dbfecfef7e6b2c478d9a3d2551ba741f260ee22bec762812f0053e05380bfdd55ad0f22d8cdf71b233fe51ae8a24" - -const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" - -const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" - -const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" - -const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" - -const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" - -const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" - -const signedEncryptedMessageHex = "848c032a67d68660df41c70103ff5789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8d2c03b018bd210b1d3791e1aba74b0f1034e122ab72e760492c192383cf5e20b5628bd043272d63df9b923f147eb6091cd897553204832aba48fec54aa447547bb16305a1024713b90e77fd0065f1918271947549205af3c74891af22ee0b56cd29bfec6d6e351901cd4ab3ece7c486f1e32a792d4e474aed98ee84b3f591c7dff37b64e0ecd68fd036d517e412dcadf85840ce184ad7921ad446c4ee28db80447aea1ca8d4f574db4d4e37688158ddd19e14ee2eab4873d46947d65d14a23e788d912cf9a19624ca7352469b72a83866b7c23cb5ace3deab3c7018061b0ba0f39ed2befe27163e5083cf9b8271e3e3d52cc7ad6e2a3bd81d4c3d7022f8d" - -const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" - -const symmetricallyEncryptedCompressedHex = "8c0d04030302eb4a03808145d0d260c92f714339e13de5a79881216431925bf67ee2898ea61815f07894cd0703c50d0a76ef64d482196f47a8bc729af9b80bb6" - -const dsaTestKeyHex = 
"9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1.4.10 (GNU/Linux) - -lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp -idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn -vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB -AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X -0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL -IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk -VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn -gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 -TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx -q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz -dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA -CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 -ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ -eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid -AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV -bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK -/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA -A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX 
-TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc -lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 -rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN -oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 -QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU -nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC -AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp -BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad -AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL -VrM0m72/jnpKo04= -=zNCn ------END PGP PRIVATE KEY BLOCK-----` - -const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Charset: UTF-8 - -xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 -sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk -Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ -AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD -24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX -+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 -B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX -fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA -FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 -ex7En5r7rHR5xwX82Msc+Rq9dSyO -=7MrZ ------END PGP PUBLIC KEY BLOCK-----` - -const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` - -const unknownHashFunctionHex = 
`8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` - -const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go deleted file mode 100644 index 183d2605..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package s2k - -import ( - "bytes" - "crypto" - _ "crypto/md5" - "crypto/rand" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/hex" - "testing" - - _ "golang.org/x/crypto/ripemd160" -) - -var saltedTests = []struct { - in, out string -}{ - {"hello", "10295ac1"}, - {"world", "ac587a5e"}, - {"foo", "4dda8077"}, - {"bar", "bd8aac6b9ea9cae04eae6a91c6133b58b5d9a61c14f355516ed9370456"}, - {"x", "f1d3f289"}, - {"xxxxxxxxxxxxxxxxxxxxxxx", "e00d7b45"}, -} - -func TestSalted(t *testing.T) { - h := sha1.New() - salt := [4]byte{1, 2, 3, 4} - - for i, test := range saltedTests { - expected, _ := hex.DecodeString(test.out) - out := make([]byte, len(expected)) - Salted(out, h, []byte(test.in), salt[:]) - if !bytes.Equal(expected, out) { - t.Errorf("#%d, got: %x want: %x", i, out, expected) - } - } -} - -var iteratedTests = []struct { - in, out string -}{ - {"hello", "83126105"}, - {"world", "6fa317f9"}, - {"foo", "8fbc35b9"}, - {"bar", "2af5a99b54f093789fd657f19bd245af7604d0f6ae06f66602a46a08ae"}, - {"x", "5a684dfe"}, - {"xxxxxxxxxxxxxxxxxxxxxxx", "18955174"}, -} - -func TestIterated(t *testing.T) { - h := sha1.New() - salt := [4]byte{4, 3, 2, 1} - - for i, test := range iteratedTests { - expected, _ := hex.DecodeString(test.out) - out := make([]byte, len(expected)) - Iterated(out, h, []byte(test.in), salt[:], 31) - if !bytes.Equal(expected, out) { - t.Errorf("#%d, got: %x want: %x", i, out, expected) - } - } -} - -var parseTests = []struct { - spec, in, out string -}{ - /* Simple with SHA1 */ - {"0002", "hello", "aaf4c61d"}, - /* Salted with SHA1 */ - {"01020102030405060708", "hello", "f4f7d67e"}, - /* Iterated with SHA1 */ - {"03020102030405060708f1", "hello", "f2a57b7c"}, -} - -func TestParse(t *testing.T) { - for i, test := range parseTests { - spec, _ := hex.DecodeString(test.spec) - buf := bytes.NewBuffer(spec) - f, err := Parse(buf) - if err != nil { - t.Errorf("%d: Parse returned error: %s", i, err) - continue - } - - expected, _ := hex.DecodeString(test.out) - out := make([]byte, len(expected)) - f(out, []byte(test.in)) - if !bytes.Equal(out, expected) { - t.Errorf("%d: output got: %x want: %x", i, out, expected) - } - if testing.Short() { - break - } - } -} - -func TestSerialize(t *testing.T) { - hashes := []crypto.Hash{crypto.MD5, crypto.SHA1, crypto.RIPEMD160, - crypto.SHA256, crypto.SHA384, crypto.SHA512, crypto.SHA224} - testCounts := []int{-1, 0, 1024, 65536, 4063232, 65011712} - for _, h := range hashes { - for _, c := range testCounts { - testSerializeConfig(t, &Config{Hash: h, S2KCount: c}) - } - } -} - -func testSerializeConfig(t *testing.T, c *Config) { - t.Logf("Running testSerializeConfig() with config: %+v", c) - - buf := bytes.NewBuffer(nil) - key := make([]byte, 16) - passphrase := []byte("testing") - err := Serialize(buf, key, rand.Reader, passphrase, c) - if err != nil { - t.Errorf("failed to serialize: %s", err) - return - } - - f, err := Parse(buf) - if err != nil { - t.Errorf("failed to reparse: %s", err) - return - } - key2 := make([]byte, len(key)) - f(key2, passphrase) - if !bytes.Equal(key2, key) { - t.Errorf("keys don't match: %x (serialied) vs %x (parsed)", key, key2) - } -} diff --git a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go b/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go deleted file mode 100644 index 9f8c358b..00000000 --- a/client/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "bytes" - "io" - "io/ioutil" - "testing" - "time" -) - -func TestSignDetached(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) - out := bytes.NewBuffer(nil) - message := bytes.NewBufferString(signedInput) - err := DetachSign(out, kring[0], message, nil) - if err != nil { - t.Error(err) - } - - testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) -} - -func TestSignTextDetached(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) - out := bytes.NewBuffer(nil) - message := bytes.NewBufferString(signedInput) - err := DetachSignText(out, kring[0], message, nil) - if err != nil { - t.Error(err) - } - - testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) -} - -func TestSignDetachedDSA(t *testing.T) { - kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex)) - out := bytes.NewBuffer(nil) - message := bytes.NewBufferString(signedInput) - err := DetachSign(out, kring[0], message, nil) - if err != nil { - t.Error(err) - } - - testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId) -} - -func TestNewEntity(t *testing.T) { - if testing.Short() { - return - } - - e, err := NewEntity("Test User", "test", "test@example.com", nil) - if err != nil { - t.Errorf("failed to create entity: %s", err) - return - } - - w := bytes.NewBuffer(nil) - if err := e.SerializePrivate(w, nil); err != nil { - t.Errorf("failed to serialize entity: %s", err) - return - } - serialized := w.Bytes() - - el, err := ReadKeyRing(w) - if err != nil { - t.Errorf("failed to reparse entity: %s", err) - return - } - - if len(el) != 1 { - t.Errorf("wrong number of entities found, got %d, want 1", len(el)) - } - - w = bytes.NewBuffer(nil) - if err := e.SerializePrivate(w, nil); err != nil { - t.Errorf("failed to serialize entity second time: %s", err) - return - } - - if !bytes.Equal(w.Bytes(), serialized) { - t.Errorf("results differed") - } -} - -func TestSymmetricEncryption(t *testing.T) { - buf := new(bytes.Buffer) - plaintext, err := SymmetricallyEncrypt(buf, []byte("testing"), nil, nil) - if err != nil { - t.Errorf("error writing headers: %s", err) - return - } - message := []byte("hello world\n") - _, err = plaintext.Write(message) - if err != nil { - t.Errorf("error writing to plaintext writer: %s", err) - } - err = plaintext.Close() - if err != nil { - t.Errorf("error closing plaintext writer: %s", err) - } - - md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, error) { - return []byte("testing"), nil - }, nil) - if err != nil { - t.Errorf("error rereading message: %s", err) - } - messageBuf := bytes.NewBuffer(nil) - _, err = io.Copy(messageBuf, md.UnverifiedBody) - if err != nil { - t.Errorf("error rereading message: %s", err) - } - if !bytes.Equal(message, messageBuf.Bytes()) { - t.Errorf("recovered message incorrect got '%s', want '%s'", messageBuf.Bytes(), message) - } -} - -var testEncryptionTests = []struct { - keyRingHex string - isSigned bool -}{ - { - testKeys1And2PrivateHex, - false, - }, - { - testKeys1And2PrivateHex, - true, - }, - { - dsaElGamalTestKeysHex, - false, - }, - { - dsaElGamalTestKeysHex, - true, - }, -} - -func TestEncryption(t *testing.T) { - for i, test := range testEncryptionTests { - kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex)) - - passphrase := []byte("passphrase") - for _, entity := range kring { - if 
entity.PrivateKey != nil && entity.PrivateKey.Encrypted { - err := entity.PrivateKey.Decrypt(passphrase) - if err != nil { - t.Errorf("#%d: failed to decrypt key", i) - } - } - for _, subkey := range entity.Subkeys { - if subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted { - err := subkey.PrivateKey.Decrypt(passphrase) - if err != nil { - t.Errorf("#%d: failed to decrypt subkey", i) - } - } - } - } - - var signed *Entity - if test.isSigned { - signed = kring[0] - } - - buf := new(bytes.Buffer) - w, err := Encrypt(buf, kring[:1], signed, nil /* no hints */, nil) - if err != nil { - t.Errorf("#%d: error in Encrypt: %s", i, err) - continue - } - - const message = "testing" - _, err = w.Write([]byte(message)) - if err != nil { - t.Errorf("#%d: error writing plaintext: %s", i, err) - continue - } - err = w.Close() - if err != nil { - t.Errorf("#%d: error closing WriteCloser: %s", i, err) - continue - } - - md, err := ReadMessage(buf, kring, nil /* no prompt */, nil) - if err != nil { - t.Errorf("#%d: error reading message: %s", i, err) - continue - } - - testTime, _ := time.Parse("2006-01-02", "2013-07-01") - if test.isSigned { - signKey, _ := kring[0].signingKey(testTime) - expectedKeyId := signKey.PublicKey.KeyId - if md.SignedByKeyId != expectedKeyId { - t.Errorf("#%d: message signed by wrong key id, got: %d, want: %d", i, *md.SignedBy, expectedKeyId) - } - if md.SignedBy == nil { - t.Errorf("#%d: failed to find the signing Entity", i) - } - } - - plaintext, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - t.Errorf("#%d: error reading encrypted contents: %s", i, err) - continue - } - - encryptKey, _ := kring[0].encryptionKey(testTime) - expectedKeyId := encryptKey.PublicKey.KeyId - if len(md.EncryptedToKeyIds) != 1 || md.EncryptedToKeyIds[0] != expectedKeyId { - t.Errorf("#%d: expected message to be encrypted to %v, but got %#v", i, expectedKeyId, md.EncryptedToKeyIds) - } - - if string(plaintext) != message { - t.Errorf("#%d: got: %s, want: %s", i, string(plaintext), message) - } - - if test.isSigned { - if md.SignatureError != nil { - t.Errorf("#%d: signature error: %s", i, md.SignatureError) - } - if md.Signature == nil { - t.Error("signature missing") - } - } - } -} diff --git a/client/config/config.go b/client/config/config.go index 7afb5646..9ae336a7 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -50,13 +50,13 @@ import ( "github.com/root-gg/plik/server/common" ) -// Static config var +// Config static variable var Config *UploadConfig -// Static Upload var +// Upload static variable var Upload *common.Upload -// Static files array +// Files static array var Files []*FileToUpload // Private backends @@ -85,6 +85,7 @@ type UploadConfig struct { Password string TTL int AutoUpdate bool + Token string } // NewUploadConfig construct a new configuration with default values @@ -113,6 +114,7 @@ func NewUploadConfig() (config *UploadConfig) { config.Password = "" config.TTL = 86400 * 30 config.AutoUpdate = false + config.Token = "" return } @@ -224,8 +226,8 @@ func Load() (err error) { return } -// UnmarshalArgs into upload informations -// Argument takes priority over config file param +// UnmarshalArgs turns command line arguments into upload settings +// Command line arguments override config file settings func UnmarshalArgs(arguments map[string]interface{}) (err error) { // Handle flags @@ -243,29 +245,11 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) { Debug("Arguments : " + Sdump(arguments)) Debug("Configuration : " + 
Sdump(Config))

-	// Plik url
+	// Plik server URL
 	if arguments["--server"] != nil && arguments["--server"].(string) != "" {
 		Config.URL = arguments["--server"].(string)
 	}

-	// Do we need an archive backend
-	if arguments["-a"].(bool) || arguments["--archive"] != nil || Config.Archive {
-		Config.Archive = true
-
-		if arguments["--archive"] != nil && arguments["--archive"] != "" {
-			Config.ArchiveMethod = arguments["--archive"].(string)
-		}
-	}
-	archiveBackend, err = archive.NewArchiveBackend(Config.ArchiveMethod, Config.ArchiveOptions)
-	if err != nil {
-		return fmt.Errorf("Invalid archive params : %s\n", err)
-	}
-	err = archiveBackend.Configure(arguments)
-	if err != nil {
-		return fmt.Errorf("Invalid archive params : %s\n", err)
-	}
-	Debug("Archive backend configuration : " + utils.Sdump(archiveBackend.GetConfiguration()))
-
 	// Check files
 	if _, ok := arguments["FILE"].([]string); ok {
@@ -318,8 +302,31 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) {
 			Files = append(Files, fileToUpload)
 		}

+		// Enable archive mode ?
+		if arguments["-a"].(bool) || arguments["--archive"] != nil || Config.Archive {
+			Config.Archive = true
+
+			if arguments["--archive"] != nil && arguments["--archive"] != "" {
+				Config.ArchiveMethod = arguments["--archive"].(string)
+			}
+		}
+		if Config.Archive {
+			// Configure the archive backend
+			archiveBackend, err = archive.NewArchiveBackend(Config.ArchiveMethod, Config.ArchiveOptions)
+			if err != nil {
+				return fmt.Errorf("Invalid archive params : %s\n", err)
+			}
+			err = archiveBackend.Configure(arguments)
+			if err != nil {
+				return fmt.Errorf("Invalid archive params : %s\n", err)
+			}
+			Debug("Archive backend configuration : " + utils.Sdump(archiveBackend.GetConfiguration()))
+
+			// Add archive file to upload list
 			fileToUpload := NewFileToUpload()
+
+			// Guess archive name
 			fileToUpload.Name = archiveBackend.GetFileName(arguments["FILE"].([]string))
 			fileToUpload.Reference = "0"
@@ -329,12 +336,11 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) {
 			Files = make([]*FileToUpload, 1)
 			Files[0] = fileToUpload
 		}
-
 	} else {
 		return fmt.Errorf("No files specified")
 	}

-	// Set name if user specified it
+	// Override file name if specified
 	if arguments["--name"] != nil && arguments["--name"].(string) != "" && len(Files) == 1 {
 		Files[0].Name = arguments["--name"].(string)
 	}
@@ -357,7 +363,7 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) {
 		Upload.Comments = arguments["--comments"].(string)
 	}

-	// Upload time to live
+	// Configure upload expiration date
 	Upload.TTL = Config.TTL
 	if arguments["--ttl"] != nil && arguments["--ttl"].(string) != "" {
 		ttlStr := arguments["--ttl"].(string)
@@ -379,7 +385,7 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) {
 		Upload.TTL = ttl * mul
 	}

-	// Do we need a crypto backend ?
+	// Enable secure mode ?
 	if arguments["-s"].(bool) || arguments["--secure"] != nil || Config.Secure {
 		Config.Secure = true
 		secureMethod := Config.SecureMethod
@@ -387,6 +393,8 @@
 			secureMethod = arguments["--secure"].(string)
 		}
 		var err error
+
+		// Configure crypto backend
 		cryptoBackend, err = crypto.NewCryptoBackend(secureMethod, Config.SecureOptions)
 		if err != nil {
 			return fmt.Errorf("Invalid secure params : %s\n", err)
@@ -399,7 +407,7 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) {
 		Debug("Crypto backend configuration : " + utils.Sdump(cryptoBackend.GetConfiguration()))
 	}

-	// Do user wants a password protected upload ?
+ // Enable password protection ? if arguments["-p"].(bool) { fmt.Printf("Login [plik]: ") var err error @@ -430,7 +438,7 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) { Upload.Password = password } - // User wants Yubikey protected upload ? + // Enable Yubikey protection ? if Config.Yubikey || arguments["--yubikey"].(bool) { fmt.Printf("Yubikey token : ") _, err := fmt.Scanln(&Upload.Yubikey) @@ -439,6 +447,11 @@ func UnmarshalArgs(arguments map[string]interface{}) (err error) { } } + // Override upload token ? + if arguments["--token"] != nil && arguments["--token"].(string) != "" { + Config.Token = arguments["--token"].(string) + } + return } @@ -466,13 +479,12 @@ func Debug(message string) { } } -// Dump takes a interface{} and print the call -// to Sdump +// Dump prints an interface{} as a JSON string func Dump(data interface{}) { fmt.Println(Sdump(data)) } -// Sdump takes a interface{} and turn it to a string +// Sdump turns an interface{} to a JSON string func Sdump(data interface{}) string { buf := new(bytes.Buffer) if json, err := json.MarshalIndent(data, "", " "); err != nil { diff --git a/client/plik.go b/client/plik.go index b857599d..ad97608d 100644 --- a/client/plik.go +++ b/client/plik.go @@ -33,15 +33,16 @@ import ( "bytes" "crypto/tls" "encoding/json" - "errors" "fmt" "io" "io/ioutil" "math/rand" "mime/multipart" "net/http" + "net/http/httputil" "net/url" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -89,6 +90,7 @@ Options: -t, --ttl TTL Time before expiration (Upload will be removed in m|h|d) -n, --name NAME Set file name when piping from STDIN --server SERVER Overrides plik url + --token TOKEN Specify an upload token --comments COMMENT Set comments of the upload ( MarkDown compatible ) -p Protect the upload with login and password --password PASSWD Protect the upload with login:password ( if omitted default login is "plik" ) @@ -124,7 +126,8 @@ Options: os.Exit(0) } } else { - printf("Unable to update Plik client : %s\n", err) + printf("Unable to update Plik client : \n") + printf("%s\n", err) if updateFlag { os.Exit(1) } @@ -135,16 +138,24 @@ Options: // --> If not from pipe, and no files in arguments : printing help fi, _ := os.Stdin.Stat() - if (fi.Mode()&os.ModeCharDevice) != 0 && len(arguments["FILE"].([]string)) == 0 { - fmt.Println(usage) - os.Exit(0) + if runtime.GOOS != "windows" { + if (fi.Mode()&os.ModeCharDevice) != 0 && len(arguments["FILE"].([]string)) == 0 { + fmt.Println(usage) + os.Exit(0) + } + } else { + if len(arguments["FILE"].([]string)) == 0 { + fmt.Println(usage) + os.Exit(0) + } } // Create upload config.Debug("Sending upload params : " + config.Sdump(config.Upload)) uploadInfo, err := createUpload(config.Upload) if err != nil { - printf("Unable to create upload : %s\n", err) + printf("Unable to create upload\n") + printf("%s\n", err) os.Exit(1) } config.Debug("Got upload info : " + config.Sdump(uploadInfo)) @@ -201,7 +212,8 @@ Options: file, err := upload(uploadInfo, fileToUpload, fileToUpload.FileHandle) if err != nil { - printf("Unable to upload file : %s\n", err) + printf("Unable to upload file : \n") + printf("%s\n", err) return } @@ -221,7 +233,7 @@ Options: // Increment size totalSize += file.CurrentSize - // Print file informations (only url if quiet mode enabled) + // Print file information (only url if quiet mode is enabled) if config.Config.Quiet { fmt.Println(getFileURL(uploadInfo, file)) } else { @@ -251,11 +263,11 @@ func createUpload(uploadParams *common.Upload) (upload *common.Upload, err error } 
req.Header.Set("Content-Type", "application/json") - req.Header.Set("X-ClientApp", "cli_client") + + // Referer is used to generate shorlinks req.Header.Set("Referer", config.Config.URL) - var resp *http.Response - resp, err = client.Do(req) + resp, err := makeRequest(req) if err != nil { return } @@ -266,18 +278,6 @@ func createUpload(uploadParams *common.Upload) (upload *common.Upload, err error return } - // Parse Json error - if resp.StatusCode != 200 { - result := new(common.Result) - err = json.Unmarshal(body, result) - if err == nil && result.Message != "" { - err = errors.New(result.Message) - } else { - err = fmt.Errorf("HTTP error %d %s", resp.StatusCode, resp.Status) - } - return - } - basicAuth = resp.Header.Get("Authorization") // Parse Json response @@ -359,15 +359,13 @@ func upload(uploadInfo *common.Upload, fileToUpload *config.FileToUpload, reader } req.Header.Set("Content-Type", multipartWriter.FormDataContentType()) - req.Header.Set("X-ClientApp", "cli_client") req.Header.Set("X-UploadToken", uploadInfo.UploadToken) if uploadInfo.ProtectedByPassword { req.Header.Set("Authorization", basicAuth) } - var resp *http.Response - resp, err = client.Do(req) + resp, err := makeRequest(req) if err != nil { return } @@ -378,18 +376,6 @@ func upload(uploadInfo *common.Upload, fileToUpload *config.FileToUpload, reader return } - // Parse Json error - if resp.StatusCode != 200 { - result := new(common.Result) - err = json.Unmarshal(body, result) - if err == nil && result.Message != "" { - err = errors.New(result.Message) - } else { - err = fmt.Errorf("HTTP error %d %s", resp.StatusCode, resp.Status) - } - return - } - // Parse Json response file = new(common.File) err = json.Unmarshal(body, file) @@ -489,15 +475,42 @@ func updateClient(updateFlag bool) (err error) { err = fmt.Errorf("Unable to get server version : %s", err) return } - var resp *http.Response - resp, err = client.Do(req) - if err != nil { - err = fmt.Errorf("Unable to get server version : %s", err) - return - } + + resp, err := makeRequest(req) defer resp.Body.Close() - if resp.StatusCode == 404 { + if resp.StatusCode == 200 { + // >=1.1 use BuildInfo from /version + + var body []byte + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + err = fmt.Errorf("Unable to get server version : %s", err) + return + } + + // Parse json BuildInfo object + buildInfo := new(common.BuildInfo) + err = json.Unmarshal(body, buildInfo) + if err != nil { + err = fmt.Errorf("Unable to get server version : %s", err) + return + } + + version = buildInfo.Version + for _, client := range buildInfo.Clients { + if client.OS == runtime.GOOS && client.ARCH == runtime.GOARCH { + newMD5 = client.Md5 + downloadURL = config.Config.URL + "/" + client.Path + break + } + } + + if newMD5 == "" || downloadURL == "" { + err = fmt.Errorf("Server does not offer a %s-%s client", runtime.GOOS, runtime.GOARCH) + return + } + } else if resp.StatusCode == 404 { // <1.1 fallback on MD5SUM file baseURL := config.Config.URL + "/clients/" + runtime.GOOS + "-" + runtime.GOARCH @@ -512,8 +525,8 @@ func updateClient(updateFlag bool) (err error) { err = fmt.Errorf("Unable to get server version : %s", err) return } - var resp *http.Response - resp, err = client.Do(req) + + resp, err = makeRequest(req) if err != nil { err = fmt.Errorf("Unable to get server version : %s", err) return @@ -539,41 +552,8 @@ func updateClient(updateFlag bool) (err error) { } downloadURL = baseURL + "/" + binary } else { - // >=1.1 use BuildInfo from /version - - if resp.StatusCode != 200 
-		if resp.StatusCode != 200 {
-			err = fmt.Errorf("Unable to get server version : %s", resp.Status)
-			return
-		}
-
-		var body []byte
-		body, err = ioutil.ReadAll(resp.Body)
-		if err != nil {
-			err = fmt.Errorf("Unable to get server version : %s", err)
-			return
-		}
-
-		// Parse json BuildInfo object
-		buildInfo := new(common.BuildInfo)
-		err = json.Unmarshal(body, buildInfo)
-		if err != nil {
-			err = fmt.Errorf("Unable to get server version : %s", err)
-			return
-		}
-
-		version = buildInfo.Version
-		for _, client := range buildInfo.Clients {
-			if client.OS == runtime.GOOS && client.ARCH == runtime.GOARCH {
-				newMD5 = client.Md5
-				downloadURL = config.Config.URL + "/" + client.Path
-				break
-			}
-		}
-
-		if newMD5 == "" || downloadURL == "" {
-			err = fmt.Errorf("Server does not offer a %s-%s client", runtime.GOOS, runtime.GOARCH)
-			return
-		}
+		err = fmt.Errorf("Unable to get server version : %s", err)
+		return
 	}
 
 	// Check if the client is up to date
@@ -604,17 +584,17 @@ func updateClient(updateFlag bool) (err error) {
 		return
 	}
 
-	// Create tmp file
-	tmpFile, err := ioutil.TempFile("", ".plik_update_")
+	// Download new client
+	tmpPath := filepath.Dir(path) + "/" + "." + filepath.Base(path) + ".tmp"
+	tmpFile, err := os.OpenFile(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
 	if err != nil {
 		return
 	}
 	defer func() {
 		tmpFile.Close()
-		os.Remove(tmpFile.Name())
+		os.Remove(tmpPath)
 	}()
 
-	// Download new client
 	URL, err = url.Parse(downloadURL)
 	if err != nil {
 		err = fmt.Errorf("Unable to download client : %s", err)
 		return
 	}
@@ -625,7 +605,7 @@ func updateClient(updateFlag bool) (err error) {
 		err = fmt.Errorf("Unable to download client : %s", err)
 		return
 	}
-	resp, err = client.Do(req)
+	resp, err = makeRequest(req)
 	if err != nil {
 		err = fmt.Errorf("Unable to download client : %s", err)
 		return
 	}
@@ -647,7 +627,7 @@ func updateClient(updateFlag bool) (err error) {
 	}
 
 	// Check download integrity
-	downloadMD5, err := utils.FileMd5sum(tmpFile.Name())
+	downloadMD5, err := utils.FileMd5sum(tmpPath)
 	if err != nil {
 		err = fmt.Errorf("Unable to download client : %s", err)
 		return
 	}
@@ -658,7 +638,7 @@ func updateClient(updateFlag bool) (err error) {
 	}
 
 	// Replace old client
-	err = os.Rename(tmpFile.Name(), path)
+	err = os.Rename(tmpPath, path)
 	if err != nil {
 		err = fmt.Errorf("Unable to replace client : %s", err)
 		return
 	}
@@ -673,6 +653,71 @@ func updateClient(updateFlag bool) (err error) {
 	return
 }
 
+func makeRequest(req *http.Request) (resp *http.Response, err error) {
+
+	// Set client version headers
+	req.Header.Set("X-ClientApp", "cli_client")
+	bi := common.GetBuildInfo()
+	if bi != nil {
+		version := runtime.GOOS + "-" + runtime.GOARCH + "-" + bi.Version
+		req.Header.Set("X-ClientVersion", version)
+	}
+
+	// Set authentication header
+	if config.Config.Token != "" {
+		req.Header.Set("X-PlikToken", config.Config.Token)
+	}
+
+	// Log request
+	if config.Config.Debug {
+		dump, err := httputil.DumpRequest(req, true)
+		if err == nil {
+			config.Debug(string(dump))
+		} else {
+			printf("Unable to dump HTTP request : %s", err)
+		}
+	}
+
+	// Make request
+	resp, err = client.Do(req)
+	if err != nil {
+		return
+	}
+
+	// Log response
+	if config.Config.Debug {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			config.Debug(string(dump))
+		} else {
+			printf("Unable to dump HTTP response : %s", err)
+		}
+	}
+
+	// Parse Json error
+	if resp.StatusCode != 200 {
+		defer resp.Body.Close()
+		var body []byte
+		body, err = ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return
+		}
+
+		result := new(common.Result)
+		err = json.Unmarshal(body, result)
+		if err == nil && result.Message != "" {
+			err = fmt.Errorf("%s : %s", resp.Status, result.Message)
+		} else if len(body) > 0 {
+			err = fmt.Errorf("%s : %s", resp.Status, string(body))
+		} else {
+			err = fmt.Errorf("%s", resp.Status)
+		}
+		return
+	}
+
+	return
+}
+
 func printf(format string, args ...interface{}) {
 	if !config.Config.Quiet {
 		fmt.Printf(format, args...)
 	}
 }
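A quick usage sketch of the token support introduced above (the token value is a placeholder, and the ~/.plikrc line assumes the client's TOML config maps its Token field verbatim):

```sh
# Upload a file linked to a user account (placeholder token value)
$ plik --token xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx file.txt

# Or persist the token in the client configuration (assumed TOML key)
$ echo 'Token = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"' >> ~/.plikrc
```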
diff --git a/client/test.sh b/client/test.sh
index 00548ecb..cc72bb83 100755
--- a/client/test.sh
+++ b/client/test.sh
@@ -156,7 +156,6 @@ function uploadOpts {
     fi
     CURL_CMD="$CURL_CMD $URL/upload/$UPLOAD_ID"
     UPLOAD_OPTS=$( eval "$CURL_CMD" 2>/dev/null | python -m json.tool )
-
 }
 
 # Download files by running the output cmds
@@ -661,9 +660,9 @@
 rm $SERVER_LOG
 rm $CLIENT_LOG
 cd $ORIGIN
 
-echo " - upgrade :"
-./test_upgrade.sh 2>/dev/null | grep "Update from"
-echo " - downgrade :"
-./test_downgrade.sh 2>/dev/null | grep "Update to"
+#echo " - upgrade : ( this might take a long time ... )"
+#./test_upgrade.sh
+#echo " - downgrade : ( this might take a long time ... )"
+#./test_downgrade.sh
 
 exit 0
\ No newline at end of file
diff --git a/client/test_downgrade.sh b/client/test_downgrade.sh
index 681fb543..9059022f 100755
--- a/client/test_downgrade.sh
+++ b/client/test_downgrade.sh
@@ -37,6 +37,7 @@ RELEASES=(
     1.0
     1.0.1
     1.1-RC1
+    1.1-RC2
 )
 
 ###
@@ -91,7 +92,7 @@
 do
     # Build server and clients
     echo "Compiling server and clients v$RELEASE :"
-    if grep deps Makefile ; then
+    if grep "^deps:" Makefile ; then
         make deps
     fi
 
diff --git a/client/test_upgrade.sh b/client/test_upgrade.sh
index fa158e3f..b7c23889 100755
--- a/client/test_upgrade.sh
+++ b/client/test_upgrade.sh
@@ -38,6 +38,7 @@ RELEASES=(
     1.0
     1.0.1
     1.1-RC1
+    1.1-RC2
 )
 
 ###
diff --git a/documentation/api.md b/documentation/api.md
new file mode 100644
index 00000000..9bbe2db5
--- /dev/null
+++ b/documentation/api.md
@@ -0,0 +1,169 @@
+### API
+The Plik server exposes a RESTful API to manage uploads and get files :
+
+Get and create upload :
+
+  - **POST** /upload
+    - Params (json object in request body) :
+      - oneshot (bool)
+      - stream (bool)
+      - removable (bool)
+      - ttl (int)
+      - login (string)
+      - password (string)
+      - files (see below)
+    - Return :
+        JSON formatted upload object.
+        Important fields :
+          - id (required to upload files)
+          - uploadToken (required to upload/remove files)
+          - files (see below)
+
+   For stream mode you need to know the file id before the upload starts, as the call will block.
+   The file size and/or file type also need to be known before the upload starts, as they have to be set
+   in the HTTP response headers.
+   To get the file ids, pass a "files" json object with each file you are about to upload.
+   Fill the reference field with an arbitrary string to avoid matching file ids using the fileName field.
+   The reference field is also used to notify of MISSING files when a file upload is not yet finished or has failed.
+   ```
+   "files" : {
+       "0" : {
+           "fileName": "file.txt",
+           "fileSize": 12345,
+           "fileType": "text/plain",
+           "reference": "0"
+       },...
+   }
+   ```
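+
+   For example, a stream mode upload declaring a single file up front could be created as follows
+   (the ids, sizes and urls below are illustrative) :
+   ```sh
+   Create a stream upload and read the generated file id from the json response
+   $ curl -X POST -d '{ "stream" : true, "files" : { "0" : { "fileName" : "file.txt", "fileSize" : 12345, "fileType" : "text/plain", "reference" : "0" } } }' http://127.0.0.1:8080/upload
+   ```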
+
+  - **GET** /upload/:uploadid:
+    - Get upload metadata (files list, upload date, ttl,...)
+
+Upload file :
+
+  - **POST** /$mode/:uploadid:/:fileid:/:filename:
+    - Request body must be a multipart request with a part named "file" containing file data.
+
+  - **POST** /file/:uploadid:
+    - Same as above without passing a file id ; won't work in stream mode.
+
+Get file :
+
+  - **HEAD** /$mode/:uploadid:/:fileid:/:filename:
+    - Returns only HTTP headers. Useful to know Content-Type and Content-Length without downloading the file,
+      especially if the upload has the OneShot option enabled.
+
+  - **GET** /$mode/:uploadid:/:fileid:/:filename:
+    - Download the file. The filename **MUST** match. A browser might try to display the file inline if it's a jpeg, for example ; you can force the download with ?dl=1 in the url.
+
+  - **GET** /$mode/:uploadid:/:fileid:/:filename:/yubikey/:yubikeyOtp:
+    - Same as the previous call, except that you can specify a Yubikey OTP in the URL if the upload is Yubikey restricted.
+
+Remove file :
+
+  - **DELETE** /$mode/:uploadid:/:fileid:/:filename:
+    - Delete a file. The upload **MUST** have the "removable" option enabled.
+
+Show server details :
+
+  - **GET** /version
+    - Show the plik server version and some build information (build host, date, git revision,...)
+
+  - **GET** /config
+    - Show the plik server configuration (ttl values, max file size, ...)
+
+User authentication :
+
+   Plik can authenticate users using the Google and/or OVH third-party APIs.
+   The /auth API is designed for the Plik web application ; nevertheless, if you want to automate it, be sure to provide a valid
+   Referer HTTP header and to forward all session cookies.
+   Plik session cookies have the "secure" flag set, so they can only be transmitted over secure HTTPS connections.
+   To avoid CSRF attacks, the value of the plik-xsrf cookie MUST be copied into the X-XSRFToken HTTP header of each
+   authenticated request.
+   Once authenticated, a user can generate upload tokens. Such a token can be passed in the X-PlikToken HTTP header to link
+   an upload to the user account, or put in the ~/.plikrc file of the Plik command line client.
+
+   - **Google** :
+     - You'll need to create a new application in the [Google Developer Console](https://console.developers.google.com)
+     - You'll be handed a Google API ClientID and a Google API ClientSecret that you'll need to put in the plikd.cfg file.
+     - Do not forget to whitelist a valid origin and redirect url ( https://yourdomain/auth/google/callback ) for your domain.
+
+   - **OVH** :
+     - You'll need to create a new application in the OVH API : https://eu.api.ovh.com/createApp/
+     - You'll be handed an OVH application key and an OVH application secret key that you'll need to put in the plikd.cfg file.
+
+  - **GET** /auth/google/login
+    - Get the Google user consent URL. Users have to visit this URL to authenticate.
+
+  - **GET** /auth/google/callback
+    - Callback of the user consent dialog.
+    - The user will be redirected back to the web application with a Plik session cookie at the end of this call.
+
+  - **GET** /auth/ovh/login
+    - Get the OVH user consent URL. Users have to visit this URL to authenticate.
+    - The response will contain a temporary session cookie to forward the API endpoint and the OVH consumer key to the callback.
+
+  - **GET** /auth/ovh/callback
+    - Callback of the user consent dialog.
+    - The user will be redirected back to the web application with a Plik session cookie at the end of this call.
+
+  - **GET** /auth/logout
+    - Invalidate Plik session cookies.
+
+  - **GET** /me
+    - Return basic user info ( ID, name, email ) and tokens.
+
+  - **DELETE** /me
+    - Remove the user account.
+
+  - **POST** /me/token
+    - Create a new upload token.
+    - A comment can be passed in the json body.
+
+  - **DELETE** /me/token/{token}
+    - Revoke an upload token.
+
+  - **GET** /me/uploads
+    - Return all uploads linked to a user account.
+    - Params :
+      - token : filter by token
+      - size : maximum number of uploads to return ( max : 100 )
+      - offset : number of uploads to skip
+
+  - **DELETE** /me/uploads
+    - Remove all uploads linked to a user account.
+    - Params :
+      - token : filter by token
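+
+   For example, a new upload can be linked to the account owning a token as follows
+   (the token below is a placeholder for a value returned by POST /me/token) :
+   ```sh
+   Create an upload linked to a user account
+   $ curl -X POST --header "X-PlikToken: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" http://127.0.0.1:8080/upload
+   ```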
+
+QRCode :
+
+  - **GET** /qrcode
+    - Generate a QRCode image from an url
+    - Params :
+      - url  : The url you want to store in the QRCode
+      - size : The size of the generated image in pixels (default: 250, max: 1000)
+
+
+$mode can be "file" or "stream", depending on whether stream mode is enabled. See the FAQ for more details.
+
+Examples :
+```sh
+Create an upload (in the json response, you'll have upload id and upload token)
+$ curl -X POST http://127.0.0.1:8080/upload
+
+Create a OneShot upload
+$ curl -X POST -d '{ "OneShot" : true }' http://127.0.0.1:8080/upload
+
+Upload a file to an existing upload
+$ curl -X POST --header "X-UploadToken: M9PJftiApG1Kqr81gN3Fq1HJItPENMhl" -F "file=@test.txt" http://127.0.0.1:8080/file/IsrIPIsDskFpN12E
+
+Get headers
+$ curl -I http://127.0.0.1:8080/file/IsrIPIsDskFpN12E/sFjIeokH23M35tN4/test.txt
+HTTP/1.1 200 OK
+Content-Disposition: filename=test.txt
+Content-Length: 3486
+Content-Type: text/plain; charset=utf-8
+Date: Fri, 15 May 2015 09:16:20 GMT
+
+```
diff --git a/documentation/docker.md b/documentation/docker.md
new file mode 100644
index 00000000..2fe9d8ce
--- /dev/null
+++ b/documentation/docker.md
@@ -0,0 +1,37 @@
+### Docker
+Plik comes with a simple Dockerfile that allows you to run it inside a docker container.
+
+##### Getting the image from the docker registry
+
+```sh
+$ docker pull rootgg/plik:latest
+```
+
+##### Building the docker image
+
+First, you need to build the docker image :
+```sh
+$ make docker
+```
+
+##### Configuration
+
+Then you can run an instance and map the local port 8080 to the plik port :
+```sh
+$ docker run -t -d -p 8080:8080 rootgg/plik
+ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9
+```
+
+To use a different config file, you can map a single file into the container at runtime.
+Here, we map the local file plikd.cfg to /home/plik/server/plikd.cfg, which is the default config file location in the container :
+```sh
+$ docker run -t -d -p 8080:8080 -v plikd.cfg:/home/plik/server/plikd.cfg rootgg/plik
+ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9
+```
+
+You can also use a volume to store uploads outside the container.
+Here, we map the local folder /data to /home/plik/server/files, which is the default upload directory in the container :
+```sh
+$ docker run -t -d -p 8080:8080 -v /data:/home/plik/server/files rootgg/plik
+ab9b2c99da1f3e309cd3b12392b9084b5cafcca0325d7d47ff76f5b1e475d1b9
+```
\ No newline at end of file
diff --git a/server/Godeps/Godeps.json b/server/Godeps/Godeps.json
index ed448656..63909766 100644
--- a/server/Godeps/Godeps.json
+++ b/server/Godeps/Godeps.json
@@ -1,16 +1,33 @@
 {
 	"ImportPath": "github.com/root-gg/plik/server",
 	"GoVersion": "go1.5.1",
+	"Packages": [
+		"./..."
+ ], "Deps": [ { "ImportPath": "github.com/BurntSushi/toml", - "Comment": "v0.1.0", - "Rev": "2ceedfee35ad3848e49308ab0c9a4f640cfb5fb2" + "Comment": "v0.1.0-18-g443a628", + "Rev": "443a628bc233f634a75bcbdd71fe5350789f1afa" }, { "ImportPath": "github.com/GeertJohan/yubigo", "Rev": "b1764f04aa9ba3c98a15084e7e13c1a69753e1da" }, + { + "ImportPath": "github.com/boltdb/bolt", + "Comment": "v1.1.0-12-g47d80ed", + "Rev": "47d80ed8a451fe9c07069e845e93b79291bb0532" + }, + { + "ImportPath": "github.com/boombuler/barcode", + "Rev": "63f4aa2c46ebfac93952223a52df876f30b064c0" + }, + { + "ImportPath": "github.com/dgrijalva/jwt-go", + "Comment": "v2.4.0-4-gafef698", + "Rev": "afef698c326bfd906b11659432544e5aae441d44" + }, { "ImportPath": "github.com/facebookgo/clock", "Rev": "600d898af40aa09a7a93ecb9265d87b0504b6f03" @@ -23,29 +40,26 @@ "ImportPath": "github.com/facebookgo/stats", "Rev": "31fb71caf5a4f04c9f8bb3fa8e7c2597ba6eb50a" }, - { - "ImportPath": "github.com/boombuler/barcode", - "Rev": "63f4aa2c46ebfac93952223a52df876f30b064c0" - }, - { - "ImportPath": "github.com/boombuler/barcode", - "Rev": "63f4aa2c46ebfac93952223a52df876f30b064c0" - }, { "ImportPath": "github.com/gorilla/context", - "Rev": "50c25fb3b2b3b3cc724e9b6ac75fb44b3bccd0da" + "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd" }, { "ImportPath": "github.com/gorilla/mux", - "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5" }, { "ImportPath": "github.com/ncw/swift", "Rev": "da9274954a59d67d130af988ebf73bb15bc15e1b" }, { - "ImportPath": "github.com/root-gg/context", - "Rev": "eb01ea15154712b6b44ee3bbf07863af6525afc6" + "ImportPath": "github.com/nu7hatch/gouuid", + "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" + }, + { + "ImportPath": "github.com/root-gg/juliet", + "Comment": "v1.0", + "Rev": "f7abeae007897a156a3a5af8fe8e5e653cb15f13" }, { "ImportPath": "github.com/root-gg/logger", @@ -53,12 +67,40 @@ }, { "ImportPath": "github.com/root-gg/utils", - "Rev": "075b52a8bb434c1e2f156f004a994976bf6a8173" + "Rev": "38f45ede2ce220d9c08734edd8a13107022cc20d" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "f1d3149ecb40ffadf4a28d39a30f9a125fe57bdf" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "2baa8a1b9338cf13d9eeb27696d761155fa480be" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "77e7d383beb96054547729f49c372b3d01e196ff" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "77e7d383beb96054547729f49c372b3d01e196ff" + }, + { + "ImportPath": "google.golang.org/api/oauth2/v2", + "Rev": "77e7d383beb96054547729f49c372b3d01e196ff" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "1bff51b8fae8d33cb3dab8f7858c266ce001ee3e" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "1bff51b8fae8d33cb3dab8f7858c266ce001ee3e" }, { "ImportPath": "gopkg.in/mgo.v2", - "Comment": "r2015.01.24", - "Rev": "c6a7dce14133ccac2dcac3793f1d6e2ef048503a" + "Comment": "r2015.12.06", + "Rev": "e30de8ac9ae3b30df7065f766c71f88bba7d4e49" } ] } diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md index 380bb36b..5a5df637 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md @@ -9,7 +9,7 @@ representations. (There is an example of this below.) 
Spec: https://github.com/mojombo/toml Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) +[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) Documentation: http://godoc.org/github.com/BurntSushi/toml @@ -111,7 +111,7 @@ type songs struct { Song []song } var favorites songs -if _, err := Decode(blob, &favorites); err != nil { +if _, err := toml.Decode(blob, &favorites); err != nil { log.Fatal(err) } diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go index b6d75d04..6c7d398b 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go @@ -12,6 +12,18 @@ import ( var e = fmt.Errorf +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // When using the various `Decode*` functions, the type `Primitive` may // be given to any value, and its decoding will be delayed. @@ -128,6 +140,7 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { // Save the undecoded data and the key context into the primitive @@ -141,6 +154,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + // Special case. Handle time.Time values specifically. // TODO: Remove this code when we decide to drop support for Go 1.1. 
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go index c8114453..ef6f545f 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go @@ -59,6 +59,29 @@ func (k Key) String() string { return strings.Join(k, ".") } +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } else { + return k[i] + } +} + func (k Key) add(piece string) Key { newKey := make(Key, len(k)+1) copy(newKey, k) diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index b940333d..00000000 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,540 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "reflect" - "testing" - "time" -) - -func init() { - log.SetFlags(0) -} - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } 
- } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. -func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands.J Geils] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. 
-[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. -func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. 
-func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go index 36187134..64e8c47e 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go @@ -118,7 +118,8 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) { k := rv.Kind() switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: @@ -173,7 +174,8 @@ func (enc *Encoder) eElement(rv reflect.Value) { switch rv.Kind() { case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -223,28 +225,28 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key, true) for i := 0; i < rv.Len(); i++ { trv := rv.Index(i) if isNil(trv) { continue } + panicIfInvalidKey(key) enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.String()) + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() enc.eMapOrStruct(key, trv) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) if len(key) == 1 { // Output an extra new line between top-level tables. // (The newline isn't written if nothing else has been written though.) enc.newline() } if len(key) > 0 { - panicIfInvalidKey(key, true) - enc.wf("%s[%s]", enc.indentStr(key), key.String()) + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() } enc.eMapOrStruct(key, rv) @@ -348,10 +350,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) { writeFields(fieldsSub) } -// tomlTypeName returns the TOML type name of the Go value's type. It is used to -// determine whether the types of array elements are mixed (which is forbidden). -// If the Go value is nil, then it is illegal for it to be an array element, and -// valueIsNil is returned as true. +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. // Returns the TOML type of a Go value. The type may be `nil`, which means // no concrete TOML type could be found. 
@@ -362,7 +364,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { switch rv.Kind() { case reflect.Bool: return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return tomlInteger @@ -440,8 +443,8 @@ func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key, false) - enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1]) + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) enc.newline() } @@ -479,37 +482,15 @@ func isNil(rv reflect.Value) bool { } } -func panicIfInvalidKey(key Key, hash bool) { - if hash { - for _, k := range key { - if !isValidTableName(k) { - encPanic(e("Key '%s' is not a valid table name. Table names "+ - "cannot contain '[', ']' or '.'.", key.String())) - } - } - } else { - if !isValidKeyName(key[len(key)-1]) { - encPanic(e("Key '%s' is not a name. Key names "+ - "cannot contain whitespace.", key.String())) - } - } -} - -func isValidTableName(s string) bool { - if len(s) == 0 { - return false - } - for _, r := range s { - if r == '[' || r == ']' || r == '.' { - return false +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) } } - return true } func isValidKeyName(s string) bool { - if len(s) == 0 { - return false - } - return true + return len(s) != 0 } diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 74a5ee5d..00000000 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - log.Printf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. 
-func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. - input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": 
{ - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - 
[]*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - map[string]int{ - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) anonymous non-struct": { - input: struct{ NonStruct }{5}, - wantError: errAnonNonStruct, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git 
a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go index 140c44c1..d36e1dd6 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go @@ -14,6 +14,6 @@ import ( // so that Go 1.1 can be supported. type TextMarshaler encoding.TextMarshaler -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. type TextUnmarshaler encoding.TextUnmarshaler diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go index fb285e7f..e8d503d0 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -11,8 +11,8 @@ type TextMarshaler interface { MarshalText() (text []byte, err error) } -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. type TextUnmarshaler interface { UnmarshalText(text []byte) error } diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go index 3821fa27..21912285 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go @@ -14,6 +14,9 @@ const ( itemEOF itemText itemString + itemRawString + itemMultilineString + itemRawMultilineString itemBool itemInteger itemFloat @@ -42,6 +45,8 @@ const ( commentStart = '#' stringStart = '"' stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' ) type stateFn func(lx *lexer) stateFn @@ -256,38 +261,54 @@ func lexArrayTableEnd(lx *lexer) stateFn { } func lexTableNameStart(lx *lexer) stateFn { - switch lx.next() { - case tableEnd, eof: - return lx.errorf("Unexpected end of table. (Tables cannot " + + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("Unexpected end of table name. (Table names cannot " + "be empty.)") - case tableSep: - return lx.errorf("Unexpected table separator. (Tables cannot " + + case r == tableSep: + return lx.errorf("Unexpected table separator. (Table names cannot " + "be empty.)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + case isWhitespace(r): + return lexTableNameStart + default: + return lexBareTableName } - return lexTableName } // lexTableName lexes the name of a table. It assumes that at least one // valid character for the table has already been read. 
-func lexTableName(lx *lexer) stateFn { - switch lx.peek() { - case eof: - return lx.errorf("Unexpected end of table name %q.", lx.current()) - case tableStart: - return lx.errorf("Table names cannot contain %q or %q.", - tableStart, tableEnd) - case tableEnd: - lx.emit(itemText) - lx.next() - return lx.pop() - case tableSep: - lx.emit(itemText) - lx.next() +func lexBareTableName(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareTableName + case r == tableSep || r == tableEnd: + lx.backup() + lx.emitTrim(itemText) + return lexTableNameEnd + default: + return lx.errorf("Bare keys cannot contain %q.", r) + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: lx.ignore() return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ + "instead.", r) } - lx.next() - return lexTableName } // lexKeyStart consumes a key name up until the first non-whitespace character. @@ -300,53 +321,48 @@ func lexKeyStart(lx *lexer) stateFn { case isWhitespace(r) || isNL(r): lx.next() return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey } - - lx.ignore() - lx.emit(itemKeyStart) - lx.next() - return lexKey } -// lexKey consumes the text of a key. Assumes that the first character (which -// is not whitespace) has already been consumed. -func lexKey(lx *lexer) stateFn { - r := lx.peek() - - // Keys cannot contain a '#' character. - if r == commentStart { - return lx.errorf("Key cannot contain a '#' character.") - } - - // XXX: Possible divergence from spec? - // "Keys start with the first non-whitespace character and end with the - // last non-whitespace character before the equals sign." - // Note here that whitespace is either a tab or a space. - // But we'll call it quits if we see a new line too. - if isNL(r) { +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. +func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): lx.emitTrim(itemText) return lexKeyEnd - } - - // Let's also call it quits if we see an equals sign. - if r == keySep { + case r == keySep: + lx.backup() lx.emitTrim(itemText) return lexKeyEnd + default: + return lx.errorf("Bare keys cannot contain %q.", r) } - - lx.next() - return lexKey } -// lexKeyEnd consumes the end of a key (up to the key separator). -// Assumes that any whitespace after a key has been consumed. +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). func lexKeyEnd(lx *lexer) stateFn { - r := lx.next() - if r == keySep { + switch r := lx.next(); { + case r == keySep: return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("Expected key separator %q, but got %q instead.", + keySep, r) } - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) } // lexValue starts the consumption of a value anywhere a value is expected. 
@@ -354,7 +370,8 @@ func lexKeyEnd(lx *lexer) stateFn {
 // After a value is lexed, the last state on the next is popped and returned.
 func lexValue(lx *lexer) stateFn {
 	// We allow whitespace to precede a value, but NOT new lines.
-	// In array syntax, the array states are responsible for ignoring new lines.
+	// In array syntax, the array states are responsible for ignoring new
+	// lines.
 	r := lx.next()
 	if isWhitespace(r) {
 		return lexSkip(lx, lexValue)
 	}
@@ -366,8 +383,25 @@ func lexValue(lx *lexer) stateFn {
 		lx.emit(itemArray)
 		return lexArrayValue
 	case r == stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
 		lx.ignore() // ignore the '"'
 		return lexString
+	case r == rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
 	case r == 't':
 		return lexTrue
 	case r == 'f':
@@ -441,6 +475,7 @@ func lexString(lx *lexer) stateFn {
 	case isNL(r):
 		return lx.errorf("Strings cannot contain new lines.")
 	case r == '\\':
+		lx.push(lexString)
 		return lexStringEscape
 	case r == stringEnd:
 		lx.backup()
@@ -452,8 +487,88 @@
 	return lexString
 }
 
-// lexStringEscape consumes an escaped character. It assumes that the preceding
-// '\\' has already been consumed.
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '\\':
+		return lexMultilineStringEscape
+	case r == stringEnd:
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == rawStringEnd:
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		lx.next()
+		return lexMultilineString
+	} else {
+		lx.backup()
+		lx.push(lexMultilineString)
+		return lexStringEscape(lx)
+	}
+}
+
 func lexStringEscape(lx *lexer) stateFn {
 	r := lx.next()
 	switch r {
@@ -469,35 +584,45 @@ func lexStringEscape(lx *lexer) stateFn {
 		fallthrough
 	case '"':
 		fallthrough
-	case '/':
-		fallthrough
 	case '\\':
-		return lexString
+		return lx.pop()
 	case 'u':
-		return lexStringUnicode
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
 	}
 	return lx.errorf("Invalid escape character %q. Only the following "+
 		"escape characters are allowed: "+
-		"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
+		"\\b, \\t, \\n, \\f, \\r, \\\", \\\\, "+
+		"\\uXXXX and \\UXXXXXXXX.", r)
 }
 
-// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes
-// that the '\x' has already been consumed.
-func lexStringUnicode(lx *lexer) stateFn {
+func lexShortUnicodeEscape(lx *lexer) stateFn {
 	var r rune
-
 	for i := 0; i < 4; i++ {
 		r = lx.next()
 		if !isHexadecimal(r) {
-			return lx.errorf("Expected four hexadecimal digits after '\\x', "+
+			return lx.errorf("Expected four hexadecimal digits after '\\u', "+
 				"but got '%s' instead.", lx.current())
 		}
 	}
-	return lexString
+	return lx.pop()
 }
 
-// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
-// It assumes that NO negative sign has been consumed.
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
 func lexNumberOrDateStart(lx *lexer) stateFn {
 	r := lx.next()
 	if !isDigit(r) {
@@ -557,9 +682,10 @@ func lexDateAfterYear(lx *lexer) stateFn {
 	return lx.pop()
 }
 
-// lexNumberStart consumes either an integer or a float. It assumes that a
-// negative sign has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states.
 func lexNumberStart(lx *lexer) stateFn {
 	// we MUST see a digit. Even floats have to start with a digit.
r := lx.next() @@ -693,6 +819,14 @@ func isHexadecimal(r rune) bool { (r >= 'A' && r <= 'F') } +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + func (itype itemType) String() string { switch itype { case itemError: @@ -705,6 +839,12 @@ func (itype itemType) String() string { return "Text" case itemString: return "String" + case itemRawString: + return "String" + case itemMultilineString: + return "String" + case itemRawMultilineString: + return "String" case itemBool: return "Bool" case itemInteger: diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go index 43afe3c3..c6069be1 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" "time" + "unicode" "unicode/utf8" ) @@ -66,7 +67,7 @@ func parse(data string) (p *parser, err error) { } func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d, key '%s': %s", + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", p.approxLine, p.current(), fmt.Sprintf(format, v...)) panic(parseError(msg)) } @@ -74,7 +75,7 @@ func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) next() item { it := p.lx.nextItem() if it.typ == itemError { - p.panicf("Near line %d: %s", it.line, it.val) + p.panicf("%s", it.val) } return it } @@ -101,12 +102,12 @@ func (p *parser) topLevel(item item) { p.approxLine = item.line p.expect(itemText) case itemTableStart: - kg := p.expect(itemText) + kg := p.next() p.approxLine = kg.line - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) } p.assertEqual(itemTableEnd, kg.typ) @@ -114,12 +115,12 @@ func (p *parser) topLevel(item item) { p.setType("", tomlHash) p.ordered = append(p.ordered, key) case itemArrayTableStart: - kg := p.expect(itemText) + kg := p.next() p.approxLine = kg.line - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) } p.assertEqual(itemArrayTableEnd, kg.typ) @@ -127,27 +128,48 @@ func (p *parser) topLevel(item item) { p.setType("", tomlArrayHash) p.ordered = append(p.ordered, key) case itemKeyStart: - kname := p.expect(itemText) - p.currentKey = kname.val + kname := p.next() p.approxLine = kname.line + p.currentKey = p.keyString(kname) val, typ := p.value(p.next()) p.setValue(p.currentKey, val) p.setType(p.currentKey, typ) p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) } } +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. 
func (p *parser) value(it item) (interface{}, tomlType) { switch it.typ { case itemString: - return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it) + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) case itemBool: switch it.val { case "true": @@ -352,7 +374,8 @@ func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true } -// removeImplicit stops tagging the given key as having been implicitly created. +// removeImplicit stops tagging the given key as having been implicitly +// created. func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false } @@ -374,31 +397,85 @@ func (p *parser) current() string { return fmt.Sprintf("%s.%s", p.context, p.currentKey) } -func replaceEscapes(s string) string { - return strings.NewReplacer( - "\\b", "\u0008", - "\\t", "\u0009", - "\\n", "\u000A", - "\\f", "\u000C", - "\\r", "\u000D", - "\\\"", "\u0022", - "\\/", "\u002F", - "\\\\", "\u005C", - ).Replace(s) +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:len(s)] } -func (p *parser) replaceUnicode(s string) string { - indexEsc := func() int { - return strings.Index(s, "\\u") +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } } - for i := indexEsc(); i != -1; i = indexEsc() { - asciiBytes := s[i+2 : i+6] - s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1) + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } } - return s + return string(replaced) } -func (p *parser) asciiEscapeToUnicode(s string) string { +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the "+ @@ -409,9 +486,13 @@ func (p *parser) asciiEscapeToUnicode(s string) string { // I honestly don't understand how this works. I can't seem // to find a way to make this fail. I figured this would fail on invalid // UTF-8 characters like U+DCFF, but it doesn't. - r := string(rune(hex)) - if !utf8.ValidString(r) { + if !utf8.ValidString(string(rune(hex))) { p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) } - return string(r) + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString } diff --git a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go index 79dac6b1..c73f8afc 100644 --- a/server/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go +++ b/server/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go @@ -56,6 +56,12 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlDatetime case itemString: return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString case itemBool: return tomlBool } @@ -77,8 +83,8 @@ func (p *parser) typeOfArray(types []tomlType) tomlType { theType := types[0] for _, t := range types[1:] { if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but arrays "+ - "must be homogeneous.", theType, t) + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) } } return tomlArray diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore b/server/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore new file mode 100644 index 00000000..c7bd2b7a --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE b/server/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE new file mode 100644 index 00000000..004e77fe --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/server/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile new file mode 100644 index 00000000..cfbed514 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile @@ -0,0 +1,54 @@ +TEST=. +BENCH=. +COVERPROFILE=/tmp/c.out +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +bench: + go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) + +# http://cloc.sourceforge.net/ +cloc: + @cloc --not-match-f='Makefile|_test.go' . + +cover: fmt + go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . + go tool cover -html=$(COVERPROFILE) + rm $(COVERPROFILE) + +cpuprofile: fmt + @go test -c + @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof + +# go get github.com/kisielk/errcheck +errcheck: + @echo "=== errcheck ===" + @errcheck github.com/boltdb/bolt + +fmt: + @go fmt ./... + +get: + @go get -d ./... + +build: get + @mkdir -p bin + @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt + +test: fmt + @go get github.com/stretchr/testify/assert + @echo "=== TESTS ===" + @go test -v -cover -test.run=$(TEST) + @echo "" + @echo "" + @echo "=== CLI ===" + @go test -v -test.run=$(TEST) ./cmd/bolt + @echo "" + @echo "" + @echo "=== RACE DETECTOR ===" + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +.PHONY: bench cloc cover cpuprofile fmt memprofile test diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/server/Godeps/_workspace/src/github.com/boltdb/bolt/README.md new file mode 100644 index 00000000..32759ce9 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/README.md @@ -0,0 +1,721 @@ +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + + +## Project Status + +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed +services every day. + + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... 
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is represented as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/boltdb/bolt"
+)
+
+func main() {
+	// Open the my.db data file in your current directory.
+	// It will be created if it doesn't exist.
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for the disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+	// Find last key in bucket, decode as bigendian uint64, increment
+	// by one, encode back to []byte, and add new key.
+	...
+	id = newValue
+	return nil
+})
+if err != nil {
+	return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+	return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+	return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+	return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b, err := tx.CreateBucket([]byte("MyBucket"))
+	if err != nil {
+		return fmt.Errorf("create bucket: %s", err)
+	}
+	return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
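+
+As a minimal sketch (reusing the `MyBucket` / `"answer"` pair from the
+examples above), copying a value out of a read-only transaction so it can be
+used after the transaction closes might look like this:
+
+```go
+var valCopy []byte
+db.View(func(tx *bolt.Tx) error {
+	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
+	if v != nil {
+		// v points into the mmap'd data file; copy it before the
+		// transaction ends.
+		valCopy = make([]byte, len(v))
+		copy(valCopy, v)
+	}
+	return nil
+})
+// valCopy is still safe to use here, after the transaction has closed.
+```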
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		// Retrieve the users bucket.
+		// This should be created when the DB is first opened.
+		b := tx.Bucket([]byte("users"))
+
+		// Generate ID for the user.
+		// This returns an error only if the Tx is closed or not writeable.
+		// That can't happen in an Update() call so I ignore the error check.
+		id, _ := b.NextSequence()
+		u.ID = int(id)
+
+		// Marshal user data into bytes.
+		buf, err := json.Marshal(u)
+		if err != nil {
+			return err
+		}
+
+		// Persist bytes to users bucket.
+		return b.Put(itob(u.ID), buf)
+	})
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(v))
+	return b
+}
+
+type User struct {
+	ID int
+	...
+}
+
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	c := b.Cursor()
+
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First()  Move to the first key.
+Last()   Move to the last key.
+Seek()   Move to a specific key.
+Next()   Move to the next key.
+Prev()   Move to the previous key.
+```
+
+When you have iterated to the end of the cursor then `Next()` will return `nil`.
+You must seek to a position using `First()`, `Last()`, or `Seek()` before
+calling `Next()` or `Prev()`. If you do not seek to a position then these
+functions will return `nil`.
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+	prefix := []byte("1234")
+	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume our events bucket has RFC3339 encoded time keys.
+	c := tx.Bucket([]byte("Events")).Cursor()
+
+	// Our time range spans the 90's decade.
+	min := []byte("1990-01-01T00:00:00Z")
+	max := []byte("2000-01-01T00:00:00Z")
+
+	// Iterate over the 90's.
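+	// (Seek(min) positions the cursor at the first key >= min; the
+	// bytes.Compare check makes the max bound inclusive.)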
+	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+		fmt.Printf("%s: %s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	b.ForEach(func(k, v []byte) error {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+		return nil
+	})
+	return nil
+})
+```
+
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes. It will also use `O_DIRECT` when available
+to prevent page cache thrashing.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+	err := db.View(func(tx *bolt.Tx) error {
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+		_, err := tx.WriteTo(w)
+		return err
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+```
+
+Then you can back up using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to back up to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can add a write-ahead log or
+  [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt
+  to mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. It may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian specific. This means that you cannot copy a Bolt file from a
+  little endian machine to a big endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
+* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, exposes a JSON-over-HTTP API, and supports ISO 8601 duration notation and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+  backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+  simple tx and key scans.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go
new file mode 100644
index 00000000..84acae6b
--- /dev/null
+++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go
@@ -0,0 +1,138 @@
+package bolt
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// Batch calls fn as part of a batch. It behaves similarly to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns an error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen in
+// the caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
+func (db *DB) Batch(fn func(*Tx) error) error {
+	errCh := make(chan error, 1)
+
+	db.batchMu.Lock()
+	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
+		// There is no existing batch, or the existing batch is full; start a new one.
+ db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 00000000..cca6b7eb --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 00000000..6d230935 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 00000000..e9d1c907 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,12 @@ +package bolt + +import ( + "syscall" +) + +var odirect = syscall.O_DIRECT + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 00000000..7c1bef1a --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,29 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +var odirect int + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 00000000..8351e129 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 00000000..f4dd26bb --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 00000000..6eef6b22 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,100 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(f *os.File) error { + return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 00000000..f480ee76 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,101 @@ + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. 
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		var lock syscall.Flock_t
+		lock.Start = 0
+		lock.Len = 0
+		lock.Pid = 0
+		lock.Whence = 0
+		if exclusive {
+			lock.Type = syscall.F_WRLCK
+		} else {
+			lock.Type = syscall.F_RDLCK
+		}
+		err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
+		if err == nil {
+			return nil
+		} else if err != syscall.EAGAIN {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(f *os.File) error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_UNLCK
+	lock.Whence = 0
+	return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Truncate and fsync to ensure file size metadata is flushed.
+	// https://github.com/boltdb/bolt/issues/284
+	if !db.NoGrowSync && !db.readOnly {
+		if err := db.file.Truncate(int64(sz)); err != nil {
+			return fmt.Errorf("file resize error: %s", err)
+		}
+		if err := db.file.Sync(); err != nil {
+			return fmt.Errorf("file sync error: %s", err)
+		}
+	}
+
+	// Map the data file to memory.
+	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
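These lock routines are what give bolt.Open its blocking behavior when another process owns the database file. A small, hypothetical illustration of the user-visible effect via the exported Options.Timeout knob (the file name is arbitrary):

```go
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// Timeout bounds how long flock retries (in 50ms steps) before
	// giving up with bolt.ErrTimeout.
	db, err := bolt.Open("shared.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal(err) // e.g. "timeout" if another process holds the lock
	}
	defer db.Close()
}
```
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.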
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 00000000..f0483655 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,132 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +var odirect int + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(f *os.File) error { + return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{}) +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. 
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 00000000..8db89776 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,10 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +var odirect int + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go new file mode 100644 index 00000000..d2f8c524 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,748 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. 
+func (b *Bucket) Root() pgid {
+	return b.root
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+	return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+	// Update transaction statistics.
+	b.tx.stats.CursorCount++
+
+	// Allocate and return a cursor.
+	return &Cursor{
+		bucket: b,
+		stack:  make([]elemRef, 0),
+	}
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+	if b.buckets != nil {
+		if child := b.buckets[string(name)]; child != nil {
+			return child
+		}
+	}
+
+	// Move cursor to key.
+	c := b.Cursor()
+	k, v, flags := c.seek(name)
+
+	// Return nil if the key doesn't exist or it is not a bucket.
+	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+		return nil
+	}
+
+	// Otherwise create a bucket and cache it.
+	var child = b.openBucket(v)
+	if b.buckets != nil {
+		b.buckets[string(name)] = child
+	}
+
+	return child
+}
+
+// openBucket is a helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket.
+func (b *Bucket) openBucket(value []byte) *Bucket {
+	var child = newBucket(b.tx)
+
+	// If this is a writable transaction then we need to copy the bucket entry.
+	// Read-only transactions can point directly at the mmap entry.
+	if b.tx.writable {
+		child.bucket = &bucket{}
+		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+	} else {
+		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+	}
+
+	// Save a reference to the inline page if the bucket is inline.
+	if child.root == 0 {
+		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+	}
+
+	return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+	if b.tx.db == nil {
+		return nil, ErrTxClosed
+	} else if !b.tx.writable {
+		return nil, ErrTxNotWritable
+	} else if len(key) == 0 {
+		return nil, ErrBucketNameRequired
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key.
+	if bytes.Equal(key, k) {
+		if (flags & bucketLeafFlag) != 0 {
+			return nil, ErrBucketExists
+		} else {
+			return nil, ErrIncompatibleValue
+		}
+	}
+
+	// Create empty, inline bucket.
+	var bucket = Bucket{
+		bucket:      &bucket{},
+		rootNode:    &node{isLeaf: true},
+		FillPercent: DefaultFillPercent,
+	}
+	var value = bucket.write()
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	// Since subbuckets are not allowed on inline buckets, we need to
+	// dereference the inline page, if it exists. This will cause the bucket
+	// to be treated as a regular, non-inline bucket for the rest of the tx.
+	b.page = nil
+
+	return b.Bucket(key), nil
+}
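A compact, hypothetical write-side sketch of the bucket API defined in this file (upstream import path; the bucket and key names are invented):

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Buckets are created inside a read-write transaction; nested
	// buckets hang off their parent.
	err = db.Update(func(tx *bolt.Tx) error {
		users, err := tx.CreateBucketIfNotExists([]byte("users"))
		if err != nil {
			return err
		}
		if _, err := users.CreateBucket([]byte("u-0001")); err != nil && err != bolt.ErrBucketExists {
			return err
		}
		return users.Put([]byte("alice"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.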
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists, then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, 0)
+
+	return nil
+}
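And the matching read side, a hypothetical sketch using Get and ForEach (assumes db is an open *bolt.DB and the buckets from the previous sketch exist):

```go
err := db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("users"))
	if b == nil {
		return bolt.ErrBucketNotFound
	}
	fmt.Printf("alice = %s\n", b.Get([]byte("alice")))
	// Iterate all pairs; returning an error stops the iteration.
	return b.ForEach(func(k, v []byte) error {
		fmt.Printf("%s = %s\n", k, v)
		return nil
	})
})
```
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	_, _, flags := c.seek(key)
+
+	// Return an error if there is an existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Delete the node if we have a matching key.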
+	c.node().del(key)
+
+	return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+	if b.tx.db == nil {
+		return 0, ErrTxClosed
+	} else if !b.Writable() {
+		return 0, ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence++
+	return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if err := fn(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+	var s, subStats BucketStats
+	pageSize := b.tx.db.pageSize
+	s.BucketN += 1
+	if b.root == 0 {
+		s.InlineBucketN += 1
+	}
+	b.forEachPage(func(p *page, depth int) {
+		if (p.flags & leafPageFlag) != 0 {
+			s.KeyN += int(p.count)
+
+			// used totals the used bytes for the page
+			used := pageHeaderSize
+
+			if p.count != 0 {
+				// If page has any elements, add all element headers.
+				used += leafPageElementSize * int(p.count-1)
+
+				// Add all element key, value sizes.
+				// The computation takes advantage of the fact that the position
+				// of the last element's key/value equals the total of the sizes
+				// of all previous elements' keys and values.
+				// It also includes the last element's header.
+				lastElement := p.leafPageElement(p.count - 1)
+				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			}
+
+			if b.root == 0 {
+				// For inlined bucket just update the inline stats
+				s.InlineBucketInuse += used
+			} else {
+				// For non-inlined bucket update all the leaf stats
+				s.LeafPageN++
+				s.LeafInuse += used
+				s.LeafOverflowN += int(p.overflow)
+
+				// Collect stats from sub-buckets.
+				// Do that by iterating over all element headers
+				// looking for the ones with the bucketLeafFlag.
+				for i := uint16(0); i < p.count; i++ {
+					e := p.leafPageElement(i)
+					if (e.flags & bucketLeafFlag) != 0 {
+						// For any bucket element, open the element value
+						// and recursively call Stats on the contained bucket.
+						subStats.Add(b.openBucket(e.value()).Stats())
+					}
+				}
+			}
+		} else if (p.flags & branchPageFlag) != 0 {
+			s.BranchPageN++
+			lastElement := p.branchPageElement(p.count - 1)
+
+			// used totals the used bytes for the page
+			// Add header and all element headers.
+			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+			// Add size of all keys and values.
+			// Again, use the fact that the last element's position equals
+			// the total of the key, value sizes of all previous elements.
+			used += int(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += used
+			s.BranchOverflowN += int(p.overflow)
+		}
+
+		// Keep track of maximum page depth.
+		if depth+1 > s.Depth {
+			s.Depth = (depth + 1)
+		}
+	})
+
+	// Alloc stats can be computed from page counts and pageSize.
+	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+	// Add the max depth of sub-buckets to get total nested depth.
+ s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. 
+ var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. 
+	KeyN  int // number of key/value pairs
+	Depth int // number of levels in B+tree
+
+	// Page size utilization.
+	BranchAlloc int // bytes allocated for physical branch pages
+	BranchInuse int // bytes actually used for branch data
+	LeafAlloc   int // bytes allocated for physical leaf pages
+	LeafInuse   int // bytes actually used for leaf data
+
+	// Bucket statistics
+	BucketN           int // total number of buckets including the top bucket
+	InlineBucketN     int // total number of inlined buckets
+	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+	s.BranchPageN += other.BranchPageN
+	s.BranchOverflowN += other.BranchOverflowN
+	s.LeafPageN += other.LeafPageN
+	s.LeafOverflowN += other.LeafOverflowN
+	s.KeyN += other.KeyN
+	if s.Depth < other.Depth {
+		s.Depth = other.Depth
+	}
+	s.BranchAlloc += other.BranchAlloc
+	s.BranchInuse += other.BranchInuse
+	s.LeafAlloc += other.LeafAlloc
+	s.LeafInuse += other.LeafInuse
+
+	s.BucketN += other.BucketN
+	s.InlineBucketN += other.InlineBucketN
+	s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+	var clone = make([]byte, len(v))
+	copy(clone, v)
+	return clone
+}
diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go
new file mode 100644
index 00000000..95e1905a
--- /dev/null
+++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go
@@ -0,0 +1,1529 @@
+package main
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+	"unsafe"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boltdb/bolt"
+)
+
+var (
+	// ErrUsage is returned when a usage message was printed and the process
+	// should simply exit with an error.
+	ErrUsage = errors.New("usage")
+
+	// ErrUnknownCommand is returned when a CLI command is not specified.
+	ErrUnknownCommand = errors.New("unknown command")
+
+	// ErrPathRequired is returned when the path to a Bolt database is not specified.
+	ErrPathRequired = errors.New("path required")
+
+	// ErrFileNotFound is returned when a Bolt database does not exist.
+	ErrFileNotFound = errors.New("file not found")
+
+	// ErrInvalidValue is returned when a benchmark reads an unexpected value.
+	ErrInvalidValue = errors.New("invalid value")
+
+	// ErrCorrupt is returned when checking a data file finds errors.
+	ErrCorrupt = errors.New("corrupt database")
+
+	// ErrNonDivisibleBatchSize is returned when the iteration count can't be
+	// evenly divided by the batch size.
+	ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")
+
+	// ErrPageIDRequired is returned when a required page id is not specified.
+	ErrPageIDRequired = errors.New("page id required")
+
+	// ErrPageNotFound is returned when specifying a page above the high water mark.
+	ErrPageNotFound = errors.New("page not found")
+
+	// ErrPageFreed is returned when reading a page that has already been freed.
+	ErrPageFreed = errors.New("page freed")
+)
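Because all of the command objects below share the same Stdin/Stdout/Stderr wiring, the CLI can be driven in-process, which is handy for tests. A hypothetical sketch (the helper name is invented; it assumes it lives in this package):

```go
// runInfo runs "bolt info" against path and returns the captured output.
func runInfo(path string) (string, error) {
	m := NewMain()
	var out bytes.Buffer
	m.Stdout = &out // capture output instead of os.Stdout
	if err := m.Run("info", path); err != nil {
		return "", err
	}
	return out.String(), nil
}
```
+// PageHeaderSize represents the size of the bolt.page header.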
+const PageHeaderSize = 16
+
+func main() {
+	m := NewMain()
+	if err := m.Run(os.Args[1:]...); err == ErrUsage {
+		os.Exit(2)
+	} else if err != nil {
+		fmt.Println(err.Error())
+		os.Exit(1)
+	}
+}
+
+// Main represents the main program execution.
+type Main struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// NewMain returns a new instance of Main connected to the standard input/output.
+func NewMain() *Main {
+	return &Main{
+		Stdin:  os.Stdin,
+		Stdout: os.Stdout,
+		Stderr: os.Stderr,
+	}
+}
+
+// Run executes the program.
+func (m *Main) Run(args ...string) error {
+	// Require a command at the beginning.
+	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
+		fmt.Fprintln(m.Stderr, m.Usage())
+		return ErrUsage
+	}
+
+	// Execute command.
+	switch args[0] {
+	case "help":
+		fmt.Fprintln(m.Stderr, m.Usage())
+		return ErrUsage
+	case "bench":
+		return newBenchCommand(m).Run(args[1:]...)
+	case "check":
+		return newCheckCommand(m).Run(args[1:]...)
+	case "dump":
+		return newDumpCommand(m).Run(args[1:]...)
+	case "info":
+		return newInfoCommand(m).Run(args[1:]...)
+	case "page":
+		return newPageCommand(m).Run(args[1:]...)
+	case "pages":
+		return newPagesCommand(m).Run(args[1:]...)
+	case "stats":
+		return newStatsCommand(m).Run(args[1:]...)
+	default:
+		return ErrUnknownCommand
+	}
+}
+
+// Usage returns the help message.
+func (m *Main) Usage() string {
+	return strings.TrimLeft(`
+Bolt is a tool for inspecting bolt databases.
+
+Usage:
+
+	bolt command [arguments]
+
+The commands are:
+
+	bench       run synthetic benchmark against bolt
+	check       verifies integrity of bolt database
+	dump        print a hexadecimal dump of a single page
+	info        print basic info
+	help        print this screen
+	page        print one or more pages in human readable format
+	pages       print list of pages with their types
+	stats       iterate over all pages and generate usage stats
+
+Use "bolt [command] -h" for more information about a command.
+`, "\n")
+}
+
+// CheckCommand represents the "check" command execution.
+type CheckCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newCheckCommand returns a CheckCommand.
+func newCheckCommand(m *Main) *CheckCommand {
+	return &CheckCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the command.
+func (cmd *CheckCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Require database path.
+	path := fs.Arg(0)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	// Open database.
+	db, err := bolt.Open(path, 0666, nil)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	// Perform consistency check.
+	return db.View(func(tx *bolt.Tx) error {
+		var count int
+		ch := tx.Check()
+	loop:
+		for {
+			select {
+			case err, ok := <-ch:
+				if !ok {
+					break loop
+				}
+				fmt.Fprintln(cmd.Stdout, err)
+				count++
+			}
+		}
+
+		// Print summary of errors.
+		if count > 0 {
+			fmt.Fprintf(cmd.Stdout, "%d errors found\n", count)
+			return ErrCorrupt
+		}
+
+		// Notify user that database is valid.
+		fmt.Fprintln(cmd.Stdout, "OK")
+		return nil
+	})
+}
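Tx.Check streams problems over a channel that closes when the scan completes, which is why the loop above selects until the channel closes. A condensed, hypothetical equivalent using range (assumes db is an open *bolt.DB):

```go
err := db.View(func(tx *bolt.Tx) error {
	for cerr := range tx.Check() { // channel is closed when the check finishes
		log.Println("consistency error:", cerr)
	}
	return nil
})
```
+// Usage returns the help message.
+func (cmd *CheckCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt check PATH
+
+Check opens a database at PATH and runs an exhaustive check to verify that
+all pages are accessible or are marked as freed. It also verifies that no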
+pages are double referenced.
+
+Verification errors will stream out as they are found and the process will
+return after all pages have been checked.
+`, "\n")
+}
+
+// InfoCommand represents the "info" command execution.
+type InfoCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newInfoCommand returns an InfoCommand.
+func newInfoCommand(m *Main) *InfoCommand {
+	return &InfoCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the command.
+func (cmd *InfoCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Require database path.
+	path := fs.Arg(0)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	// Open the database.
+	db, err := bolt.Open(path, 0666, nil)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	// Print basic database info.
+	info := db.Info()
+	fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize)
+
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *InfoCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt info PATH
+
+Info prints basic information about the Bolt database at PATH.
+`, "\n")
+}
+
+// DumpCommand represents the "dump" command execution.
+type DumpCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newDumpCommand returns a DumpCommand.
+func newDumpCommand(m *Main) *DumpCommand {
+	return &DumpCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the command.
+func (cmd *DumpCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Require database path and page id.
+	path := fs.Arg(0)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	// Read page ids.
+	pageIDs, err := atois(fs.Args()[1:])
+	if err != nil {
+		return err
+	} else if len(pageIDs) == 0 {
+		return ErrPageIDRequired
+	}
+
+	// Open database to retrieve page size.
+	pageSize, err := ReadPageSize(path)
+	if err != nil {
+		return err
+	}
+
+	// Open database file handler.
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+
+	// Print each page listed.
+	for i, pageID := range pageIDs {
+		// Print a separator.
+		if i > 0 {
+			fmt.Fprintln(cmd.Stdout, "===============================================")
+		}
+
+		// Print page to stdout.
+		if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// PrintPage prints a given page as hexadecimal.
+func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+		line := buf[offset : offset+bytesPerLineN]
+		isLastLine := (offset == (pageSize - bytesPerLineN))
+
+		// If it's the same as the previous line then print a skip.
+		if bytes.Equal(line, prev) && !isLastLine {
+			if !skipped {
+				fmt.Fprintf(w, "%07x *\n", addr+offset)
+				skipped = true
+			}
+		} else {
+			// Print line as hexadecimal in 2-byte groups.
+			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
+				line[0:2], line[2:4], line[4:6], line[6:8],
+				line[8:10], line[10:12], line[12:14], line[14:16],
+			)
+
+			skipped = false
+		}
+
+		// Save the previous line.
+		prev = line
+	}
+	fmt.Fprint(w, "\n")
+
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *DumpCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt dump PATH pageid [pageid...]
+
+Dump prints a hexadecimal dump of one or more pages.
+`, "\n")
+}
+
+// PageCommand represents the "page" command execution.
+type PageCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newPageCommand returns a PageCommand.
+func newPageCommand(m *Main) *PageCommand {
+	return &PageCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the command.
+func (cmd *PageCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Require database path and page id.
+	path := fs.Arg(0)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	// Read page ids.
+	pageIDs, err := atois(fs.Args()[1:])
+	if err != nil {
+		return err
+	} else if len(pageIDs) == 0 {
+		return ErrPageIDRequired
+	}
+
+	// Open database file handler.
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+
+	// Print each page listed.
+	for i, pageID := range pageIDs {
+		// Print a separator.
+		if i > 0 {
+			fmt.Fprintln(cmd.Stdout, "===============================================")
+		}
+
+		// Retrieve page info and page size.
+		p, buf, err := ReadPage(path, pageID)
+		if err != nil {
+			return err
+		}
+
+		// Print basic page info.
+		fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id)
+		fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type())
+		fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf))
+
+		// Print type-specific data.
+		switch p.Type() {
+		case "meta":
+			err = cmd.PrintMeta(cmd.Stdout, buf)
+		case "leaf":
+			err = cmd.PrintLeaf(cmd.Stdout, buf)
+		case "branch":
+			err = cmd.PrintBranch(cmd.Stdout, buf)
+		case "freelist":
+			err = cmd.PrintFreelist(cmd.Stdout, buf)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
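PrintMeta below reinterprets the raw page buffer as the internal meta struct. For intuition, here is a standalone, hypothetical sketch that pulls the page size out of meta page 0 by hand: skip the 16-byte page header, then the meta struct's magic and version fields (4 bytes each). It assumes a little-endian machine and an arbitrary file name:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := os.Open("example.db") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Meta page 0 starts at offset 0; pageSize lives at offset 16+8.
	var buf [4096]byte
	if _, err := f.ReadAt(buf[:], 0); err != nil {
		log.Fatal(err)
	}
	pageSize := binary.LittleEndian.Uint32(buf[24:28])
	fmt.Println("page size:", pageSize)
}
```
+// PrintMeta prints the data from the meta page.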
+func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
+	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintLeaf prints the data for a leaf page.
+func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.leafPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		// Format value as string.
+		var v string
+		if (e.flags & uint32(bucketLeafFlag)) != 0 {
+			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
+			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+		} else if isPrintable(string(e.value())) {
+			v = fmt.Sprintf("%q", string(e.value()))
+		} else {
+			v = fmt.Sprintf("%x", string(e.value()))
+		}
+
+		fmt.Fprintf(w, "%s: %s\n", k, v)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintBranch prints the data for a branch page.
+func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.branchPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintFreelist prints the data for a freelist page.
+func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each page in the freelist.
+	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
+	for i := uint16(0); i < p.count; i++ {
+		fmt.Fprintf(w, "%d\n", ids[i])
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintPage prints a given page as hexadecimal.
+func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+		line := buf[offset : offset+bytesPerLineN]
+		isLastLine := (offset == (pageSize - bytesPerLineN))
+
+		// If it's the same as the previous line then print a skip.
+		if bytes.Equal(line, prev) && !isLastLine {
+			if !skipped {
+				fmt.Fprintf(w, "%07x *\n", addr+offset)
+				skipped = true
+			}
+		} else {
+			// Print line as hexadecimal in 2-byte groups.
+ fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *PageCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt page -page PATH pageid [pageid...] + +Page prints one or more pages in human readable format. +`, "\n") +} + +// PagesCommand represents the "pages" command execution. +type PagesCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPagesCommand returns a PagesCommand. +func newPagesCommand(m *Main) *PagesCommand { + return &PagesCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PagesCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + // Write header. + fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") + fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") + + return db.Update(func(tx *bolt.Tx) error { + var id int + for { + p, err := tx.Page(id) + if err != nil { + return &PageError{ID: id, Err: err} + } else if p == nil { + break + } + + // Only display count and overflow if this is a non-free page. + var count, overflow string + if p.Type != "free" { + count = strconv.Itoa(p.Count) + if p.OverflowCount > 0 { + overflow = strconv.Itoa(p.OverflowCount) + } + } + + // Print table row. + fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) + + // Move to the next non-overflow page. + id += 1 + if p.Type != "free" { + id += p.OverflowCount + } + } + return nil + }) +} + +// Usage returns the help message. +func (cmd *PagesCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt pages PATH + +Pages prints a table of pages with their type (meta, leaf, branch, freelist). +Leaf and branch pages will show a key count in the "items" column while the +freelist will show the number of free pages in the "items" column. + +The "overflow" column shows the number of blocks that the page spills over +into. Normally there is no overflow but large keys and values can cause +a single page to take up multiple blocks. +`, "\n") +} + +// StatsCommand represents the "stats" command execution. +type StatsCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewStatsCommand returns a StatsCommand. +func newStatsCommand(m *Main) *StatsCommand { + return &StatsCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *StatsCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. 
+	path, prefix := fs.Arg(0), fs.Arg(1)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	// Open database.
+	db, err := bolt.Open(path, 0666, nil)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	return db.View(func(tx *bolt.Tx) error {
+		var s bolt.BucketStats
+		var count int
+		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
+			if bytes.HasPrefix(name, []byte(prefix)) {
+				s.Add(b.Stats())
+				count += 1
+			}
+			return nil
+		}); err != nil {
+			return err
+		}
+
+		fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count)
+
+		fmt.Fprintln(cmd.Stdout, "Page count statistics")
+		fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN)
+
+		fmt.Fprintln(cmd.Stdout, "Tree statistics")
+		fmt.Fprintf(cmd.Stdout, "\tNumber of key/value pairs: %d\n", s.KeyN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth)
+
+		fmt.Fprintln(cmd.Stdout, "Page size utilization")
+		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc)
+		var percentage int
+		if s.BranchAlloc != 0 {
+			percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage)
+		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc)
+		percentage = 0
+		if s.LeafAlloc != 0 {
+			percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage)
+
+		fmt.Fprintln(cmd.Stdout, "Bucket statistics")
+		fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
+		percentage = 0
+		if s.BucketN != 0 {
+			percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tTotal number of inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
+		percentage = 0
+		if s.LeafInuse != 0 {
+			percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage)
+
+		return nil
+	})
+}
+
+// Usage returns the help message.
+func (cmd *StatsCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt stats PATH
+
+Stats performs an extensive search of the database to track every page
+reference. It starts at the current meta page and recursively iterates
+through every accessible bucket.
+
+The following errors can be reported:
+
+	already freed
+		The page is referenced more than once in the freelist.
+
+	unreachable unfreed
+		The page is not referenced by a bucket or in the freelist.
+
+	reachable freed
+		The page is referenced by a bucket but is also in the freelist.
+
+	out of bounds
+		A page is referenced that is above the high water mark.
+
+	multiple references
+		A page is referenced by more than one other page.
+
+	invalid type
+		The page type is not "meta", "leaf", "branch", or "freelist".
+
+No errors should occur in your database.
+However, if for some reason you experience corruption, please submit a
+ticket to the Bolt project page:
+
+	https://github.com/boltdb/bolt/issues
+`, "\n")
+}
+
+var benchBucketName = []byte("bench")
+
+// BenchCommand represents the "bench" command execution.
+type BenchCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newBenchCommand returns a BenchCommand using the given Main's
+// standard input/output streams.
+func newBenchCommand(m *Main) *BenchCommand {
+	return &BenchCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the "bench" command.
+func (cmd *BenchCommand) Run(args ...string) error {
+	// Parse CLI arguments.
+	options, err := cmd.ParseFlags(args)
+	if err != nil {
+		return err
+	}
+
+	// Remove path if "-work" is not set. Otherwise keep path.
+	if options.Work {
+		fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path)
+	} else {
+		defer os.Remove(options.Path)
+	}
+
+	// Create database.
+	db, err := bolt.Open(options.Path, 0666, nil)
+	if err != nil {
+		return err
+	}
+	db.NoSync = options.NoSync
+	defer db.Close()
+
+	// Write to the database.
+	var results BenchResults
+	if err := cmd.runWrites(db, options, &results); err != nil {
+		return fmt.Errorf("bench: write: %s", err)
+	}
+
+	// Read from the database.
+	if err := cmd.runReads(db, options, &results); err != nil {
+		return fmt.Errorf("bench: read: %s", err)
+	}
+
+	// Print results.
+	fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond())
+	fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond())
+	fmt.Fprintln(os.Stderr, "")
+	return nil
+}
+
+// ParseFlags parses the command line flags.
+func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
+	var options BenchOptions
+
+	// Parse flagset.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "")
+	fs.StringVar(&options.WriteMode, "write-mode", "seq", "")
+	fs.StringVar(&options.ReadMode, "read-mode", "seq", "")
+	fs.IntVar(&options.Iterations, "count", 1000, "")
+	fs.IntVar(&options.BatchSize, "batch-size", 0, "")
+	fs.IntVar(&options.KeySize, "key-size", 8, "")
+	fs.IntVar(&options.ValueSize, "value-size", 32, "")
+	fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
+	fs.StringVar(&options.MemProfile, "memprofile", "", "")
+	fs.StringVar(&options.BlockProfile, "blockprofile", "", "")
+	fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "")
+	fs.BoolVar(&options.NoSync, "no-sync", false, "")
+	fs.BoolVar(&options.Work, "work", false, "")
+	fs.StringVar(&options.Path, "path", "", "")
+	fs.SetOutput(cmd.Stderr)
+	if err := fs.Parse(args); err != nil {
+		return nil, err
+	}
+
+	// Set batch size to iteration size if not set.
+	// Require that batch size can be evenly divided by the iteration count.
+	if options.BatchSize == 0 {
+		options.BatchSize = options.Iterations
+	} else if options.Iterations%options.BatchSize != 0 {
+		return nil, ErrNonDivisibleBatchSize
+	}
+
+	// Generate temp path if one is not passed in.
+	if options.Path == "" {
+		f, err := ioutil.TempFile("", "bolt-bench-")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+		f.Close()
+		os.Remove(f.Name())
+		options.Path = f.Name()
+	}
+
+	return &options, nil
+}
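With ParseFlags in place, the bench command can be exercised in-process as well. A hypothetical invocation (flag values arbitrary; assumes this package):

```go
m := NewMain()
// -count must be evenly divisible by -batch-size, or ParseFlags
// returns ErrNonDivisibleBatchSize.
if err := m.Run("bench", "-count", "10000", "-batch-size", "100", "-write-mode", "seq"); err != nil {
	log.Fatal(err)
}
```
+// Writes to the database.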
+func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+	// Start profiling for writes.
+	if options.ProfileMode == "rw" || options.ProfileMode == "w" {
+		cmd.startProfiling(options)
+	}
+
+	t := time.Now()
+
+	var err error
+	switch options.WriteMode {
+	case "seq":
+		err = cmd.runWritesSequential(db, options, results)
+	case "rnd":
+		err = cmd.runWritesRandom(db, options, results)
+	case "seq-nest":
+		err = cmd.runWritesSequentialNested(db, options, results)
+	case "rnd-nest":
+		err = cmd.runWritesRandomNested(db, options, results)
+	default:
+		return fmt.Errorf("invalid write mode: %s", options.WriteMode)
+	}
+
+	// Save time to write.
+	results.WriteDuration = time.Since(t)
+
+	// Stop profiling for writes only.
+	if options.ProfileMode == "w" {
+		cmd.stopProfiling()
+	}
+
+	return err
+}
+
+func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+	var i = uint32(0)
+	return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i })
+}
+
+func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() })
+}
+
+func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+	var i = uint32(0)
+	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i })
+}
+
+func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() })
+}
+
+func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
+	results.WriteOps = options.Iterations
+
+	for i := 0; i < options.Iterations; i += options.BatchSize {
+		if err := db.Update(func(tx *bolt.Tx) error {
+			b, _ := tx.CreateBucketIfNotExists(benchBucketName)
+			b.FillPercent = options.FillPercent
+
+			for j := 0; j < options.BatchSize; j++ {
+				key := make([]byte, options.KeySize)
+				value := make([]byte, options.ValueSize)
+
+				// Write key as uint32.
+				binary.BigEndian.PutUint32(key, keySource())
+
+				// Insert key/value.
+				if err := b.Put(key, value); err != nil {
+					return err
+				}
+			}
+
+			return nil
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
+	results.WriteOps = options.Iterations
+
+	for i := 0; i < options.Iterations; i += options.BatchSize {
+		if err := db.Update(func(tx *bolt.Tx) error {
+			top, err := tx.CreateBucketIfNotExists(benchBucketName)
+			if err != nil {
+				return err
+			}
+			top.FillPercent = options.FillPercent
+
+			// Create bucket key.
+			name := make([]byte, options.KeySize)
+			binary.BigEndian.PutUint32(name, keySource())
+
+			// Create bucket.
+			b, err := top.CreateBucketIfNotExists(name)
+			if err != nil {
+				return err
+			}
+			b.FillPercent = options.FillPercent
+
+			for j := 0; j < options.BatchSize; j++ {
+				var key = make([]byte, options.KeySize)
+				var value = make([]byte, options.ValueSize)
+
+				// Generate key as uint32.
+				binary.BigEndian.PutUint32(key, keySource())
+
+				// Insert value into subbucket.
+ if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +// Reads from the database. +func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for reads. + if options.ProfileMode == "r" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.ReadMode { + case "seq": + switch options.WriteMode { + case "seq-nest", "rnd-nest": + err = cmd.runReadsSequentialNested(db, options, results) + default: + err = cmd.runReadsSequential(db, options, results) + } + default: + return fmt.Errorf("invalid read mode: %s", options.ReadMode) + } + + // Save read time. + results.ReadDuration = time.Since(t) + + // Stop profiling for reads. + if options.ProfileMode == "rw" || options.ProfileMode == "r" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return errors.New("invalid value") + } + count++ + } + + if options.WriteMode == "seq" && count != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + var top = tx.Bucket(benchBucketName) + if err := top.ForEach(func(name, _ []byte) error { + c := top.Bucket(name).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return ErrInvalidValue + } + count++ + } + return nil + }); err != nil { + return err + } + + if options.WriteMode == "seq-nest" && count != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +// File handlers for the various profiles. +var cpuprofile, memprofile, blockprofile *os.File + +// Starts all profiles set on the options. +func (cmd *BenchCommand) startProfiling(options *BenchOptions) { + var err error + + // Start CPU profiling. + if options.CPUProfile != "" { + cpuprofile, err = os.Create(options.CPUProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) + os.Exit(1) + } + pprof.StartCPUProfile(cpuprofile) + } + + // Start memory profiling. + if options.MemProfile != "" { + memprofile, err = os.Create(options.MemProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) + os.Exit(1) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. + if options.BlockProfile != "" { + blockprofile, err = os.Create(options.BlockProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) + os.Exit(1) + } + runtime.SetBlockProfileRate(1) + } +} + +// Stops all profiles. 
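
startProfiling above wires the three profile kinds to files: only the CPU profile streams while the benchmark runs, while the heap and block profiles are written once when profiling stops. A standalone sketch of the same start/stop bracket, with an illustrative file name and a stand-in workload:

```go
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Everything between Start and Stop is sampled, just as the
	// bench command brackets its write and read phases.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	var sum int
	for i := 0; i < 50000000; i++ { // stand-in workload
		sum += i
	}
	log.Println(sum)
}
```

The resulting file can then be inspected with `go tool pprof`.
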
+func (cmd *BenchCommand) stopProfiling() { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + pprof.Lookup("heap").WriteTo(memprofile, 0) + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + pprof.Lookup("block").WriteTo(blockprofile, 0) + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// BenchOptions represents the set of options that can be passed to "bolt bench". +type BenchOptions struct { + ProfileMode string + WriteMode string + ReadMode string + Iterations int + BatchSize int + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string + StatsInterval time.Duration + FillPercent float64 + NoSync bool + Work bool + Path string +} + +// BenchResults represents the performance results of the benchmark. +type BenchResults struct { + WriteOps int + WriteDuration time.Duration + ReadOps int + ReadDuration time.Duration +} + +// Returns the duration for a single write operation. +func (r *BenchResults) WriteOpDuration() time.Duration { + if r.WriteOps == 0 { + return 0 + } + return r.WriteDuration / time.Duration(r.WriteOps) +} + +// Returns average number of write operations that can be performed per second. +func (r *BenchResults) WriteOpsPerSecond() int { + var op = r.WriteOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +// Returns the duration for a single read operation. +func (r *BenchResults) ReadOpDuration() time.Duration { + if r.ReadOps == 0 { + return 0 + } + return r.ReadDuration / time.Duration(r.ReadOps) +} + +// Returns average number of read operations that can be performed per second. +func (r *BenchResults) ReadOpsPerSecond() int { + var op = r.ReadOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +type PageError struct { + ID int + Err error +} + +func (e *PageError) Error() string { + return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) +} + +// isPrintable returns true if the string is valid unicode and contains only printable runes. +func isPrintable(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, ch := range s { + if !unicode.IsPrint(ch) { + return false + } + } + return true +} + +// ReadPage reads page info & full page data from a path. +// This is not transactionally safe. +func ReadPage(path string, pageID int) (*page, []byte, error) { + // Find page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return nil, nil, fmt.Errorf("read page size: %s", err) + } + + // Open database file. + f, err := os.Open(path) + if err != nil { + return nil, nil, err + } + defer f.Close() + + // Read one block into buffer. + buf := make([]byte, pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + + // Determine total number of blocks. + p := (*page)(unsafe.Pointer(&buf[0])) + overflowN := p.overflow + + // Re-read entire page (with overflow) into buffer. + buf = make([]byte, (int(overflowN)+1)*pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + p = (*page)(unsafe.Pointer(&buf[0])) + + return p, buf, nil +} + +// ReadPageSize reads page size a path. +// This is not transactionally safe. +func ReadPageSize(path string) (int, error) { + // Open database file. 
+ f, err := os.Open(path) + if err != nil { + return 0, err + } + defer f.Close() + + // Read 4KB chunk. + buf := make([]byte, 4096) + if _, err := io.ReadFull(f, buf); err != nil { + return 0, err + } + + // Read page size from metadata. + m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) + return int(m.pageSize), nil +} + +// atois parses a slice of strings into integers. +func atois(strs []string) ([]int, error) { + var a []int + for _, str := range strs { + i, err := strconv.Atoi(str) + if err != nil { + return nil, err + } + a = append(a, i) + } + return a, nil +} + +// DO NOT EDIT. Copied from the "bolt" package. +const maxAllocSize = 0xFFFFFFF + +// DO NOT EDIT. Copied from the "bolt" package. +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +// DO NOT EDIT. Copied from the "bolt" package. +const bucketLeafFlag = 0x01 + +// DO NOT EDIT. Copied from the "bolt" package. +type pgid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type txid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type bucket struct { + root pgid + sequence uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) Type() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. 
+func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go new file mode 100644 index 00000000..006c5488 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,384 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. 
+ for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. 
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + return c.keyValue() +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. 
+ elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/db.go new file mode 100644 index 00000000..d39c4aa9 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/db.go @@ -0,0 +1,792 @@ +package bolt + +import ( + "fmt" + "hash/fnv" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronzied using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. 
+ // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + path string + file *os.File + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. 
+	if info, err := db.file.Stat(); err != nil {
+		return nil, fmt.Errorf("stat error: %s", err)
+	} else if info.Size() == 0 {
+		// Initialize new files with meta pages.
+		if err := db.init(); err != nil {
+			return nil, err
+		}
+	} else {
+		// Read the first meta page to determine the page size.
+		var buf [0x1000]byte
+		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
+			m := db.pageInBuffer(buf[:], 0).meta()
+			if err := m.validate(); err != nil {
+				return nil, fmt.Errorf("meta0 error: %s", err)
+			}
+			db.pageSize = int(m.pageSize)
+		}
+	}
+
+	// Memory map the data file.
+	if err := db.mmap(0); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Read in the freelist.
+	db.freelist = newFreelist()
+	db.freelist.read(db.page(db.meta().freelist))
+
+	// Mark the database as opened and return.
+	return db, nil
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages.
+	if err := db.meta0.validate(); err != nil {
+		return fmt.Errorf("meta0 error: %s", err)
+	}
+	if err := db.meta1.validate(); err != nil {
+		return fmt.Errorf("meta1 error: %s", err)
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + db.opened = false + + db.freelist = nil + db.path = "" + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + _ = funlock(db.file) + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be depedent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. 
+ db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Sync executes fdatasync() against the database file handle. 
+// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + if db.meta0.txid > db.meta1.txid { + return db.meta0 + } + return db.meta1 +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + buf := make([]byte, count*db.pageSize) + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. 
+// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = other.TxN - s.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } else if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go new file mode 100644 index 00000000..cc937845 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. 
+ + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go new file mode 100644 index 00000000..6883786d --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go @@ -0,0 +1,70 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when a data file is not a Bolt-formatted database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. 
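
The Caveats section above is easy to trip over: a value returned by Get points into the mmap and dies with its transaction. A sketch of a safe round trip, copying the bytes out before the View closure returns (bucket and key names are illustrative):

```go
package example

import "github.com/boltdb/bolt"

// putThenGet writes one pair inside Update, then reads it back inside
// View, returning a copy that outlives the read transaction.
func putThenGet(db *bolt.DB, key, value []byte) ([]byte, error) {
	err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put(key, value)
	})
	if err != nil {
		return nil, err
	}

	var out []byte
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		if v := b.Get(key); v != nil {
			out = append([]byte(nil), v...) // copy before the tx closes
		}
		return nil
	})
	return out, err
}
```

Update commits when the closure returns nil and rolls back otherwise; View always ends in a rollback.
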
+ ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go new file mode 100644 index 00000000..0161948f --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go @@ -0,0 +1,242 @@ +package bolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + ids []pgid // all free and available free page ids. + pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + pending: make(map[txid][]pgid), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, list := range f.pending { + count += len(list) + } + return count +} + +// all returns a list of all free ids and all pending ids in one sorted list. +func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...) + } + + sort.Sort(m) + return pgids(f.ids).merge(m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. 
+// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. 
+ f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/node.go new file mode 100644 index 00000000..c9fb21c7 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/node.go @@ -0,0 +1,636 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. 
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. 
+ //
+ // See: https://github.com/boltdb/bolt/pull/335
+ klen, vlen := len(item.key), len(item.value)
+ if len(b) < klen+vlen {
+ b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+ }
+
+ // Write data for the element to the end of the page.
+ copy(b[0:], item.key)
+ b = b[klen:]
+ copy(b[0:], item.value)
+ b = b[vlen:]
+ }
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = i
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if i >= minKeysPerPage && sz+elsize > threshold {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If target node has extra nodes then just move one over. + if target.numChildren() > target.minKeys() { + if useNextSibling { + // Reparent and move node. + if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + n.inodes = append(n.inodes, target.inodes[0]) + target.inodes = target.inodes[1:] + + // Update target key on parent. 
+ target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) + target.key = target.inodes[0].key + _assert(len(target.key) > 0, "rebalance(1): zero-length node key") + } else { + // Reparent and move node. + if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[1:], n.inodes) + n.inodes[0] = target.inodes[len(target.inodes)-1] + target.inodes = target.inodes[:len(target.inodes)-1] + } + + // Update parent key for node. + n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "rebalance(2): zero-length node key") + + return + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. 
+ var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/page.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/page.go new file mode 100644 index 00000000..818aa1b1 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/page.go @@ -0,0 +1,172 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) + + return merged +} diff --git a/server/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/server/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go new file mode 100644 index 00000000..fe6c287f --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go @@ -0,0 +1,620 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. 
+type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. 
+func (tx *Tx) Commit() error {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ } else if !tx.writable {
+ return ErrTxNotWritable
+ }
+
+ // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+ // Rebalance nodes which have had deletions.
+ var startTime = time.Now()
+ tx.root.rebalance()
+ if tx.stats.Rebalance > 0 {
+ tx.stats.RebalanceTime += time.Since(startTime)
+ }
+
+ // Spill data onto dirty pages.
+ startTime = time.Now()
+ if err := tx.root.spill(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.SpillTime += time.Since(startTime)
+
+ // Free the old root bucket.
+ tx.meta.root.root = tx.root.root
+
+ // Free the freelist and allocate new pages for it. This will overestimate
+ // the size of the freelist but not underestimate the size (which would be bad).
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+ if err := tx.db.freelist.write(p); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.meta.freelist = p.id
+
+ // Write dirty pages to disk.
+ startTime = time.Now()
+ if err := tx.write(); err != nil {
+ tx.rollback()
+ return err
+ }
+
+ // If strict mode is enabled then perform a consistency check.
+ // Only the first consistency error is reported in the panic.
+ if tx.db.StrictMode {
+ if err, ok := <-tx.Check(); ok {
+ panic("check fail: " + err.Error())
+ }
+ }
+
+ // Write meta to disk.
+ if err := tx.writeMeta(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.WriteTime += time.Since(startTime)
+
+ // Finalize the transaction.
+ tx.close()
+
+ // Execute commit handlers now that the locks have been removed.
+ for _, fn := range tx.commitHandlers {
+ fn()
+ }
+
+ return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ }
+ tx.rollback()
+ return nil
+}
+
+func (tx *Tx) rollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ }
+ tx.close()
+}
+
+func (tx *Tx) close() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ // Grab freelist stats.
+ var freelistFreeN = tx.db.freelist.free_count()
+ var freelistPendingN = tx.db.freelist.pending_count()
+ var freelistAlloc = tx.db.freelist.size()
+
+ // Remove transaction ref & writer lock.
+ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.FreePageN = freelistFreeN
+ tx.db.stats.PendingPageN = freelistPendingN
+ tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+ tx.db.stats.FreelistInuse = freelistAlloc
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
+ } else {
+ tx.db.removeTx(tx)
+ }
+
+ // Clear all references.
+ tx.db = nil
+ tx.meta = nil
+ tx.root = Bucket{tx: tx}
+ tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility. Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+ _, err := tx.WriteTo(w)
+ return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader directly. + var f *os.File + if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { + // Fallback to a regular open if that doesn't work. + if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { + return 0, err + } + } + + // Copy the meta pages. + tx.db.metalock.Lock() + n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) + tx.db.metalock.Unlock() + if err != nil { + _ = f.Close() + return n, fmt.Errorf("meta copy: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + _ = f.Close() + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. 
+ if freed[p.id] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+ } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+ ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+ }
+ })
+
+ // Check each bucket within this bucket.
+ _ = b.ForEach(func(k, v []byte) error {
+ if child := b.Bucket(k); child != nil {
+ tx.checkBucket(child, reachable, freed, ch)
+ }
+ return nil
+ })
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*page, error) {
+ p, err := tx.db.allocate(count)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save to our page cache.
+ tx.pages[p.id] = p
+
+ // Update statistics.
+ tx.stats.PageCount++
+ tx.stats.PageAlloc += count * tx.db.pageSize
+
+ return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+ // Sort pages by id.
+ pages := make(pages, 0, len(tx.pages))
+ for _, p := range tx.pages {
+ pages = append(pages, p)
+ }
+ sort.Sort(pages)
+
+ // Write pages to disk in order.
+ for _, p := range pages {
+ size := (int(p.overflow) + 1) * tx.db.pageSize
+ offset := int64(p.id) * int64(tx.db.pageSize)
+
+ // Write out page in "max allocation" sized chunks.
+ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
+ for {
+ // Limit our write to our max allocation size.
+ sz := size
+ if sz > maxAllocSize-1 {
+ sz = maxAllocSize - 1
+ }
+
+ // Write chunk to disk.
+ buf := ptr[:sz]
+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+ return err
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ // Exit inner for loop if we've written all the chunks.
+ size -= sz
+ if size == 0 {
+ break
+ }
+
+ // Otherwise move offset forward and move pointer to next chunk.
+ offset += int64(sz)
+ ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+ }
+ }
+
+ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Clear out page cache.
+ tx.pages = make(map[pgid]*page)
+
+ return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+ // Create a temporary buffer for the meta page.
+ buf := make([]byte, tx.db.pageSize)
+ p := tx.db.pageInBuffer(buf, 0)
+ tx.meta.write(p)
+
+ // Write the meta page to file.
+ if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+ return err
+ }
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+ // Check the dirty pages first.
+ if tx.pages != nil {
+ if p, ok := tx.pages[id]; ok {
+ return p
+ }
+ }
+
+ // Otherwise return directly from the mmap.
+ return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+ p := tx.page(pgid)
+
+ // Execute function.
+ fn(p, depth)
+
+ // Recursively loop over children.
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ tx.forEachPage(elem.pgid, depth+1, fn)
+ }
+ }
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+ if tx.db == nil {
+ return nil, ErrTxClosed
+ } else if pgid(id) >= tx.meta.pgid {
+ return nil, nil
+ }
+
+ // Build the page info.
+ p := tx.db.page(pgid(id))
+ info := &PageInfo{
+ ID: id,
+ Count: int(p.count),
+ OverflowCount: int(p.overflow),
+ }
+
+ // Determine the type (or if it's free).
+ if tx.db.freelist.freed(pgid(id)) {
+ info.Type = "free"
+ } else {
+ info.Type = p.typ()
+ }
+
+ return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+ // Page statistics.
+ PageCount int // number of page allocations
+ PageAlloc int // total bytes allocated
+
+ // Cursor statistics.
+ CursorCount int // number of cursors created
+
+ // Node statistics
+ NodeCount int // number of node allocations
+ NodeDeref int // number of node dereferences
+
+ // Rebalance statistics.
+ Rebalance int // number of node rebalances
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ Split int // number of nodes split
+ Spill int // number of nodes spilled
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ Write int // number of writes performed
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.PageCount += other.PageCount
+ s.PageAlloc += other.PageAlloc
+ s.CursorCount += other.CursorCount
+ s.NodeCount += other.NodeCount
+ s.NodeDeref += other.NodeDeref
+ s.Rebalance += other.Rebalance
+ s.RebalanceTime += other.RebalanceTime
+ s.Split += other.Split
+ s.Spill += other.Spill
+ s.SpillTime += other.SpillTime
+ s.Write += other.Write
+ s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.PageCount - other.PageCount
+ diff.PageAlloc = s.PageAlloc - other.PageAlloc
+ diff.CursorCount = s.CursorCount - other.CursorCount
+ diff.NodeCount = s.NodeCount - other.NodeCount
+ diff.NodeDeref = s.NodeDeref - other.NodeDeref
+ diff.Rebalance = s.Rebalance - other.Rebalance
+ diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+ diff.Split = s.Split - other.Split
+ diff.Spill = s.Spill - other.Spill
+ diff.SpillTime = s.SpillTime - other.SpillTime
+ diff.Write = s.Write - other.Write
+ diff.WriteTime = s.WriteTime - other.WriteTime
+ return diff
+}
diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/README.md b/server/Godeps/_workspace/src/github.com/boombuler/barcode/README.md
index d7abc4ee..f97c6e89 100644
--- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/README.md
+++ b/server/Godeps/_workspace/src/github.com/boombuler/barcode/README.md
@@ -12,6 +12,6 @@ This is a package for GO which can be used to create different types of barcodes
 * 2 of 5
 
 ##Documentation##
-See [GoDoc](https://godoc.org/github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode)
+See [GoDoc](https://godoc.org/github.com/boombuler/barcode)
 
 To create a barcode use the Encode function from one of the subpackages.
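The pgids.merge function in the vendored page.go above computes the sorted union of two already-sorted page id lists: whichever slice currently has the smallest head "leads", and sort.Search finds the whole run of the lead that sorts before the follower's head so it can be appended in one step instead of element by element. A minimal standalone sketch of the same idea, using plain uint64 slices in place of the unexported pgid type (illustrative only, not part of the patch):

    package main

    import (
        "fmt"
        "sort"
    )

    // merge returns the sorted union of two sorted slices. The slice whose
    // next element is smallest leads; sort.Search finds the longest prefix
    // of the lead that comes before the follower's head, and that whole
    // run is appended at once before the roles swap.
    func merge(a, b []uint64) []uint64 {
        if len(a) == 0 {
            return b
        }
        if len(b) == 0 {
            return a
        }

        merged := make([]uint64, 0, len(a)+len(b))
        lead, follow := a, b
        if b[0] < a[0] {
            lead, follow = b, a
        }

        for len(lead) > 0 {
            // Append the run of lead elements that sort before follow[0].
            n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
            merged = append(merged, lead[:n]...)
            if n >= len(lead) {
                break
            }
            // The follower now has the smallest head; swap roles.
            lead, follow = follow, lead[n:]
        }

        // Whatever remains in follow is larger than everything merged so far.
        return append(merged, follow...)
    }

    func main() {
        fmt.Println(merge([]uint64{1, 4, 5, 9}, []uint64{2, 3, 7})) // [1 2 3 4 5 7 9]
    }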
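The Tx lifecycle above (init, Commit, Rollback, close) is normally driven through DB.Update and DB.View rather than managed by hand, and Tx.CopyFile gives an online backup under a read transaction. A minimal usage sketch, assuming the upstream import path github.com/boltdb/bolt (Plik vendors the same code under server/Godeps); the file, bucket, and key names are hypothetical:

    package main

    import (
        "log"

        "github.com/boltdb/bolt"
    )

    func main() {
        // Open (or create) the database file; Bolt takes an exclusive lock on it.
        db, err := bolt.Open("example.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Read/write transaction: Update commits on a nil return, rolls back on error.
        err = db.Update(func(tx *bolt.Tx) error {
            b, err := tx.CreateBucketIfNotExists([]byte("uploads"))
            if err != nil {
                return err
            }
            return b.Put([]byte("some-id"), []byte("some-value"))
        })
        if err != nil {
            log.Fatal(err)
        }

        // Read-only transaction: pages it can see stay pinned until it closes,
        // which is also what makes the online backup below consistent.
        err = db.View(func(tx *bolt.Tx) error {
            v := tx.Bucket([]byte("uploads")).Get([]byte("some-id"))
            log.Printf("got: %s", v)
            return tx.CopyFile("example.db.bak", 0600)
        })
        if err != nil {
            log.Fatal(err)
        }
    }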
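TxStats.Sub above is intended for interval sampling: snapshot the database statistics at two points in time and diff the embedded TxStats to get counters for just that window. A short sketch under the same assumptions as the previous example, and additionally assuming DB.Stats() returns a struct embedding TxStats, as the close() method above suggests:

    package boltstats

    import (
        "log"
        "time"

        "github.com/boltdb/bolt"
    )

    // LogWriteLoad samples Bolt's transaction counters over a ten-second
    // window and logs the page writes and write time spent in that span.
    func LogWriteLoad(db *bolt.DB) {
        prev := db.Stats()
        time.Sleep(10 * time.Second)
        delta := db.Stats().TxStats.Sub(&prev.TxStats)
        log.Printf("writes=%d, time writing=%s", delta.Write, delta.WriteTime)
    }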
diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/codabar/encoder_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/codabar/encoder_test.go deleted file mode 100644 index 5f13009e..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/codabar/encoder_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package codabar - -import ( - "image/color" - "testing" -) - -func Test_Encode(t *testing.T) { - _, err := Encode("FOOBAR") - if err == nil { - t.Error("\"FOOBAR\" should not be encodable") - } - - testEncode := func(txt, testResult string) { - code, err := Encode(txt) - if err != nil || code == nil { - t.Fail() - } else { - if code.Bounds().Max.X != len(testResult) { - t.Errorf("%v: length missmatch", txt) - } else { - for i, r := range testResult { - if (code.At(i, 0) == color.Black) != (r == '1') { - t.Errorf("%v: code missmatch on position %d", txt, i) - } - } - } - } - } - - testEncode("A40156B", "10110010010101101001010101001101010110010110101001010010101101010010011") -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/code128/encode_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/code128/encode_test.go deleted file mode 100644 index a0e2a837..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/code128/encode_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package code128 - -import ( - "image/color" - "testing" -) - -func testEncode(t *testing.T, txt, testResult string) { - code, err := Encode(txt) - if err != nil || code == nil { - t.Error(err) - } else { - if code.Bounds().Max.X != len(testResult) { - t.Errorf("%v: length missmatch", txt) - } else { - for i, r := range testResult { - if (code.At(i, 0) == color.Black) != (r == '1') { - t.Errorf("%v: code missmatch on position %d", txt, i) - } - } - } - } -} - -func Test_EncodeFunctionChars(t *testing.T) { - encFNC1 := "11110101110" - encFNC2 := "11110101000" - encFNC3 := "10111100010" - encFNC4 := "10111101110" - encStartB := "11010010000" - encStop := "1100011101011" - - testEncode(t, string(FNC1)+"123", encStartB+encFNC1+"10011100110"+"11001110010"+"11001011100"+"11001000010"+encStop) - testEncode(t, string(FNC2)+"123", encStartB+encFNC2+"10011100110"+"11001110010"+"11001011100"+"11100010110"+encStop) - testEncode(t, string(FNC3)+"123", encStartB+encFNC3+"10011100110"+"11001110010"+"11001011100"+"11101000110"+encStop) - testEncode(t, string(FNC4)+"123", encStartB+encFNC4+"10011100110"+"11001110010"+"11001011100"+"11100011010"+encStop) -} - -func Test_Unencodable(t *testing.T) { - if _, err := Encode(""); err == nil { - t.Fail() - } - if _, err := Encode("ä"); err == nil { - t.Fail() - } -} - -func Test_EncodeCTable(t *testing.T) { - testEncode(t, "HI345678H", "110100100001100010100011000100010101110111101000101100011100010110110000101001011110111011000101000111011000101100011101011") - testEncode(t, "334455", "11010011100101000110001000110111011101000110100100111101100011101011") -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/datamatrix/errorcorrection_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/datamatrix/errorcorrection_test.go deleted file mode 100644 index 78de8f70..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/datamatrix/errorcorrection_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package datamatrix - -import ( - "bytes" - "testing" -) - -func Test_GetPolynomial(t *testing.T) { - var gf_polys map[int][]int = map[int][]int{ - 5: []int{228, 48, 15, 111, 62}, 
- 7: []int{23, 68, 144, 134, 240, 92, 254}, - 10: []int{28, 24, 185, 166, 223, 248, 116, 255, 110, 61}, - 11: []int{175, 138, 205, 12, 194, 168, 39, 245, 60, 97, 120}, - 12: []int{41, 153, 158, 91, 61, 42, 142, 213, 97, 178, 100, 242}, - 14: []int{156, 97, 192, 252, 95, 9, 157, 119, 138, 45, 18, 186, 83, 185}, - 18: []int{83, 195, 100, 39, 188, 75, 66, 61, 241, 213, 109, 129, 94, 254, 225, 48, 90, 188}, - 20: []int{15, 195, 244, 9, 233, 71, 168, 2, 188, 160, 153, 145, 253, 79, 108, 82, 27, 174, 186, 172}, - 24: []int{52, 190, 88, 205, 109, 39, 176, 21, 155, 197, 251, 223, 155, 21, 5, 172, 254, 124, 12, 181, 184, 96, 50, 193}, - 28: []int{211, 231, 43, 97, 71, 96, 103, 174, 37, 151, 170, 53, 75, 34, 249, 121, 17, 138, 110, 213, 141, 136, 120, 151, 233, 168, 93, 255}, - 36: []int{245, 127, 242, 218, 130, 250, 162, 181, 102, 120, 84, 179, 220, 251, 80, 182, 229, 18, 2, 4, 68, 33, 101, 137, 95, 119, 115, 44, 175, 184, 59, 25, 225, 98, 81, 112}, - 42: []int{77, 193, 137, 31, 19, 38, 22, 153, 247, 105, 122, 2, 245, 133, 242, 8, 175, 95, 100, 9, 167, 105, 214, 111, 57, 121, 21, 1, 253, 57, 54, 101, 248, 202, 69, 50, 150, 177, 226, 5, 9, 5}, - 48: []int{245, 132, 172, 223, 96, 32, 117, 22, 238, 133, 238, 231, 205, 188, 237, 87, 191, 106, 16, 147, 118, 23, 37, 90, 170, 205, 131, 88, 120, 100, 66, 138, 186, 240, 82, 44, 176, 87, 187, 147, 160, 175, 69, 213, 92, 253, 225, 19}, - 56: []int{175, 9, 223, 238, 12, 17, 220, 208, 100, 29, 175, 170, 230, 192, 215, 235, 150, 159, 36, 223, 38, 200, 132, 54, 228, 146, 218, 234, 117, 203, 29, 232, 144, 238, 22, 150, 201, 117, 62, 207, 164, 13, 137, 245, 127, 67, 247, 28, 155, 43, 203, 107, 233, 53, 143, 46}, - 62: []int{242, 93, 169, 50, 144, 210, 39, 118, 202, 188, 201, 189, 143, 108, 196, 37, 185, 112, 134, 230, 245, 63, 197, 190, 250, 106, 185, 221, 175, 64, 114, 71, 161, 44, 147, 6, 27, 218, 51, 63, 87, 10, 40, 130, 188, 17, 163, 31, 176, 170, 4, 107, 232, 7, 94, 166, 224, 124, 86, 47, 11, 204}, - 68: []int{220, 228, 173, 89, 251, 149, 159, 56, 89, 33, 147, 244, 154, 36, 73, 127, 213, 136, 248, 180, 234, 197, 158, 177, 68, 122, 93, 213, 15, 160, 227, 236, 66, 139, 153, 185, 202, 167, 179, 25, 220, 232, 96, 210, 231, 136, 223, 239, 181, 241, 59, 52, 172, 25, 49, 232, 211, 189, 64, 54, 108, 153, 132, 63, 96, 103, 82, 186}, - } - - for i, tst := range gf_polys { - res := ec.getPolynomial(i) - if len(res) != len(tst) { - t.Fail() - } - for i := 0; i < len(res); i++ { - if res[i] != tst[i] { - t.Fail() - } - } - } -} - -func Test_CalcECC(t *testing.T) { - data := []byte{142, 164, 186} - var size *dmCodeSize = nil - for _, s := range codeSizes { - if s.DataCodewords() >= len(data) { - size = s - break - } - } - if size == nil { - t.Error("size not found") - } - - if bytes.Compare(ec.calcECC(data, size), []byte{142, 164, 186, 114, 25, 5, 88, 102}) != 0 { - t.Error("ECC Test 1 failed") - } - data = []byte{66, 129, 70} - if bytes.Compare(ec.calcECC(data, size), []byte{66, 129, 70, 138, 234, 82, 82, 95}) != 0 { - t.Error("ECC Test 2 failed") - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/ean/encoder_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/ean/encoder_test.go deleted file mode 100644 index 6f6da3f3..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/ean/encoder_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package ean - -import ( - "image/color" - "testing" -) - -func testHelper(t *testing.T, testCode, testResult, kind string, checkMetadata bool) { - code, err := Encode(testCode) - if err != 
nil { - t.Error(err) - } - if checkMetadata && (code.Metadata().Dimensions != 1 || code.Content() != testCode || code.Metadata().CodeKind != kind) { - t.Error("Metadata missmatch") - } - if len(testResult) != code.Bounds().Max.X { - t.Fail() - } - for i, r := range testResult { - if (code.At(i, 0) == color.Black) != (r == '1') { - t.Fail() - } - } -} - -func Test_EncodeEAN(t *testing.T) { - testHelper(t, "5901234123457", "10100010110100111011001100100110111101001110101010110011011011001000010101110010011101000100101", "EAN 13", true) - testHelper(t, "55123457", "1010110001011000100110010010011010101000010101110010011101000100101", "EAN 8", true) - testHelper(t, "5512345", "1010110001011000100110010010011010101000010101110010011101000100101", "EAN 8", false) - _, err := Encode("55123458") //<-- Invalid checksum - if err == nil { - t.Error("Invalid checksum not detected") - } - _, err = Encode("invalid") - if err == nil { - t.Error("\"invalid\" should not be encodable") - } - _, err = Encode("invalid") - if err == nil { - t.Error("\"invalid\" should not be encodable") - } - bits := encodeEAN13("invalid error") - if bits != nil { - t.Error("\"invalid error\" should not be encodable") - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/alphanumeric_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/alphanumeric_test.go deleted file mode 100644 index d8b1d38f..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/alphanumeric_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func makeString(length int, content string) string { - res := "" - - for i := 0; i < length; i++ { - res += content - } - - return res -} - -func Test_AlphaNumericEncoding(t *testing.T) { - encode := AlphaNumeric.getEncoder() - - x, vi, err := encode("HELLO WORLD", M) - - if x == nil || vi == nil || vi.Version != 1 || bytes.Compare(x.GetBytes(), []byte{32, 91, 11, 120, 209, 114, 220, 77, 67, 64, 236, 17, 236, 17, 236, 17}) != 0 { - t.Errorf("\"HELLO WORLD\" failed to encode: %s", err) - } - - x, vi, err = encode(makeString(4296, "A"), L) - if x == nil || vi == nil || err != nil { - t.Fail() - } - x, vi, err = encode(makeString(4297, "A"), L) - if x != nil || vi != nil || err == nil { - t.Fail() - } - x, vi, err = encode("ABc", L) - if x != nil || vi != nil || err == nil { - t.Fail() - } - x, vi, err = encode("hello world", M) - - if x != nil || vi != nil || err == nil { - t.Error("\"hello world\" should not be encodable in alphanumeric mode") - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/automatic_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/automatic_test.go deleted file mode 100644 index 07587d42..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/automatic_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func Test_AutomaticEncoding(t *testing.T) { - tests := map[string]encodeFn{ - "0123456789": Numeric.getEncoder(), - "ALPHA NUMERIC": AlphaNumeric.getEncoder(), - "unicode encoing": Unicode.getEncoder(), - "very long unicode encoding" + makeString(3000, "A"): nil, - } - - for str, enc := range tests { - testValue, _, _ := Auto.getEncoder()(str, M) - if enc != nil { - correctValue, _, _ := enc(str, M) - if testValue == nil || bytes.Compare(correctValue.GetBytes(), testValue.GetBytes()) != 0 { - t.Errorf("wrong encoding used for '%s'", str) - } - } else { - if testValue != nil { - 
t.Errorf("wrong encoding used for '%s'", str) - } - } - - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/blocks_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/blocks_test.go deleted file mode 100644 index 656fba05..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/blocks_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func Test_Blocks(t *testing.T) { - byteIt := make(chan byte) - go func() { - for _, b := range []byte{67, 85, 70, 134, 87, 38, 85, 194, 119, 50, 6, 18, 6, 103, 38, 246, 246, 66, 7, 118, 134, 242, 7, 38, 86, 22, 198, 199, 146, 6, 182, 230, 247, 119, 50, 7, 118, 134, 87, 38, 82, 6, 134, 151, 50, 7, 70, 247, 118, 86, 194, 6, 151, 50, 16, 236, 17, 236, 17, 236, 17, 236} { - byteIt <- b - } - close(byteIt) - }() - vi := &versionInfo{5, Q, 18, 2, 15, 2, 16} - - data := splitToBlocks(byteIt, vi).interleave(vi) - if bytes.Compare(data, []byte{67, 246, 182, 70, 85, 246, 230, 247, 70, 66, 247, 118, 134, 7, 119, 86, 87, 118, 50, 194, 38, 134, 7, 6, 85, 242, 118, 151, 194, 7, 134, 50, 119, 38, 87, 16, 50, 86, 38, 236, 6, 22, 82, 17, 18, 198, 6, 236, 6, 199, 134, 17, 103, 146, 151, 236, 38, 6, 50, 17, 7, 236, 213, 87, 148, 235, 199, 204, 116, 159, 11, 96, 177, 5, 45, 60, 212, 173, 115, 202, 76, 24, 247, 182, 133, 147, 241, 124, 75, 59, 223, 157, 242, 33, 229, 200, 238, 106, 248, 134, 76, 40, 154, 27, 195, 255, 117, 129, 230, 172, 154, 209, 189, 82, 111, 17, 10, 2, 86, 163, 108, 131, 161, 163, 240, 32, 111, 120, 192, 178, 39, 133, 141, 236}) != 0 { - t.Fail() - } - - byteIt2 := make(chan byte) - go func() { - for _, b := range []byte{67, 85, 70, 134, 87, 38, 85, 194, 119, 50, 6, 18, 6, 103, 38, 246, 246, 66, 7, 118, 134, 242, 7, 38, 86, 22, 198, 199, 146, 6, 182, 230, 247, 119, 50, 7, 118, 134, 87, 38, 82, 6, 134, 151, 50, 7, 70, 247, 118, 86, 194, 6, 151, 50, 16, 236, 17, 236, 17, 236, 17, 236} { - byteIt2 <- b - } - close(byteIt2) - }() - vi = &versionInfo{5, Q, 18, 2, 16, 2, 15} - - data = splitToBlocks(byteIt2, vi).interleave(vi) - if bytes.Compare(data, []byte{67, 246, 247, 247, 85, 66, 119, 118, 70, 7, 50, 86, 134, 118, 7, 194, 87, 134, 118, 6, 38, 242, 134, 151, 85, 7, 87, 50, 194, 38, 38, 16, 119, 86, 82, 236, 50, 22, 6, 17, 6, 198, 134, 236, 18, 199, 151, 17, 6, 146, 50, 236, 103, 6, 7, 17, 38, 182, 70, 236, 246, 230, 71, 101, 27, 62, 13, 91, 166, 86, 138, 16, 78, 229, 102, 11, 199, 107, 2, 182, 132, 103, 89, 66, 136, 69, 78, 255, 116, 129, 126, 163, 219, 234, 158, 216, 42, 234, 97, 62, 186, 59, 123, 148, 220, 191, 254, 145, 82, 95, 129, 79, 236, 254, 30, 174, 228, 50, 181, 110, 150, 205, 34, 235, 242, 0, 115, 147, 58, 243, 28, 140, 221, 219}) != 0 { - t.Fail() - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/encoder_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/encoder_test.go deleted file mode 100644 index 10479b6f..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/encoder_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package qr - -import ( - "fmt" - "image/png" - "os" - "testing" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode" -) - -type test struct { - Text string - Mode Encoding - ECL ErrorCorrectionLevel - Result string -} - -var tests = []test{ - test{ - Text: "hello world", - Mode: Unicode, - ECL: H, - Result: ` -+++++++.+.+.+...+.+++++++ -+.....+.++...+++..+.....+ -+.+++.+.+.+.++.++.+.+++.+ -+.+++.+....++.++..+.+++.+ 
-+.+++.+..+...++.+.+.+++.+ -+.....+.+..+..+++.+.....+ -+++++++.+.+.+.+.+.+++++++ -........++..+..+......... -..+++.+.+++.+.++++++..+++ -+++..+..+...++.+...+..+.. -+...+.++++....++.+..++.++ -++.+.+.++...+...+.+....++ -..+..+++.+.+++++.++++++++ -+.+++...+..++..++..+..+.. -+.....+..+.+.....+++++.++ -+.+++.....+...+.+.+++...+ -+.+..+++...++.+.+++++++.. -........+....++.+...+.+.. -+++++++......++++.+.+.+++ -+.....+....+...++...++.+. -+.+++.+.+.+...+++++++++.. -+.+++.+.++...++...+.++..+ -+.+++.+.++.+++++..++.+..+ -+.....+..+++..++.+.++...+ -+++++++....+..+.+..+..+++`, - }, -} - -func Test_GetUnknownEncoder(t *testing.T) { - if unknownEncoding.getEncoder() != nil { - t.Fail() - } -} - -func Test_EncodingStringer(t *testing.T) { - tests := map[Encoding]string{ - Auto: "Auto", - Numeric: "Numeric", - AlphaNumeric: "AlphaNumeric", - Unicode: "Unicode", - unknownEncoding: "", - } - - for enc, str := range tests { - if enc.String() != str { - t.Fail() - } - } -} - -func Test_InvalidEncoding(t *testing.T) { - _, err := Encode("hello world", H, Numeric) - if err == nil { - t.Fail() - } -} - -func imgStrToBools(str string) []bool { - res := make([]bool, 0, len(str)) - for _, r := range str { - if r == '+' { - res = append(res, true) - } else if r == '.' { - res = append(res, false) - } - } - return res -} - -func Test_Encode(t *testing.T) { - for _, tst := range tests { - res, err := Encode(tst.Text, tst.ECL, tst.Mode) - if err != nil { - t.Error(err) - } - qrCode, ok := res.(*qrcode) - if !ok { - t.Fail() - } - testRes := imgStrToBools(tst.Result) - if (qrCode.dimension * qrCode.dimension) != len(testRes) { - t.Fail() - } - t.Logf("dim %d", qrCode.dimension) - for i := 0; i < len(testRes); i++ { - x := i % qrCode.dimension - y := i / qrCode.dimension - if qrCode.Get(x, y) != testRes[i] { - t.Errorf("Failed at index %d", i) - } - } - } -} - -func ExampleEncode() { - f, _ := os.Create("qrcode.png") - defer f.Close() - - qrcode, err := Encode("hello world", L, Auto) - if err != nil { - fmt.Println(err) - } else { - qrcode, err = barcode.Scale(qrcode, 100, 100) - if err != nil { - fmt.Println(err) - } else { - png.Encode(f, qrcode) - } - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/errorcorrection_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/errorcorrection_test.go deleted file mode 100644 index 56ed3a14..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/errorcorrection_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func Test_LogTables(t *testing.T) { - for i := 1; i <= 255; i++ { - tmp := ec.fld.LogTbl[i] - if i != ec.fld.ALogTbl[tmp] { - t.Errorf("Invalid LogTables: %d", i) - } - } - - if ec.fld.ALogTbl[11] != 232 || ec.fld.ALogTbl[87] != 127 || ec.fld.ALogTbl[225] != 36 { - t.Fail() - } -} - -func Test_ErrorCorrection(t *testing.T) { - doTest := func(b []byte, ecc []byte) { - cnt := byte(len(ecc)) - res := ec.calcECC(b, cnt) - if bytes.Compare(res, ecc) != 0 { - t.Errorf("ECC error!\nGot: %v\nExpected:%v", res, ecc) - } - } - // Issue #5 - doTest([]byte{66, 196, 148, 21, 99, 19, 151, 151, 53, 149, 54, 195, 4, 133, 87, 84, 115, 85, 22, 148, 52, 71, 102, 68, 134, 182, 247, 119, 22, 68, 117, 134, 35, 4, 134, 38, 21, 84, 21, 117, 87, 164, 135, 115, 211, 208, 236, 17, 236, 17, 236, 17, 236, 17, 236}, []byte{187, 187, 171, 253, 164, 129, 104, 133, 3, 75, 87, 98, 241, 146, 138}) - - // Other tests - doTest([]byte{17, 168, 162, 241, 255, 205, 240, 179, 88, 101, 71, 130, 2, 54, 147, 
111, 232, 58, 202, 171, 85, 22, 229, 187}, []byte{30, 142, 171, 131, 189}) - doTest([]byte{36, 153, 55, 100, 228, 252, 0, 35, 85, 7, 237, 117, 182, 73, 83, 244, 8, 64, 55, 252, 200, 250, 72, 92, 97, 125, 96}, []byte{129, 124, 218, 148, 49, 108, 68, 255, 58, 212, 56, 60, 142, 45, 216, 124, 253, 214, 206, 208, 145, 169, 43}) - doTest([]byte{250, 195, 230, 128, 31, 168, 86, 123, 244, 129, 74, 130, 222, 225, 140, 129, 114, 132, 128, 88, 96, 13, 165, 132, 116, 22, 42, 81, 219, 3, 102, 156, 69, 70, 90, 68, 7, 245, 150, 160, 252, 121, 20}, []byte{124, 23, 233, 71, 200, 211, 54, 141, 10, 23, 206, 147, 116, 35, 45, 218, 158, 193, 80, 194, 129, 147, 8, 78, 229, 112, 89, 161, 167, 203, 11, 245, 186, 187, 17, 7, 175}) - doTest([]byte{121, 234, 24, 188, 218, 238, 248, 223, 98, 124, 237, 30, 98, 12, 9, 126, 5, 160, 240, 27, 174, 60, 152, 134, 71, 122, 125, 238, 223, 91, 231, 248, 230, 152, 250, 44, 17, 149, 0, 20, 109, 188, 227, 202}, []byte{209, 71, 225, 216, 240, 127, 111, 98, 194, 133, 114, 63, 35, 167, 184, 4, 209, 211, 40, 14, 74, 37, 21, 76, 95, 206, 90, 152, 110, 64, 6, 92, 80, 255, 127, 35, 111, 25, 1, 73}) - doTest([]byte{165, 233, 141, 34, 247, 216, 35, 163, 61, 61, 81, 146, 116, 96, 113, 10, 0, 6, 148, 244, 55, 201, 17, 220, 109, 111}, []byte{93, 173, 231, 160}) - doTest([]byte{173, 242, 89, 205, 24, 33, 213, 147, 96, 189, 100, 15, 213, 67, 91, 189, 218, 127, 32, 160, 162, 99, 187, 221, 53, 121, 238, 219, 215, 176, 181, 135, 56, 71, 246, 74, 228}, []byte{194, 130, 43, 168, 223, 144, 223, 49, 5, 162, 62, 218, 50, 205, 249, 84, 188, 25, 109, 110, 49, 224, 194, 244, 83, 221, 236, 71, 197, 159, 182}) - doTest([]byte{82, 138, 221, 169, 67, 161, 132, 31, 243, 110, 83, 1, 238, 79, 255, 57, 74, 54, 123, 151, 159, 50, 250, 188, 176, 8, 221, 215, 141, 77, 16}, []byte{197, 122, 225, 65, 40, 69, 153, 100, 73, 245, 150, 213, 104, 127, 3}) - doTest([]byte{5, 206, 21, 196, 185, 120, 60, 177, 90, 251, 109, 131, 174, 199, 55, 56, 14, 171, 19, 104, 236, 218, 31, 144, 33, 249, 58, 195, 173, 145, 166, 93, 122, 171, 232, 128, 233, 116, 144, 189, 62, 230, 68, 55, 140, 56, 1, 65, 165, 158, 127}, []byte{73, 141, 230, 252, 225, 173, 251, 194, 150, 98, 141, 241, 246, 11, 16, 8, 42}) - doTest([]byte{112, 106, 43, 174, 133, 163, 192, 61, 121, 3, 200, 84, 15, 9, 3, 222, 183, 78, 153, 26, 85, 41, 5, 149, 232, 3, 233, 247, 249, 29, 15, 18, 4, 96, 9, 64, 188, 210}, []byte{16, 254, 143, 110, 63, 167, 213, 242, 95, 78, 215, 145, 231, 59, 158, 36, 149, 247, 123, 114, 247, 202, 15, 56, 229, 163, 186, 73, 82, 230, 111, 108, 111, 182, 193, 46, 116}) - doTest([]byte{208, 128, 197, 227, 124, 226, 125, 46, 253, 98, 238, 80, 229, 134, 167, 70, 101, 150, 198, 130, 185, 200, 68, 91}, []byte{229, 167, 187, 39, 92, 90, 210, 25, 206, 237, 90, 194, 206, 39, 2, 11, 78, 48, 247}) - doTest([]byte{79, 175, 255, 194, 34, 229, 234, 200, 74, 213, 100, 33, 24, 5, 133, 186, 249, 151, 46, 190, 44, 126, 184, 195, 219, 37, 11, 225, 23, 8, 59, 106, 239, 198, 146, 205, 47, 59, 63, 9, 102, 29, 60, 209, 226, 67, 126, 193, 252, 255, 206, 172, 44, 53, 137, 209, 246}, []byte{237, 8, 12, 44, 90, 243, 24, 100, 123, 216, 185, 91, 182, 60, 9, 145, 126, 254, 139, 24, 211, 150, 219, 28, 138, 197, 13, 109, 227, 31, 60, 128, 237, 181, 183, 2, 138, 232, 112, 5}) - doTest([]byte{253, 217, 8, 176, 66, 153, 249, 49, 82, 114, 184, 139, 190, 87}, []byte{28, 55, 193, 193, 179, 246, 222, 5, 95, 96, 13, 242}) - doTest([]byte{15, 65, 231, 224, 151, 167, 74, 228, 23}, []byte{200, 90, 82}) - doTest([]byte{61, 186, 61, 193, 215, 243, 84, 66, 48, 93, 108, 249, 55, 232}, []byte{0, 
180, 53, 152, 134, 252, 165, 168}) - doTest([]byte{78, 68, 116, 15, 85}, []byte{36}) - doTest([]byte{122, 143}, []byte{245}) - doTest([]byte{78, 85, 143, 35}, []byte{226, 85}) - doTest([]byte{11, 188, 118, 21, 177, 224, 151, 105, 21, 245, 251, 162, 72, 175, 248, 134, 123, 251, 160, 163, 42, 57, 53, 222, 195, 49, 199, 151, 5, 236, 160, 57, 212, 241, 44, 43}, []byte{186, 106}) - doTest([]byte{157, 99, 220, 166, 63, 18, 225, 215, 71, 95, 99, 200, 218, 147, 131, 245, 222, 209, 135, 152, 82, 128, 24, 0, 100, 40, 84, 193, 205, 86, 130, 204, 235, 100, 94, 61}, []byte{41, 171, 66, 233}) - doTest([]byte{249, 34, 253, 235, 233, 104, 52, 60, 17, 13, 182, 223, 19, 91, 164, 2, 196, 29, 74, 219, 65, 23, 190, 31, 10, 241, 221, 150, 221, 118, 53, 69, 45, 90, 215, 100, 155, 102, 150, 176, 203, 39, 22, 70, 10, 238}, []byte{161, 49, 179, 149, 178, 146, 208, 144, 19, 158, 180, 152, 243, 138, 143, 243, 82, 112, 229, 10, 113, 255, 139, 246}) - doTest([]byte{39, 232, 159, 64, 242, 235, 66, 226, 100, 221, 225, 247, 139, 157, 95, 155}, []byte{41, 9, 244}) - doTest([]byte{177, 185, 131, 64, 103, 93, 134, 153, 15, 26, 0, 119, 21, 27, 174, 181, 111, 245, 214, 244, 83, 66, 24, 244, 255, 189, 133, 158, 37, 46, 199, 123, 110, 153, 61, 137, 163, 231, 129, 65, 186, 89, 219, 39, 226, 236, 199, 197, 73, 213}, []byte{37, 59, 125, 211, 249, 177, 107, 79, 107, 47, 242, 168, 49, 38, 168, 198, 199, 91, 212, 22, 107, 244}) - doTest([]byte{196, 226, 29, 110, 161, 143, 64, 169, 216, 231, 115}, []byte{253, 93, 218, 129, 37}) - doTest([]byte{133, 8, 124, 221, 36, 17, 135, 115, 149, 58, 250, 103, 241, 18, 19, 246, 191, 85, 80, 255, 93, 182, 140, 123, 206, 232, 20, 166, 216, 105, 210, 229, 249, 212, 93, 227, 75, 231, 36, 195, 166, 246, 47, 168, 35, 7, 176, 124, 44, 179, 24, 145}, []byte{78, 57, 134, 181, 215, 149, 111, 51, 172, 58, 114, 3, 140, 186, 126, 40, 190}) - doTest([]byte{245, 206, 124, 0, 15, 59, 253, 225, 155}, []byte{65, 14, 188, 213, 18, 113, 161, 16}) - doTest([]byte{20, 109, 28, 180, 48, 170, 216, 48, 140, 89, 103}, []byte{193, 147, 50, 209, 160}) - doTest([]byte{87, 198, 56, 151, 121, 37, 81, 64, 193, 24, 222, 142, 102, 74, 216, 233, 198, 197, 90, 4, 65, 14, 154, 147, 200, 252, 8, 64, 97, 150, 136, 141}, []byte{231, 190, 32, 90, 100, 40, 41, 103, 200, 200, 243, 75, 177, 7, 93, 28, 83, 47, 188, 236, 20, 95, 69, 104, 155, 102, 110, 197}) - doTest([]byte{168, 72, 2, 101, 103, 118, 218, 38, 82, 85, 62, 37, 201, 96, 255, 71, 198}, []byte{129, 33, 28, 228, 195, 120, 101, 46, 119, 126}) - doTest([]byte{130, 162, 73, 44, 165, 207, 124, 28, 17, 223, 43, 143, 81, 70, 205, 161, 143, 230, 97, 94, 228, 41, 26, 187, 69, 85, 162, 51, 168, 64, 26, 207, 245, 128}, []byte{6, 171}) - doTest([]byte{95, 28, 93, 149, 234, 89, 201, 71, 39, 197, 236, 223, 251, 190, 112, 96, 101, 53, 40, 88, 136, 141, 230, 80, 45, 73, 116, 208, 197, 91, 154, 209, 128, 214, 66, 114, 137, 204, 115, 139, 96, 211, 148, 127, 104, 194}, []byte{10, 102, 57, 95, 61, 212, 130, 71, 74, 58, 82, 115, 238, 213, 251, 184, 203, 250, 55, 186, 37, 16, 71, 247, 146, 194, 74, 208, 221, 6, 81, 172, 204, 73, 102, 40, 247, 174, 213, 37, 225, 246, 8, 58}) - doTest([]byte{207, 185, 106, 191, 87, 109, 110, 210, 54, 12, 103, 161, 228}, []byte{214, 138, 159, 195, 154, 236, 33, 243, 53, 79, 227}) - doTest([]byte{203, 43, 26, 94, 37, 123, 254, 215, 153, 193, 157, 248, 180, 249, 103, 232, 107, 17, 138, 0, 11, 240, 218, 122, 19, 103, 112, 60, 125, 100, 209, 166, 103, 81, 200, 84, 77, 100, 18, 110, 209, 225, 209, 254, 185, 116, 186, 216, 206, 36, 252, 144, 90, 247, 117, 219, 81, 160}, 
[]byte{185, 176, 106, 253, 76, 153, 185, 211, 187, 153, 210, 31, 99, 4, 46, 145, 221, 99, 236, 19, 126, 138, 66, 26, 40, 217, 170, 217, 147}) - doTest([]byte{11, 193, 90, 52, 239, 247, 144, 99, 48, 19, 154, 6, 255, 28, 47, 41, 30, 220}, []byte{235, 165, 125, 82, 28, 116, 21, 133, 243, 222, 241, 20, 134}) - doTest([]byte{173, 151, 109, 88, 104, 65, 76, 111, 219, 237, 2, 173, 25, 84, 98, 16, 135, 157, 14, 194, 228, 86, 167, 187, 137, 245, 144, 61, 200, 76, 188, 117, 223, 172, 16, 116, 84, 1, 203, 173, 170, 32, 135, 67, 16}, []byte{150, 31, 11, 211, 82, 221, 251, 84, 254, 121, 68, 34, 211, 142, 197, 246, 138, 204, 60, 197, 210, 238, 142, 234, 187, 200, 179, 228}) - doTest([]byte{171, 185, 30, 162, 129, 205, 254, 186, 86, 239, 178, 206, 115, 177, 14, 166, 143, 48, 141, 205, 109, 67, 238, 187, 134, 210, 96, 23, 195, 206, 100, 171, 156, 8, 229, 131, 169, 169, 59, 167, 224, 241, 185, 132, 162, 50, 87, 252, 156, 122, 248, 19, 130, 31, 127}, []byte{62, 42, 216, 109, 23, 176, 255, 137, 139, 90, 7, 186, 175, 243, 160, 206, 37, 94, 157, 217, 11, 169, 126, 41, 73, 133, 212, 232, 249, 117, 70, 147, 137, 156, 43, 243, 234, 155, 94, 38, 59, 211, 218, 165, 3, 33, 231, 237, 92, 16, 128}) - doTest([]byte{98, 28, 174, 108, 231, 247, 135, 139, 6, 50, 107, 203, 138, 252, 229, 245, 230, 236, 124, 138, 105, 25, 83, 122}, []byte{97, 214, 25, 2, 14, 48, 65, 212, 241, 200, 81, 57, 176, 59, 16, 55, 20, 91, 66}) - doTest([]byte{73, 214, 80, 41, 125, 136, 126, 184, 70, 141, 140, 58, 249, 250, 49, 249, 155, 0, 236, 49, 17, 125, 18, 29}, []byte{128, 16, 47, 235, 125, 128, 97, 245, 177, 210, 219, 195}) - doTest([]byte{3, 220, 98, 73, 200, 52, 8, 107, 173, 177, 58, 221, 180, 226, 76, 210, 182, 88, 104, 171, 243, 129, 88, 112, 126, 83, 141, 50, 106, 204, 195, 51, 141, 75, 132, 161}, []byte{110, 178, 213, 174, 1, 241, 95}) - doTest([]byte{196, 88, 50, 142, 76, 128, 190, 189, 76, 9, 228, 62, 198, 186, 180, 240, 62, 130, 132, 242}, []byte{244, 89, 17, 143, 3, 180, 150, 242, 167, 214, 209, 133, 120, 213, 173, 59, 25, 158, 251}) - doTest([]byte{166, 214, 1, 225, 237, 7, 80, 104, 94, 170, 125, 184, 148, 16, 121, 101, 52, 216, 177, 192, 6, 132, 77, 44, 5, 9, 126, 156, 12, 2, 29, 99, 51, 78, 177, 92, 140, 107, 146, 183, 109, 227, 171, 57, 193, 14, 37}, []byte{245, 46, 189, 11, 202, 195, 89, 53, 215, 172, 132, 196, 145, 141, 239, 160, 242, 7, 85, 251, 193, 85}) -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/numeric_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/numeric_test.go deleted file mode 100644 index 21aa9e34..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/numeric_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func Test_NumericEncoding(t *testing.T) { - encode := Numeric.getEncoder() - x, vi, err := encode("01234567", H) - if x == nil || vi == nil || vi.Version != 1 || bytes.Compare(x.GetBytes(), []byte{16, 32, 12, 86, 97, 128, 236, 17, 236}) != 0 { - t.Error("\"01234567\" failed to encode") - } - x, vi, err = encode("0123456789012345", H) - if x == nil || vi == nil || vi.Version != 1 || bytes.Compare(x.GetBytes(), []byte{16, 64, 12, 86, 106, 110, 20, 234, 80}) != 0 { - t.Error("\"0123456789012345\" failed to encode") - } - x, vi, err = encode("foo", H) - if err == nil { - t.Error("Numeric encoding should not be able to encode \"foo\"") - } - x, vi, err = encode(makeString(14297, "1"), H) - if x != nil || vi != nil || err == nil { - t.Fail() - } -} diff --git 
a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/qrcode_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/qrcode_test.go deleted file mode 100644 index 8cc17e3e..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/qrcode_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package qr - -import ( - "image/color" - "testing" -) - -func Test_NewQRCode(t *testing.T) { - bc := newBarcode(2) - if bc == nil { - t.Fail() - } - if bc.data.Len() != 4 { - t.Fail() - } - if bc.dimension != 2 { - t.Fail() - } -} - -func Test_QRBasics(t *testing.T) { - qr := newBarcode(10) - if qr.ColorModel() != color.Gray16Model { - t.Fail() - } - code, _ := Encode("test", L, Unicode) - if code.Content() != "test" { - t.Fail() - } - if code.Metadata().Dimensions != 2 { - t.Fail() - } - bounds := code.Bounds() - if bounds.Min.X != 0 || bounds.Min.Y != 0 || bounds.Max.X != 21 || bounds.Max.Y != 21 { - t.Fail() - } - if code.At(0, 0) != color.Black || code.At(0, 7) != color.White { - t.Fail() - } - qr = code.(*qrcode) - if !qr.Get(0, 0) || qr.Get(0, 7) { - t.Fail() - } - sum := qr.calcPenaltyRule1() + qr.calcPenaltyRule2() + qr.calcPenaltyRule3() + qr.calcPenaltyRule4() - if qr.calcPenalty() != sum { - t.Fail() - } -} - -func Test_Penalty1(t *testing.T) { - qr := newBarcode(7) - if qr.calcPenaltyRule1() != 70 { - t.Fail() - } - qr.Set(0, 0, true) - if qr.calcPenaltyRule1() != 68 { - t.Fail() - } - qr.Set(0, 6, true) - if qr.calcPenaltyRule1() != 66 { - t.Fail() - } -} - -func Test_Penalty2(t *testing.T) { - qr := newBarcode(3) - if qr.calcPenaltyRule2() != 12 { - t.Fail() - } - qr.Set(0, 0, true) - qr.Set(1, 1, true) - qr.Set(2, 0, true) - if qr.calcPenaltyRule2() != 0 { - t.Fail() - } - qr.Set(1, 1, false) - if qr.calcPenaltyRule2() != 6 { - t.Fail() - } -} - -func Test_Penalty3(t *testing.T) { - runTest := func(content string, result uint) { - code, _ := Encode(content, L, AlphaNumeric) - qr := code.(*qrcode) - if qr.calcPenaltyRule3() != result { - t.Errorf("Failed Penalty Rule 3 for content \"%s\" got %d but expected %d", content, qr.calcPenaltyRule3(), result) - } - } - runTest("A", 80) - runTest("FOO", 40) - runTest("0815", 0) -} - -func Test_Penalty4(t *testing.T) { - qr := newBarcode(3) - if qr.calcPenaltyRule4() != 100 { - t.Fail() - } - qr.Set(0, 0, true) - if qr.calcPenaltyRule4() != 70 { - t.Fail() - } - qr.Set(0, 1, true) - if qr.calcPenaltyRule4() != 50 { - t.Fail() - } - qr.Set(0, 2, true) - if qr.calcPenaltyRule4() != 30 { - t.Fail() - } - qr.Set(1, 0, true) - if qr.calcPenaltyRule4() != 10 { - t.Fail() - } - qr.Set(1, 1, true) - if qr.calcPenaltyRule4() != 10 { - t.Fail() - } - qr = newBarcode(2) - qr.Set(0, 0, true) - qr.Set(1, 0, true) - if qr.calcPenaltyRule4() != 0 { - t.Fail() - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/unicode_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/unicode_test.go deleted file mode 100644 index 76e5fbfe..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/unicode_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package qr - -import ( - "bytes" - "testing" -) - -func Test_UnicodeEncoding(t *testing.T) { - encode := Unicode.getEncoder() - x, vi, err := encode("A", H) // 65 - if x == nil || vi == nil || vi.Version != 1 || bytes.Compare(x.GetBytes(), []byte{64, 20, 16, 236, 17, 236, 17, 236, 17}) != 0 { - t.Errorf("\"A\" failed to encode: %s", err) - } - _, _, err = encode(makeString(3000, "A"), H) - if err == nil { - t.Error("Unicode encoding should not be 
able to encode a 3kb string") - } -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/versioninfo_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/versioninfo_test.go deleted file mode 100644 index f41aa37e..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr/versioninfo_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package qr - -import "testing" - -var testvi = &versionInfo{7, M, 0, 1, 10, 2, 5} // Fake versionInfo to run some of the tests - -func Test_ErrorCorrectionStringer(t *testing.T) { - tests := map[ErrorCorrectionLevel]string{ - L: "L", M: "M", Q: "Q", H: "H", ErrorCorrectionLevel(99): "unknown", - } - for ecl, str := range tests { - if ecl.String() != str { - t.Fail() - } - } -} - -func Test_CharCountBits(t *testing.T) { - v1 := &versionInfo{5, M, 0, 0, 0, 0, 0} - v2 := &versionInfo{15, M, 0, 0, 0, 0, 0} - v3 := &versionInfo{30, M, 0, 0, 0, 0, 0} - - if v1.charCountBits(numericMode) != 10 { - t.Fail() - } - if v1.charCountBits(alphaNumericMode) != 9 { - t.Fail() - } - if v1.charCountBits(byteMode) != 8 { - t.Fail() - } - if v1.charCountBits(kanjiMode) != 8 { - t.Fail() - } - if v2.charCountBits(numericMode) != 12 { - t.Fail() - } - if v2.charCountBits(alphaNumericMode) != 11 { - t.Fail() - } - if v2.charCountBits(byteMode) != 16 { - t.Fail() - } - if v2.charCountBits(kanjiMode) != 10 { - t.Fail() - } - if v3.charCountBits(numericMode) != 14 { - t.Fail() - } - if v3.charCountBits(alphaNumericMode) != 13 { - t.Fail() - } - if v3.charCountBits(byteMode) != 16 { - t.Fail() - } - if v3.charCountBits(kanjiMode) != 12 { - t.Fail() - } - if v1.charCountBits(encodingMode(3)) != 0 { - t.Fail() - } -} - -func Test_TotalDataBytes(t *testing.T) { - if testvi.totalDataBytes() != 20 { - t.Fail() - } -} - -func Test_ModulWidth(t *testing.T) { - if testvi.modulWidth() != 45 { - t.Fail() - } -} - -func Test_FindSmallestVersionInfo(t *testing.T) { - if findSmallestVersionInfo(H, alphaNumericMode, 10208) != nil { - t.Error("there should be no version with this capacity") - } - test := func(cap int, tVersion byte) { - v := findSmallestVersionInfo(H, alphaNumericMode, cap) - if v == nil || v.Version != tVersion { - t.Errorf("version %d should be returned.", tVersion) - } - } - test(10191, 40) - test(5591, 29) - test(5592, 30) - test(190, 3) - test(200, 4) -} - -type aligmnentTest struct { - version byte - patterns []int -} - -var allAligmnentTests = []*aligmnentTest{ - &aligmnentTest{1, []int{}}, - &aligmnentTest{2, []int{6, 18}}, - &aligmnentTest{3, []int{6, 22}}, - &aligmnentTest{4, []int{6, 26}}, - &aligmnentTest{5, []int{6, 30}}, - &aligmnentTest{6, []int{6, 34}}, - &aligmnentTest{7, []int{6, 22, 38}}, - &aligmnentTest{8, []int{6, 24, 42}}, - &aligmnentTest{9, []int{6, 26, 46}}, - &aligmnentTest{10, []int{6, 28, 50}}, - &aligmnentTest{11, []int{6, 30, 54}}, - &aligmnentTest{12, []int{6, 32, 58}}, - &aligmnentTest{13, []int{6, 34, 62}}, - &aligmnentTest{14, []int{6, 26, 46, 66}}, - &aligmnentTest{15, []int{6, 26, 48, 70}}, - &aligmnentTest{16, []int{6, 26, 50, 74}}, - &aligmnentTest{17, []int{6, 30, 54, 78}}, - &aligmnentTest{18, []int{6, 30, 56, 82}}, - &aligmnentTest{19, []int{6, 30, 58, 86}}, - &aligmnentTest{20, []int{6, 34, 62, 90}}, - &aligmnentTest{21, []int{6, 28, 50, 72, 94}}, - &aligmnentTest{22, []int{6, 26, 50, 74, 98}}, - &aligmnentTest{23, []int{6, 30, 54, 78, 102}}, - &aligmnentTest{24, []int{6, 28, 54, 80, 106}}, - &aligmnentTest{25, []int{6, 32, 58, 84, 110}}, - &aligmnentTest{26, []int{6, 30, 58, 86, 114}}, - 
&aligmnentTest{27, []int{6, 34, 62, 90, 118}}, - &aligmnentTest{28, []int{6, 26, 50, 74, 98, 122}}, - &aligmnentTest{29, []int{6, 30, 54, 78, 102, 126}}, - &aligmnentTest{30, []int{6, 26, 52, 78, 104, 130}}, - &aligmnentTest{31, []int{6, 30, 56, 82, 108, 134}}, - &aligmnentTest{32, []int{6, 34, 60, 86, 112, 138}}, - &aligmnentTest{33, []int{6, 30, 58, 86, 114, 142}}, - &aligmnentTest{34, []int{6, 34, 62, 90, 118, 146}}, - &aligmnentTest{35, []int{6, 30, 54, 78, 102, 126, 150}}, - &aligmnentTest{36, []int{6, 24, 50, 76, 102, 128, 154}}, - &aligmnentTest{37, []int{6, 28, 54, 80, 106, 132, 158}}, - &aligmnentTest{38, []int{6, 32, 58, 84, 110, 136, 162}}, - &aligmnentTest{39, []int{6, 26, 54, 82, 110, 138, 166}}, - &aligmnentTest{40, []int{6, 30, 58, 86, 114, 142, 170}}, -} - -func Test_AlignmentPatternPlacements(t *testing.T) { - for _, at := range allAligmnentTests { - vi := &versionInfo{at.version, M, 0, 0, 0, 0, 0} - - res := vi.alignmentPatternPlacements() - if len(res) != len(at.patterns) { - t.Errorf("number of alignmentpatterns missmatch for version %d", at.version) - } - for i := 0; i < len(res); i++ { - if res[i] != at.patterns[i] { - t.Errorf("alignmentpatterns for version %d missmatch on index %d", at.version, i) - } - } - - } - -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/twooffive/encoder_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/twooffive/encoder_test.go deleted file mode 100644 index 007e5c61..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/twooffive/encoder_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package twooffive - -import ( - "image/color" - "testing" -) - -func Test_AddCheckSum(t *testing.T) { - if sum, err := AddCheckSum("1234567"); err != nil || sum != "12345670" { - t.Fail() - } - if _, err := AddCheckSum("1ABC"); err == nil { - t.Fail() - } - if _, err := AddCheckSum(""); err == nil { - t.Fail() - } -} - -func Test_Encode(t *testing.T) { - _, err := Encode("FOOBAR", false) - if err == nil { - t.Error("\"FOOBAR\" should not be encodable") - } - - testEncode := func(interleaved bool, txt, testResult string) { - code, err := Encode(txt, interleaved) - if err != nil || code == nil { - t.Fail() - } else { - if code.Bounds().Max.X != len(testResult) { - t.Errorf("%v: length missmatch! 
%v != %v", txt, code.Bounds().Max.X, len(testResult)) - } else { - for i, r := range testResult { - if (code.At(i, 0) == color.Black) != (r == '1') { - t.Errorf("%v: code missmatch on position %d", txt, i) - } - } - } - } - } - - testEncode(false, "12345670", "1101101011101010101110101110101011101110111010101010101110101110111010111010101011101110101010101011101110101011101110101101011") - testEncode(true, "12345670", "1010110100101011001101101001010011010011001010101010011001101101") -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/galoisfield_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/galoisfield_test.go deleted file mode 100644 index fc8134fa..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/galoisfield_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package utils - -import ( - "testing" -) - -func Test_GF(t *testing.T) { - log := []int{ - 0, 255, 1, 240, 2, 225, 241, 53, 3, 38, 226, 133, 242, 43, 54, 210, - 4, 195, 39, 114, 227, 106, 134, 28, 243, 140, 44, 23, 55, 118, 211, 234, - 5, 219, 196, 96, 40, 222, 115, 103, 228, 78, 107, 125, 135, 8, 29, 162, - 244, 186, 141, 180, 45, 99, 24, 49, 56, 13, 119, 153, 212, 199, 235, 91, - 6, 76, 220, 217, 197, 11, 97, 184, 41, 36, 223, 253, 116, 138, 104, 193, - 229, 86, 79, 171, 108, 165, 126, 145, 136, 34, 9, 74, 30, 32, 163, 84, - 245, 173, 187, 204, 142, 81, 181, 190, 46, 88, 100, 159, 25, 231, 50, 207, - 57, 147, 14, 67, 120, 128, 154, 248, 213, 167, 200, 63, 236, 110, 92, 176, - 7, 161, 77, 124, 221, 102, 218, 95, 198, 90, 12, 152, 98, 48, 185, 179, - 42, 209, 37, 132, 224, 52, 254, 239, 117, 233, 139, 22, 105, 27, 194, 113, - 230, 206, 87, 158, 80, 189, 172, 203, 109, 175, 166, 62, 127, 247, 146, 66, - 137, 192, 35, 252, 10, 183, 75, 216, 31, 83, 33, 73, 164, 144, 85, 170, - 246, 65, 174, 61, 188, 202, 205, 157, 143, 169, 82, 72, 182, 215, 191, 251, - 47, 178, 89, 151, 101, 94, 160, 123, 26, 112, 232, 21, 51, 238, 208, 131, - 58, 69, 148, 18, 15, 16, 68, 17, 121, 149, 129, 19, 155, 59, 249, 70, - 214, 250, 168, 71, 201, 156, 64, 60, 237, 130, 111, 20, 93, 122, 177, 150, - } - - alog := []int{ - 1, 2, 4, 8, 16, 32, 64, 128, 45, 90, 180, 69, 138, 57, 114, 228, - 229, 231, 227, 235, 251, 219, 155, 27, 54, 108, 216, 157, 23, 46, 92, 184, - 93, 186, 89, 178, 73, 146, 9, 18, 36, 72, 144, 13, 26, 52, 104, 208, - 141, 55, 110, 220, 149, 7, 14, 28, 56, 112, 224, 237, 247, 195, 171, 123, - 246, 193, 175, 115, 230, 225, 239, 243, 203, 187, 91, 182, 65, 130, 41, 82, - 164, 101, 202, 185, 95, 190, 81, 162, 105, 210, 137, 63, 126, 252, 213, 135, - 35, 70, 140, 53, 106, 212, 133, 39, 78, 156, 21, 42, 84, 168, 125, 250, - 217, 159, 19, 38, 76, 152, 29, 58, 116, 232, 253, 215, 131, 43, 86, 172, - 117, 234, 249, 223, 147, 11, 22, 44, 88, 176, 77, 154, 25, 50, 100, 200, - 189, 87, 174, 113, 226, 233, 255, 211, 139, 59, 118, 236, 245, 199, 163, 107, - 214, 129, 47, 94, 188, 85, 170, 121, 242, 201, 191, 83, 166, 97, 194, 169, - 127, 254, 209, 143, 51, 102, 204, 181, 71, 142, 49, 98, 196, 165, 103, 206, - 177, 79, 158, 17, 34, 68, 136, 61, 122, 244, 197, 167, 99, 198, 161, 111, - 222, 145, 15, 30, 60, 120, 240, 205, 183, 67, 134, 33, 66, 132, 37, 74, - 148, 5, 10, 20, 40, 80, 160, 109, 218, 153, 31, 62, 124, 248, 221, 151, - 3, 6, 12, 24, 48, 96, 192, 173, 119, 238, 241, 207, 179, 75, 150, 1, - } - - gf := NewGaloisField(301) - if len(gf.LogTbl) != len(gf.ALogTbl) || len(gf.LogTbl) != len(log) { - t.Fail() - } - for i := 0; i < len(log); i++ { - if gf.LogTbl[i] != log[i] { - 
t.Error("Invalid Log Table") - } - if gf.ALogTbl[i] != alog[i] { - t.Error("Invalid ALog Table") - } - } - -} diff --git a/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/runeint_test.go b/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/runeint_test.go deleted file mode 100644 index f1fdbc5f..00000000 --- a/server/Godeps/_workspace/src/github.com/boombuler/barcode/utils/runeint_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package utils - -import "testing" - -func Test_RuneToIntIntToRune(t *testing.T) { - if IntToRune(0) != '0' { - t.Errorf("failed IntToRune(0) returned %d", string(IntToRune(0))) - } - if IntToRune(9) != '9' { - t.Errorf("failed IntToRune(9) returned %d", IntToRune(9)) - } - if IntToRune(10) != 'F' { - t.Errorf("failed IntToRune(10) returned %d", IntToRune(10)) - } - if RuneToInt('0') != 0 { - t.Error("failed RuneToInt('0') returned %d", RuneToInt(0)) - } - if RuneToInt('9') != 9 { - t.Error("failed RuneToInt('9') returned %d", RuneToInt(9)) - } - if RuneToInt('F') != -1 { - t.Error("failed RuneToInt('F') returned %d", RuneToInt('F')) - } -} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 00000000..80bed650 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 00000000..d6089146 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.3.3 + - 1.4.2 + - 1.5 + - tip diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 00000000..df83a9c2 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 00000000..bf0100f4 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,98 @@ +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html) + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) + +**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. + +## What the heck is a JWT? + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the information necessary for verifying the last part, the signature. For example, which signing method was used and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are HMAC-SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Parse and Verify + +Parsing and verifying tokens is pretty straightforward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key were used. + +```go + token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + return myLookupKey(token.Header["kid"]), nil + }) + + if err == nil && token.Valid { + deliverGoodness("!") + } else { + deliverUtterRejection(":(") + } +``` + +## Create a token + +```go + // Create the token + token := jwt.New(jwt.SigningMethodHS256) + // Set some claims + token.Claims["foo"] = "bar" + token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() + // Sign and get the complete encoded token as a string + tokenString, err := token.SignedString(mySigningKey) +``` + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
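To make that interface concrete, here is a minimal sketch of a custom method built on HMAC-SHA1. It is illustrative only and not part of the vendored library: the `HS1` algorithm name and the `main` wrapper are invented for the example, and it imports the library by its upstream path `github.com/dgrijalva/jwt-go` rather than the Godeps-vendored path used in this diff.

```go
package main

import (
	"crypto"
	"crypto/hmac"
	_ "crypto/sha1" // registers SHA-1 so crypto.SHA1.Available() returns true
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

// SigningMethodHS1 is a hypothetical HMAC-SHA1 method, shown only to
// illustrate the SigningMethod interface; prefer the built-in HS256 family.
type SigningMethodHS1 struct{}

func (m *SigningMethodHS1) Alg() string { return "HS1" }

// Sign produces a base64url-encoded HMAC over the signing string.
func (m *SigningMethodHS1) Sign(signingString string, key interface{}) (string, error) {
	keyBytes, ok := key.([]byte)
	if !ok {
		return "", jwt.ErrInvalidKey
	}
	hasher := hmac.New(crypto.SHA1.New, keyBytes)
	hasher.Write([]byte(signingString))
	return jwt.EncodeSegment(hasher.Sum(nil)), nil
}

// Verify recomputes the HMAC and compares it in constant time.
func (m *SigningMethodHS1) Verify(signingString, signature string, key interface{}) error {
	keyBytes, ok := key.([]byte)
	if !ok {
		return jwt.ErrInvalidKey
	}
	sig, err := jwt.DecodeSegment(signature)
	if err != nil {
		return err
	}
	hasher := hmac.New(crypto.SHA1.New, keyBytes)
	hasher.Write([]byte(signingString))
	if !hmac.Equal(sig, hasher.Sum(nil)) {
		return jwt.ErrSignatureInvalid
	}
	return nil
}

func main() {
	method := &SigningMethodHS1{}
	// Register a factory so jwt.Parse can resolve the method from the "alg" header.
	jwt.RegisterSigningMethod(method.Alg(), func() jwt.SigningMethod { return method })

	token := jwt.New(method)
	token.Claims["foo"] = "bar"
	signed, err := token.SignedString([]byte("secret"))
	if err != nil {
		fmt.Println("sign error:", err)
		return
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return []byte("secret"), nil
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("token valid?", parsed.Valid)
}
```

The factory registration mirrors how the built-in HS256/RS256/ES256 instances register themselves in their `init` functions later in this patch.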
+ +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Project Status & Versioning + +This library is considered production-ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command-line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more HTTP-centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b). diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000..9eb7ff9c --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,67 @@ +## `jwt-go` Version History + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go new file mode 100644 index 00000000..03564bc7 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go @@ -0,0 +1,210 @@ +// A useful example app. You can use this to debug your tokens on the command line. +// This is also a great place to look at how you might use this library. +// +// Example usage: +// The following will create and sign a token, then verify it and output the original claims. +// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" +) + +var ( + // Options + flagAlg = flag.String("alg", "", "signing algorithm identifier") + flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") + flagCompact = flag.Bool("compact", false, "output compact JSON") + flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") + + // Modes - exactly one of these is required + flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") + flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") +) + +func main() { + // Usage message if you ask for -help or if you mess up inputs. + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") + flag.PrintDefaults() + } + + // Parse command line options + flag.Parse() + + // Do the thing. If something goes wrong, print error to stderr + // and exit with a non-zero status code + if err := start(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +// Figure out which thing to do and then do that +func start() error { + if *flagSign != "" { + return signToken() + } else if *flagVerify != "" { + return verifyToken() + } else { + flag.Usage() + return fmt.Errorf("None of the required flags are present. 
What do you want me to do?") + } +} + +// Helper func: Read input from specified file or stdin +func loadData(p string) ([]byte, error) { + if p == "" { + return nil, fmt.Errorf("No path specified") + } + + var rdr io.Reader + if p == "-" { + rdr = os.Stdin + } else { + if f, err := os.Open(p); err == nil { + rdr = f + defer f.Close() + } else { + return nil, err + } + } + return ioutil.ReadAll(rdr) +} + +// Print a json object in accordance with the prophecy (or the command line options) +func printJSON(j interface{}) error { + var out []byte + var err error + + if *flagCompact == false { + out, err = json.MarshalIndent(j, "", " ") + } else { + out, err = json.Marshal(j) + } + + if err == nil { + fmt.Println(string(out)) + } + + return err +} + +// Verify a token and output the claims. This is a great example +// of how to verify and view a token. +func verifyToken() error { + // get the token + tokData, err := loadData(*flagVerify) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + // Parse the token. Load the key from command line option + token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { + data, err := loadData(*flagKey) + if err != nil { + return nil, err + } + if isEs() { + return jwt.ParseECPublicKeyFromPEM(data) + } + return data, nil + }) + + // Print some debug data + if *flagDebug && token != nil { + fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) + fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) + } + + // Print an error if we can't parse for some reason + if err != nil { + return fmt.Errorf("Couldn't parse token: %v", err) + } + + // Is token invalid? + if !token.Valid { + return fmt.Errorf("Token is invalid") + } + + // Print the token details + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +// Create, sign, and output a token. This is a great, simple example of +// how to use this library to create and sign a token. 
+func signToken() error { + // get the token data from command line arguments + tokData, err := loadData(*flagSign) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } else if *flagDebug { + fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) + } + + // parse the JSON of the claims + var claims map[string]interface{} + if err := json.Unmarshal(tokData, &claims); err != nil { + return fmt.Errorf("Couldn't parse claims JSON: %v", err) + } + + // get the key + var key interface{} + key, err = loadData(*flagKey) + if err != nil { + return fmt.Errorf("Couldn't read key: %v", err) + } + + // get the signing alg + alg := jwt.GetSigningMethod(*flagAlg) + if alg == nil { + return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) + } + + // create a new token + token := jwt.New(alg) + token.Claims = claims + + if isEs() { + if k, ok := key.([]byte); !ok { + return fmt.Errorf("Couldn't convert key data to key") + } else { + key, err = jwt.ParseECPrivateKeyFromPEM(k) + if err != nil { + return err + } + } + } + + if out, err := token.SignedString(key); err == nil { + fmt.Println(out) + } else { + return fmt.Errorf("Error signing token: %v", err) + } + + return nil +} + +func isEs() bool { + return strings.HasPrefix(*flagAlg, "ES") +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 00000000..0518ed10 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,147 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the
signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKey + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...)
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000..d19624b7 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000..e9e788ff --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,43 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid or of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + ErrNoTokenInRequest = errors.New("no token present in request") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + ValidationErrorExpired // Exp validation failed + ValidationErrorNotValidYet // NBF validation failed +) + +// The error from Parse if token is not valid +type ValidationError struct { + err string + Errors uint32 // bitfield. see ValidationError... 
constants +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.err == "" { + return "token is invalid" + } + return e.err +} + +// No errors +func (e *ValidationError) valid() bool { + if e.Errors > 0 { + return false + } + return true +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000..192e625f --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKey + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000..3fc27bfe --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,113 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed} + } + + var err error + token := &Token{Raw: tokenString} + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + if err = dec.Decode(&token.Claims); err != nil { + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable} + } + } else { + return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable} + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, &ValidationError{err: fmt.Sprintf("signing method %v is invalid", alg), Errors: ValidationErrorSignatureInvalid} + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short-circuiting validation + return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable} + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable} + } + + // Check expiration times + vErr := &ValidationError{} + now := TimeFunc().Unix() + if exp, ok := token.Claims["exp"].(float64); ok { + if now > int64(exp) { + vErr.err = "token is expired" + vErr.Errors |= ValidationErrorExpired + } + } + if nbf, ok := token.Claims["nbf"].(float64); ok { + if now < int64(nbf) { + vErr.err = "token is not valid yet" + vErr.Errors |= ValidationErrorNotValidYet + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.err = err.Error() + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 00000000..cddffced --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,114 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, the key must be either a PEM encoded PKCS1 or PKCS8 RSA public key as +// []byte, or an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + + switch k := key.(type) { + case []byte: + if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil { + return err + } + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, the key must be either a PEM encoded PKCS1 or PKCS8 RSA private key as +// []byte, or an rsa.PrivateKey structure.
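// A minimal sketch of the RSA signing flow (illustration only, not part of
// the vendored file), assuming the "github.com/dgrijalva/jwt-go" import path
// as above; "sample_key" is a placeholder path to a PEM encoded private key.
package main

import (
	"fmt"
	"io/ioutil"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Load and parse a PEM encoded PKCS1/PKCS8 RSA private key.
	pemBytes, err := ioutil.ReadFile("sample_key")
	if err != nil {
		panic(err)
	}
	privKey, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}

	// RS256 signs with the private key; verification uses the public half.
	token := jwt.New(jwt.SigningMethodRS256)
	token.Claims["sub"] = "plik"
	signed, err := token.SignedString(privKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}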
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var err error + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case []byte: + if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil { + return "", err + } + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 00000000..b5b70735 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + 
+ hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000..6f3b6ff0 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse a PEM encoded PKIX public key or certificate +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000..12cf0f3d --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,24 @@ +package jwt + +var signingMethods = map[string]func() SigningMethod{} + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for a signing method.
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem new file mode 100644 index 00000000..a6882b3e --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 +AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM +cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END EC PRIVATE KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem new file mode 100644 index 00000000..7191361e --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK +RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END PUBLIC KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem new file mode 100644 index 00000000..a86c823e --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy +SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb +ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ +GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= +-----END EC PRIVATE KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem new file mode 100644 index 00000000..e80d0056 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem @@ -0,0 +1,5 @@ +-----BEGIN PUBLIC KEY----- +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp +W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 +SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA +-----END PUBLIC KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem new file mode 100644 index 00000000..213afaf1 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH +QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw +12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN +4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 +8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== +-----END EC PRIVATE KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem new file mode 100644 index 
00000000..02ea0220 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem @@ -0,0 +1,6 @@ +-----BEGIN PUBLIC KEY----- +MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh +ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 +7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w +9vakvpzjPXhkvoMt/Tk= +-----END PUBLIC KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey new file mode 100644 index 00000000..435b8ddb --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey @@ -0,0 +1 @@ +#5K+~ew{Z(T(P.ZGwb="=.!r.O͚gЀ \ No newline at end of file diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key new file mode 100644 index 00000000..abdbade3 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn +SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i +cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC +PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR +ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA +Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 +n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy +MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 +POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE +KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM +IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn +FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY +mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj +FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U +I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs +2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn +/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT +OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 +EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ +hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 +4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb +mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry +eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 +CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ +9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq +-----END RSA PRIVATE KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub new file mode 100644 index 00000000..03dc982a --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 +fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 +mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp +HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 +XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b 
+ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy +7wIDAQAB +-----END PUBLIC KEY----- diff --git a/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000..d35aaa4a --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,126 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims map[string]interface{} // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: make(map[string]interface{}), + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i := range parts { + var source map[string]interface{} + if i == 0 { + source = t.Header + } else { + source = t.Claims + } + + var jsonValue []byte + if jsonValue, err = json.Marshal(source); err != nil { + return "", err + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +// Try to find the token in an http.Request. +// This method will call ParseMultipartForm if there's no token in the header. +// Currently, it looks in the Authorization header as well as +// looking for an 'access_token' request parameter in req.Form.
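// A minimal sketch of ParseFromRequest inside an http.Handler (illustration
// only, not part of the vendored file), assuming the same import path as
// above; hmacKey and the "/me" route are placeholders.
package main

import (
	"fmt"
	"net/http"

	jwt "github.com/dgrijalva/jwt-go"
)

var hmacKey = []byte("secret") // placeholder shared secret

func authed(w http.ResponseWriter, r *http.Request) {
	// Checks "Authorization: Bearer <token>" first, then the
	// access_token form parameter, as documented above.
	token, err := jwt.ParseFromRequest(r, func(t *jwt.Token) (interface{}, error) {
		return hmacKey, nil
	})
	if err != nil || !token.Valid {
		http.Error(w, "unauthorized", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "hello %v", token.Claims["sub"])
}

func main() {
	http.HandleFunc("/me", authed)
	http.ListenAndServe("127.0.0.1:8080", nil)
}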
+func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) { + + // Look for an Authorization header + if ah := req.Header.Get("Authorization"); ah != "" { + // Should be a bearer token + if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" { + return Parse(ah[7:], keyFunc) + } + } + + // Look for "access_token" parameter + req.ParseMultipartForm(10e6) + if tokStr := req.Form.Get("access_token"); tokStr != "" { + return Parse(tokStr, keyFunc) + } + + return nil, ErrNoTokenInRequest + +} + +// Encode a segment using JWT-specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode a segment using JWT-specific base64url encoding, restoring stripped padding first +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/server/Godeps/_workspace/src/github.com/facebookgo/clock/clock_test.go b/server/Godeps/_workspace/src/github.com/facebookgo/clock/clock_test.go deleted file mode 100644 index d8c56c32..00000000 --- a/server/Godeps/_workspace/src/github.com/facebookgo/clock/clock_test.go +++ /dev/null @@ -1,536 +0,0 @@ -package clock_test - -import ( - "fmt" - "os" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/clock" -) - -// Ensure that the clock's After channel sends at the correct time. -func TestClock_After(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(30 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - <-clock.New().After(20 * time.Millisecond) - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock's AfterFunc executes at the correct time. -func TestClock_AfterFunc(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(30 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - var wg sync.WaitGroup - wg.Add(1) - clock.New().AfterFunc(20*time.Millisecond, func() { - wg.Done() - }) - wg.Wait() - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock's time matches the standard library. -func TestClock_Now(t *testing.T) { - a := time.Now().Round(time.Second) - b := clock.New().Now().Round(time.Second) - if !a.Equal(b) { - t.Errorf("not equal: %s != %s", a, b) - } -} - -// Ensure that the clock sleeps for the appropriate amount of time. -func TestClock_Sleep(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(30 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - clock.New().Sleep(20 * time.Millisecond) - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock ticks correctly. -func TestClock_Tick(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(50 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - c := clock.New().Tick(20 * time.Millisecond) - <-c - <-c - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock's ticker ticks correctly.
-func TestClock_Ticker(t *testing.T) { - var ok bool - go func() { - time.Sleep(100 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(200 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - ticker := clock.New().Ticker(50 * time.Millisecond) - <-ticker.C - <-ticker.C - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock's ticker can stop correctly. -func TestClock_Ticker_Stop(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - gosched() - - ticker := clock.New().Ticker(20 * time.Millisecond) - <-ticker.C - ticker.Stop() - select { - case <-ticker.C: - t.Fatal("unexpected send") - case <-time.After(30 * time.Millisecond): - } -} - -// Ensure that the clock's timer waits correctly. -func TestClock_Timer(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - go func() { - time.Sleep(30 * time.Millisecond) - t.Fatal("too late") - }() - gosched() - - timer := clock.New().Timer(20 * time.Millisecond) - <-timer.C - if !ok { - t.Fatal("too early") - } -} - -// Ensure that the clock's timer can be stopped. -func TestClock_Timer_Stop(t *testing.T) { - var ok bool - go func() { - time.Sleep(10 * time.Millisecond) - ok = true - }() - - timer := clock.New().Timer(20 * time.Millisecond) - timer.Stop() - select { - case <-timer.C: - t.Fatal("unexpected send") - case <-time.After(30 * time.Millisecond): - } -} - -// Ensure that the mock's After channel sends at the correct time. -func TestMock_After(t *testing.T) { - var ok int32 - clock := clock.NewMock() - - // Create a channel to execute after 10 mock seconds. - ch := clock.After(10 * time.Second) - go func(ch <-chan time.Time) { - <-ch - atomic.StoreInt32(&ok, 1) - }(ch) - - // Move clock forward to just before the time. - clock.Add(9 * time.Second) - if atomic.LoadInt32(&ok) == 1 { - t.Fatal("too early") - } - - // Move clock forward to the after channel's time. - clock.Add(1 * time.Second) - if atomic.LoadInt32(&ok) == 0 { - t.Fatal("too late") - } -} - -// Ensure that the mock's AfterFunc executes at the correct time. -func TestMock_AfterFunc(t *testing.T) { - var ok int32 - clock := clock.NewMock() - - // Execute function after duration. - clock.AfterFunc(10*time.Second, func() { - atomic.StoreInt32(&ok, 1) - }) - - // Move clock forward to just before the time. - clock.Add(9 * time.Second) - if atomic.LoadInt32(&ok) == 1 { - t.Fatal("too early") - } - - // Move clock forward to the after channel's time. - clock.Add(1 * time.Second) - if atomic.LoadInt32(&ok) == 0 { - t.Fatal("too late") - } -} - -// Ensure that the mock's AfterFunc doesn't execute if stopped. -func TestMock_AfterFunc_Stop(t *testing.T) { - // Execute function after duration. - clock := clock.NewMock() - timer := clock.AfterFunc(10*time.Second, func() { - t.Fatal("unexpected function execution") - }) - gosched() - - // Stop timer & move clock forward. - timer.Stop() - clock.Add(10 * time.Second) - gosched() -} - -// Ensure that the mock's current time can be changed. -func TestMock_Now(t *testing.T) { - clock := clock.NewMock() - if now := clock.Now(); !now.Equal(time.Unix(0, 0)) { - t.Fatalf("expected epoch, got: %v", now) - } - - // Add 10 seconds and check the time. - clock.Add(10 * time.Second) - if now := clock.Now(); !now.Equal(time.Unix(10, 0)) { - t.Fatalf("expected 10 seconds after epoch, got: %v", now) - } -} - -// Ensure that the mock can sleep for the correct time.
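// A minimal sketch of the mock-clock pattern these deleted tests exercise
// (illustration only), assuming the vendored import path used above. Time is
// advanced deterministically with Add instead of sleeping.
package main

import (
	"fmt"
	"sync/atomic"
	"time"

	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/clock"
)

func main() {
	mock := clock.NewMock() // starts at the Unix epoch
	var fired int32
	mock.AfterFunc(10*time.Second, func() { atomic.StoreInt32(&fired, 1) })

	mock.Add(9 * time.Second)             // just before the deadline
	fmt.Println(atomic.LoadInt32(&fired)) // 0
	mock.Add(1 * time.Second)             // cross the deadline deterministically
	fmt.Println(atomic.LoadInt32(&fired)) // 1
}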
-func TestMock_Sleep(t *testing.T) { - var ok int32 - clock := clock.NewMock() - - // Sleep for 10 mock seconds in a goroutine. - go func() { - clock.Sleep(10 * time.Second) - atomic.StoreInt32(&ok, 1) - }() - gosched() - - // Move clock forward to just before the sleep duration. - clock.Add(9 * time.Second) - if atomic.LoadInt32(&ok) == 1 { - t.Fatal("too early") - } - - // Move clock forward past the sleep duration. - clock.Add(1 * time.Second) - if atomic.LoadInt32(&ok) == 0 { - t.Fatal("too late") - } -} - -// Ensure that the mock's Tick channel sends at the correct time. -func TestMock_Tick(t *testing.T) { - var n int32 - clock := clock.NewMock() - - // Create a channel to increment every 10 seconds. - go func() { - tick := clock.Tick(10 * time.Second) - for { - <-tick - atomic.AddInt32(&n, 1) - } - }() - gosched() - - // Move clock forward to just before the first tick. - clock.Add(9 * time.Second) - if atomic.LoadInt32(&n) != 0 { - t.Fatalf("expected 0, got %d", n) - } - - // Move clock forward to the start of the first tick. - clock.Add(1 * time.Second) - if atomic.LoadInt32(&n) != 1 { - t.Fatalf("expected 1, got %d", n) - } - - // Move clock forward over several ticks. - clock.Add(30 * time.Second) - if atomic.LoadInt32(&n) != 4 { - t.Fatalf("expected 4, got %d", n) - } -} - -// Ensure that the mock's Ticker channel sends at the correct time. -func TestMock_Ticker(t *testing.T) { - var n int32 - clock := clock.NewMock() - - // Create a channel to increment every microsecond. - go func() { - ticker := clock.Ticker(1 * time.Microsecond) - for { - <-ticker.C - atomic.AddInt32(&n, 1) - } - }() - gosched() - - // Move clock forward. - clock.Add(10 * time.Microsecond) - if atomic.LoadInt32(&n) != 10 { - t.Fatalf("unexpected: %d", n) - } -} - -// Ensure that the mock's Ticker channel won't block if not read from. -func TestMock_Ticker_Overflow(t *testing.T) { - clock := clock.NewMock() - ticker := clock.Ticker(1 * time.Microsecond) - clock.Add(10 * time.Microsecond) - ticker.Stop() -} - -// Ensure that the mock's Ticker can be stopped. -func TestMock_Ticker_Stop(t *testing.T) { - var n int32 - clock := clock.NewMock() - - // Create a channel to increment every second. - ticker := clock.Ticker(1 * time.Second) - go func() { - for { - <-ticker.C - atomic.AddInt32(&n, 1) - } - }() - gosched() - - // Move clock forward. - clock.Add(5 * time.Second) - if atomic.LoadInt32(&n) != 5 { - t.Fatalf("expected 5, got: %d", n) - } - - ticker.Stop() - - // Move clock forward again. - clock.Add(5 * time.Second) - if atomic.LoadInt32(&n) != 5 { - t.Fatalf("still expected 5, got: %d", n) - } -} - -// Ensure that multiple tickers can be used together. -func TestMock_Ticker_Multi(t *testing.T) { - var n int32 - clock := clock.NewMock() - - go func() { - a := clock.Ticker(1 * time.Microsecond) - b := clock.Ticker(3 * time.Microsecond) - - for { - select { - case <-a.C: - atomic.AddInt32(&n, 1) - case <-b.C: - atomic.AddInt32(&n, 100) - } - } - }() - gosched() - - // Move clock forward. - clock.Add(10 * time.Microsecond) - gosched() - if atomic.LoadInt32(&n) != 310 { - t.Fatalf("unexpected: %d", n) - } -} - -func ExampleMock_After() { - // Create a new mock clock. - clock := clock.NewMock() - count := 0 - - // Create a channel to execute after 10 mock seconds. - go func() { - <-clock.After(10 * time.Second) - count = 100 - }() - runtime.Gosched() - - // Print the starting value.
- fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Move the clock forward 5 seconds and print the value again. - clock.Add(5 * time.Second) - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Move the clock forward 5 seconds to the tick time and check the value. - clock.Add(5 * time.Second) - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Output: - // 1970-01-01 00:00:00 +0000 UTC: 0 - // 1970-01-01 00:00:05 +0000 UTC: 0 - // 1970-01-01 00:00:10 +0000 UTC: 100 -} - -func ExampleMock_AfterFunc() { - // Create a new mock clock. - clock := clock.NewMock() - count := 0 - - // Execute a function after 10 mock seconds. - clock.AfterFunc(10*time.Second, func() { - count = 100 - }) - runtime.Gosched() - - // Print the starting value. - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Move the clock forward 10 seconds and print the new value. - clock.Add(10 * time.Second) - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Output: - // 1970-01-01 00:00:00 +0000 UTC: 0 - // 1970-01-01 00:00:10 +0000 UTC: 100 -} - -func ExampleMock_Sleep() { - // Create a new mock clock. - clock := clock.NewMock() - count := 0 - - // Execute a function after 10 mock seconds. - go func() { - clock.Sleep(10 * time.Second) - count = 100 - }() - runtime.Gosched() - - // Print the starting value. - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Move the clock forward 10 seconds and print the new value. - clock.Add(10 * time.Second) - fmt.Printf("%s: %d\n", clock.Now().UTC(), count) - - // Output: - // 1970-01-01 00:00:00 +0000 UTC: 0 - // 1970-01-01 00:00:10 +0000 UTC: 100 -} - -func ExampleMock_Ticker() { - // Create a new mock clock. - clock := clock.NewMock() - count := 0 - - // Increment count every mock second. - go func() { - ticker := clock.Ticker(1 * time.Second) - for { - <-ticker.C - count++ - } - }() - runtime.Gosched() - - // Move the clock forward 10 seconds and print the new value. - clock.Add(10 * time.Second) - fmt.Printf("Count is %d after 10 seconds\n", count) - - // Move the clock forward 5 more seconds and print the new value. - clock.Add(5 * time.Second) - fmt.Printf("Count is %d after 15 seconds\n", count) - - // Output: - // Count is 10 after 10 seconds - // Count is 15 after 15 seconds -} - -func ExampleMock_Timer() { - // Create a new mock clock. - clock := clock.NewMock() - count := 0 - - // Increment count after a mock second. - go func() { - timer := clock.Timer(1 * time.Second) - <-timer.C - count++ - }() - runtime.Gosched() - - // Move the clock forward 10 seconds and print the new value. - clock.Add(10 * time.Second) - fmt.Printf("Count is %d after 10 seconds\n", count) - - // Output: - // Count is 1 after 10 seconds -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} - -func gosched() { time.Sleep(1 * time.Millisecond) } diff --git a/server/Godeps/_workspace/src/github.com/facebookgo/httpdown/httpdown_test.go b/server/Godeps/_workspace/src/github.com/facebookgo/httpdown/httpdown_test.go deleted file mode 100644 index c0115a07..00000000 --- a/server/Godeps/_workspace/src/github.com/facebookgo/httpdown/httpdown_test.go +++ /dev/null @@ -1,677 +0,0 @@ -package httpdown_test - -import ( - "bytes" - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "regexp" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/facebookgo/ensure" - "github.com/facebookgo/freeport" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/clock" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/httpdown" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/stats" -) - -type onCloseListener struct { - net.Listener - mutex sync.Mutex - onClose chan struct{} -} - -func (o *onCloseListener) Close() error { - // Listener is closed twice, once by Grace, and once by the http library, so - // we guard against a double close of the chan. - defer func() { - o.mutex.Lock() - defer o.mutex.Unlock() - if o.onClose != nil { - close(o.onClose) - o.onClose = nil - } - }() - return o.Listener.Close() -} - -func NewOnCloseListener(l net.Listener) (net.Listener, chan struct{}) { - c := make(chan struct{}) - return &onCloseListener{Listener: l, onClose: c}, c -} - -type closeErrListener struct { - net.Listener - err error -} - -func (c *closeErrListener) Close() error { - c.Listener.Close() - return c.err -} - -type acceptErrListener struct { - net.Listener - err chan error -} - -func (c *acceptErrListener) Accept() (net.Conn, error) { - return nil, <-c.err -} - -type closeErrConn struct { - net.Conn - unblockClose chan chan struct{} -} - -func (c *closeErrConn) Close() error { - ch := <-c.unblockClose - - // Close gets called multiple times, but only the first one gets this ch - if ch != nil { - defer close(ch) - } - - return c.Conn.Close() -} - -type closeErrConnListener struct { - net.Listener - unblockClose chan chan struct{} -} - -func (l *closeErrConnListener) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return c, err - } - return &closeErrConn{Conn: c, unblockClose: l.unblockClose}, nil -} - -func TestHTTPStopWithNoRequest(t *testing.T) { - t.Parallel() - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - - statsDone := make(chan struct{}, 2) - hc := &stats.HookClient{ - BumpSumHook: func(key string, val float64) { - if key == "serve" && val == 1 { - statsDone <- struct{}{} - } - if key == "stop" && val == 1 { - statsDone <- struct{}{} - } - }, - } - - server := &http.Server{} - down := &httpdown.HTTP{Stats: hc} - s := down.Serve(server, listener) - ensure.Nil(t, s.Stop()) - <-statsDone - <-statsDone -} - -func TestHTTPStopWithFinishedRequest(t *testing.T) { - t.Parallel() - hello := []byte("hello") - fin := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(fin) - w.Write(hello) - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - actualBody, err := 
ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, hello) - ensure.Nil(t, res.Body.Close()) - - // At this point the request is finished, and the connection should be alive - // but idle (because we have keep alive enabled by default in our Transport). - ensure.Nil(t, s.Stop()) - <-fin - - ensure.Nil(t, s.Wait()) -} - -func TestHTTPStopWithActiveRequest(t *testing.T) { - t.Parallel() - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.WriteHeader(200) - for i := 0; i < count; i++ { - w.Write(hello) - } - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - actualBody, err := ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, bytes.Repeat(hello, count)) - ensure.Nil(t, res.Body.Close()) - <-finOkHandler - <-finStop -} - -func TestNewRequestAfterStop(t *testing.T) { - t.Parallel() - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - unblockOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.WriteHeader(200) - const diff = 500 - for i := 0; i < count-diff; i++ { - w.Write(hello) - } - <-unblockOkHandler - for i := 0; i < diff; i++ { - w.Write(hello) - } - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - listener, onClose := NewOnCloseListener(listener) - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - // Wait until the listener is closed. - <-onClose - - // Now the next request should not be able to connect as the listener is - // now closed. - _, err = client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - - // We should just get "connection refused" here, but sometimes, very rarely, - // we get a "connection reset" instead. Unclear why this happens. - ensure.Err(t, err, regexp.MustCompile("(connection refused|connection reset by peer)$")) - - // Unblock the handler and ensure we finish writing the rest of the body - // successfully. 
- close(unblockOkHandler) - actualBody, err := ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, bytes.Repeat(hello, count)) - ensure.Nil(t, res.Body.Close()) - <-finOkHandler - <-finStop -} - -func TestHTTPListenerCloseError(t *testing.T) { - t.Parallel() - expectedError := errors.New("foo") - listener, err := net.Listen("tcp", "127.0.0.1:0") - listener = &closeErrListener{Listener: listener, err: expectedError} - ensure.Nil(t, err) - server := &http.Server{} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - ensure.DeepEqual(t, s.Stop(), expectedError) -} - -func TestHTTPServeError(t *testing.T) { - t.Parallel() - expectedError := errors.New("foo") - listener, err := net.Listen("tcp", "127.0.0.1:0") - errChan := make(chan error) - listener = &acceptErrListener{Listener: listener, err: errChan} - ensure.Nil(t, err) - server := &http.Server{} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - errChan <- expectedError - ensure.DeepEqual(t, s.Wait(), expectedError) - ensure.Nil(t, s.Stop()) -} - -func TestHTTPWithinStopTimeout(t *testing.T) { - t.Parallel() - hello := []byte("hello") - finOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.WriteHeader(200) - w.Write(hello) - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{StopTimeout: time.Minute} - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - actualBody, err := ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, hello) - ensure.Nil(t, res.Body.Close()) - <-finOkHandler - <-finStop -} - -func TestHTTPStopTimeoutMissed(t *testing.T) { - t.Parallel() - - klock := clock.NewMock() - - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - unblockOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.Header().Set("Content-Length", fmt.Sprint(len(hello)*count)) - w.WriteHeader(200) - for i := 0; i < count/2; i++ { - w.Write(hello) - } - <-unblockOkHandler - for i := 0; i < count/2; i++ { - w.Write(hello) - } - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{ - StopTimeout: time.Minute, - Clock: klock, - } - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - klock.Wait(clock.Calls{After: 1}) // wait for Stop to call After - klock.Add(down.StopTimeout) - - _, err = ioutil.ReadAll(res.Body) - ensure.Err(t, err, regexp.MustCompile("^unexpected EOF$")) - ensure.Nil(t, res.Body.Close()) - close(unblockOkHandler) - <-finOkHandler - <-finStop -} - -func TestHTTPKillTimeout(t *testing.T) { - t.Parallel() - - klock := clock.NewMock() - - statsDone := make(chan struct{}, 1) - hc := &stats.HookClient{ - BumpSumHook: func(key string, 
val float64) { - if key == "kill" && val == 1 { - statsDone <- struct{}{} - } - }, - } - - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - unblockOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.Header().Set("Content-Length", fmt.Sprint(len(hello)*count)) - w.WriteHeader(200) - for i := 0; i < count/2; i++ { - w.Write(hello) - } - <-unblockOkHandler - for i := 0; i < count/2; i++ { - w.Write(hello) - } - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{ - StopTimeout: time.Minute, - KillTimeout: time.Minute, - Stats: hc, - Clock: klock, - } - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - klock.Wait(clock.Calls{After: 1}) // wait for Stop to call After - klock.Add(down.StopTimeout) - - _, err = ioutil.ReadAll(res.Body) - ensure.Err(t, err, regexp.MustCompile("^unexpected EOF$")) - ensure.Nil(t, res.Body.Close()) - close(unblockOkHandler) - <-finOkHandler - <-finStop - <-statsDone -} - -func TestHTTPKillTimeoutMissed(t *testing.T) { - t.Parallel() - - klock := clock.NewMock() - - statsDone := make(chan struct{}, 1) - hc := &stats.HookClient{ - BumpSumHook: func(key string, val float64) { - if key == "kill.timeout" && val == 1 { - statsDone <- struct{}{} - } - }, - } - - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - unblockOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.Header().Set("Content-Length", fmt.Sprint(len(hello)*count)) - w.WriteHeader(200) - for i := 0; i < count/2; i++ { - w.Write(hello) - } - <-unblockOkHandler - for i := 0; i < count/2; i++ { - w.Write(hello) - } - } - - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - unblockConnClose := make(chan chan struct{}, 1) - listener = &closeErrConnListener{ - Listener: listener, - unblockClose: unblockConnClose, - } - - server := &http.Server{Handler: http.HandlerFunc(okHandler)} - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{ - StopTimeout: time.Minute, - KillTimeout: time.Minute, - Stats: hc, - Clock: klock, - } - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - - // Start the Stop process. - finStop := make(chan struct{}) - go func() { - defer close(finStop) - ensure.Nil(t, s.Stop()) - }() - - klock.Wait(clock.Calls{After: 1}) // wait for Stop to call After - klock.Add(down.StopTimeout) // trigger stop timeout - klock.Wait(clock.Calls{After: 2}) // wait for Kill to call After - klock.Add(down.KillTimeout) // trigger kill timeout - - // We hit both the StopTimeout & the KillTimeout. - <-finStop - - // Then we unblock the Close, so we get an unexpected EOF since we close - // before we finish writing the response. - connCloseDone := make(chan struct{}) - unblockConnClose <- connCloseDone - <-connCloseDone - close(unblockConnClose) - - // Then we unblock the handler which tries to write the rest of the data. 
- close(unblockOkHandler) - - _, err = ioutil.ReadAll(res.Body) - ensure.Err(t, err, regexp.MustCompile("^unexpected EOF$")) - ensure.Nil(t, res.Body.Close()) - <-finOkHandler - <-statsDone -} - -func TestDoubleStop(t *testing.T) { - t.Parallel() - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - ensure.Nil(t, s.Stop()) - ensure.Nil(t, s.Stop()) -} - -func TestExistingConnState(t *testing.T) { - t.Parallel() - hello := []byte("hello") - fin := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(fin) - w.Write(hello) - } - - var called int32 - listener, err := net.Listen("tcp", "127.0.0.1:0") - ensure.Nil(t, err) - server := &http.Server{ - Handler: http.HandlerFunc(okHandler), - ConnState: func(c net.Conn, s http.ConnState) { - atomic.AddInt32(&called, 1) - }, - } - transport := &http.Transport{} - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{} - s := down.Serve(server, listener) - res, err := client.Get(fmt.Sprintf("http://%s/", listener.Addr().String())) - ensure.Nil(t, err) - actualBody, err := ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, hello) - ensure.Nil(t, res.Body.Close()) - - ensure.Nil(t, s.Stop()) - <-fin - - ensure.True(t, atomic.LoadInt32(&called) > 0) -} - -func TestHTTPDefaultListenError(t *testing.T) { - if os.Getuid() == 0 { - t.Skip("cant run this test as root") - } - - statsDone := make(chan struct{}, 1) - hc := &stats.HookClient{ - BumpSumHook: func(key string, val float64) { - if key == "listen.error" && val == 1 { - statsDone <- struct{}{} - } - }, - } - - t.Parallel() - down := &httpdown.HTTP{Stats: hc} - _, err := down.ListenAndServe(&http.Server{}) - ensure.Err(t, err, regexp.MustCompile("listen tcp :80: bind: permission denied")) - <-statsDone -} - -func TestHTTPSDefaultListenError(t *testing.T) { - if os.Getuid() == 0 { - t.Skip("cant run this test as root") - } - t.Parallel() - - cert, err := tls.X509KeyPair(localhostCert, localhostKey) - if err != nil { - t.Fatalf("error loading cert: %v", err) - } - - down := &httpdown.HTTP{} - _, err = down.ListenAndServe(&http.Server{ - TLSConfig: &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - }, - }) - ensure.Err(t, err, regexp.MustCompile("listen tcp :443: bind: permission denied")) -} - -func TestTLS(t *testing.T) { - t.Parallel() - port, err := freeport.Get() - ensure.Nil(t, err) - - cert, err := tls.X509KeyPair(localhostCert, localhostKey) - if err != nil { - t.Fatalf("error loading cert: %v", err) - } - const count = 10000 - hello := []byte("hello") - finOkHandler := make(chan struct{}) - okHandler := func(w http.ResponseWriter, r *http.Request) { - defer close(finOkHandler) - w.WriteHeader(200) - for i := 0; i < count; i++ { - w.Write(hello) - } - } - - server := &http.Server{ - Addr: fmt.Sprintf("0.0.0.0:%d", port), - Handler: http.HandlerFunc(okHandler), - TLSConfig: &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - }, - } - transport := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - client := &http.Client{Transport: transport} - down := &httpdown.HTTP{} - s, err := down.ListenAndServe(server) - ensure.Nil(t, err) - res, err := client.Get(fmt.Sprintf("https://%s/", server.Addr)) - ensure.Nil(t, err) - - finStop := make(chan struct{}) - go func() { - defer close(finStop) - 
ensure.Nil(t, s.Stop()) - }() - - actualBody, err := ioutil.ReadAll(res.Body) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualBody, bytes.Repeat(hello, count)) - ensure.Nil(t, res.Body.Close()) - <-finOkHandler - <-finStop -} - -// localhostCert is a PEM-encoded TLS cert with SAN IPs -// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end -// of ASN.1 time). -// generated from src/pkg/crypto/tls: -// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h -var localhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD -bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj -bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBALyCfqwwip8BvTKgVKGdmjZTU8DD -ndR+WALmFPIRqn89bOU3s30olKiqYEju/SFoEvMyFRT/TWEhXHDaufThqaMCAwEA -AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud -EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA -AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAr/09uy108p51rheIOSnz4zgduyTl -M+4AmRo8/U1twEZLgfAGG/GZjREv2y4mCEUIM3HebCAqlA5jpRg76Rf8jw== ------END CERTIFICATE-----`) - -// localhostKey is the private key for localhostCert. -var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJBALyCfqwwip8BvTKgVKGdmjZTU8DDndR+WALmFPIRqn89bOU3s30o -lKiqYEju/SFoEvMyFRT/TWEhXHDaufThqaMCAwEAAQJAPXuWUxTV8XyAt8VhNQER -LgzJcUKb9JVsoS1nwXgPksXnPDKnL9ax8VERrdNr+nZbj2Q9cDSXBUovfdtehcdP -qQIhAO48ZsPylbTrmtjDEKiHT2Ik04rLotZYS2U873J6I7WlAiEAypDjYxXyafv/ -Yo1pm9onwcetQKMW8CS3AjuV9Axzj6cCIEx2Il19fEMG4zny0WPlmbrcKvD/DpJQ -4FHrzsYlIVTpAiAas7S1uAvneqd0l02HlN9OxQKKlbUNXNme+rnOnOGS2wIgS0jW -zl1jvrOSJeP1PpAHohWz6LOhEr8uvltWkN6x3vE= ------END RSA PRIVATE KEY-----`) diff --git a/server/Godeps/_workspace/src/github.com/facebookgo/stats/stats_test.go b/server/Godeps/_workspace/src/github.com/facebookgo/stats/stats_test.go deleted file mode 100644 index 07fa5ace..00000000 --- a/server/Godeps/_workspace/src/github.com/facebookgo/stats/stats_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package stats_test - -import ( - "testing" - - "github.com/facebookgo/ensure" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/stats" -) - -// Ensure calling End works even when a BumpTimeHook isn't provided. 
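// A minimal sketch of the HookClient/PrefixClient pattern the stats tests
// below exercise (illustration only), assuming the vendored import path; the
// "plik." prefix and "uploads" key are placeholders. Unset hooks are no-ops.
package main

import (
	"fmt"

	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/stats"
)

func main() {
	// HookClient routes each bump to an optional hook function.
	hc := &stats.HookClient{
		BumpSumHook: func(key string, val float64) { fmt.Println(key, val) },
	}

	// PrefixClient fans each bump out once per prefix.
	pc := stats.PrefixClient([]string{"plik."}, hc)
	pc.BumpSum("uploads", 1) // prints "plik.uploads 1"
}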
-func TestHookClientBumpTime(t *testing.T) { - (&stats.HookClient{}).BumpTime("foo").End() -} - -func TestPrefixClient(t *testing.T) { - const ( - prefix1 = "prefix1" - prefix2 = "prefix2" - avgKey = "avg" - avgVal = float64(1) - sumKey = "sum" - sumVal = float64(2) - histogramKey = "histogram" - histogramVal = float64(3) - timeKey = "time" - ) - - var keys []string - hc := &stats.HookClient{ - BumpAvgHook: func(key string, val float64) { - keys = append(keys, key) - ensure.DeepEqual(t, val, avgVal) - }, - BumpSumHook: func(key string, val float64) { - keys = append(keys, key) - ensure.DeepEqual(t, val, sumVal) - }, - BumpHistogramHook: func(key string, val float64) { - keys = append(keys, key) - ensure.DeepEqual(t, val, histogramVal) - }, - BumpTimeHook: func(key string) interface { - End() - } { - return multiEnderTest{ - EndHook: func() { - keys = append(keys, key) - }, - } - }, - } - - pc := stats.PrefixClient([]string{prefix1, prefix2}, hc) - pc.BumpAvg(avgKey, avgVal) - pc.BumpSum(sumKey, sumVal) - pc.BumpHistogram(histogramKey, histogramVal) - pc.BumpTime(timeKey).End() - - ensure.SameElements(t, keys, []string{ - prefix1 + avgKey, - prefix1 + sumKey, - prefix1 + histogramKey, - prefix1 + timeKey, - prefix2 + avgKey, - prefix2 + sumKey, - prefix2 + histogramKey, - prefix2 + timeKey, - }) -} - -type multiEnderTest struct { - EndHook func() -} - -func (e multiEnderTest) End() { - e.EndHook() -} diff --git a/server/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/server/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml index d87d4657..6796581f 100644 --- a/server/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml +++ b/server/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml @@ -4,4 +4,6 @@ go: - 1.0 - 1.1 - 1.2 + - 1.3 + - 1.4 - tip diff --git a/server/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/server/Godeps/_workspace/src/github.com/gorilla/context/context_test.go deleted file mode 100644 index 9814c501..00000000 --- a/server/Godeps/_workspace/src/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
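// A minimal sketch of the request-scoped store the deleted context tests
// below exercise (illustration only), assuming the upstream import path
// "github.com/gorilla/context"; the vendored path in this repository differs.
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/context"
)

func main() {
	r, _ := http.NewRequest("GET", "http://localhost/", nil)

	context.Set(r, "user", "alice") // attach a value scoped to this request
	fmt.Println(context.Get(r, "user"))

	context.Clear(r) // clear to avoid leaking entries for finished requests
}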
- -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Set(r, key, value) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b..00000000 --- a/server/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ 
-1,21 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import (
-	"net/http"
-	"testing"
-)
-
-func BenchmarkMux(b *testing.B) {
-	router := new(Router)
-	handler := func(w http.ResponseWriter, r *http.Request) {}
-	router.HandleFunc("/v1/{v1}", handler)
-
-	request, _ := http.NewRequest("GET", "/v1/anything", nil)
-	for i := 0; i < b.N; i++ {
-		router.ServeHTTP(nil, request)
-	}
-}
diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
index 7301407e..31c91b89 100644
--- a/server/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
+++ b/server/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
@@ -152,6 +152,13 @@ func (r *Router) getRegexpGroup() *routeRegexpGroup {
 	return nil
 }
 
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
 // ----------------------------------------------------------------------------
 // Route factories
 // ----------------------------------------------------------------------------
@@ -224,6 +231,12 @@ func (r *Router) Schemes(schemes ...string) *Route {
 	return r.NewRoute().Schemes(schemes...)
 }
 
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
 // ----------------------------------------------------------------------------
 // Context
 // ----------------------------------------------------------------------------
diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go
deleted file mode 100644
index f025dc43..00000000
--- a/server/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go
+++ /dev/null
@@ -1,943 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
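The mux.go hunk above, together with the route.go changes later in this diff, introduces BuildVarsFunc: a hook that rewrites route variables before a URL is reversed. A minimal sketch of the resulting API — the route, its name, and the slug normalization are illustrative, and the upstream gorilla/mux import path stands in for the vendored one:

```go
// Sketch: BuildVarsFunc rewrites route variables before mux builds a URL.
// Here every reversed URL gets a lower-cased slug.
package main

import (
	"fmt"
	"strings"

	"github.com/gorilla/mux" // stands in for the vendored copy
)

func main() {
	r := mux.NewRouter()
	r.Path("/articles/{slug}").
		Name("article").
		BuildVarsFunc(func(vars map[string]string) map[string]string {
			vars["slug"] = strings.ToLower(vars["slug"])
			return vars
		})

	u, err := r.Get("article").URL("slug", "Hello-Gorilla")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /articles/hello-gorilla
}
```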
- -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: 
new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - 
route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: 
new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: 
map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - 
testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != 
"" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) - if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a 
string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 1f7c190c..00000000 --- a/server/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). 
- PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. 
-func TestNewRegexp(t *testing.T) {
-	var p *routeRegexp
-	var matches []string
-
-	tests := map[string]map[string][]string{
-		"/{foo:a{2}}": {
-			"/a":    nil,
-			"/aa":   {"aa"},
-			"/aaa":  nil,
-			"/aaaa": nil,
-		},
-		"/{foo:a{2,}}": {
-			"/a":    nil,
-			"/aa":   {"aa"},
-			"/aaa":  {"aaa"},
-			"/aaaa": {"aaaa"},
-		},
-		"/{foo:a{2,3}}": {
-			"/a":    nil,
-			"/aa":   {"aa"},
-			"/aaa":  {"aaa"},
-			"/aaaa": nil,
-		},
-		"/{foo:[a-z]{3}}/{bar:[a-z]{2}}": {
-			"/a":       nil,
-			"/ab":      nil,
-			"/abc":     nil,
-			"/abcd":    nil,
-			"/abc/ab":  {"abc", "ab"},
-			"/abc/abc": nil,
-			"/abcd/ab": nil,
-		},
-		`/{foo:\w{3,}}/{bar:\d{2,}}`: {
-			"/a":        nil,
-			"/ab":       nil,
-			"/abc":      nil,
-			"/abc/1":    nil,
-			"/abc/12":   {"abc", "12"},
-			"/abcd/12":  {"abcd", "12"},
-			"/abcd/123": {"abcd", "123"},
-		},
-	}
-
-	for pattern, paths := range tests {
-		p, _ = newRouteRegexp(pattern, false, false, false, false)
-		for path, result := range paths {
-			matches = p.regexp.FindStringSubmatch(path)
-			if result == nil {
-				if matches != nil {
-					t.Errorf("%v should not match %v.", pattern, path)
-				}
-			} else {
-				if len(matches) != len(result)+1 {
-					t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches))
-				} else {
-					for k, v := range result {
-						if matches[k+1] != v {
-							t.Errorf("Expected %v, got %v.", v, matches[k+1])
-						}
-					}
-				}
-			}
-		}
-	}
-}
diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
index a6305483..aa306798 100644
--- a/server/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
+++ b/server/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
@@ -150,11 +150,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
 }
 
 // url builds a URL part using the given values.
-func (r *routeRegexp) url(pairs ...string) (string, error) {
-	values, err := mapFromPairs(pairs...)
-	if err != nil {
-		return "", err
-	}
+func (r *routeRegexp) url(values map[string]string) (string, error) {
 	urlValues := make([]interface{}, len(r.varsN))
 	for k, v := range r.varsN {
 		value, ok := values[v]
diff --git a/server/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/server/Godeps/_workspace/src/github.com/gorilla/mux/route.go
index c310e66b..d4f01468 100644
--- a/server/Godeps/_workspace/src/github.com/gorilla/mux/route.go
+++ b/server/Godeps/_workspace/src/github.com/gorilla/mux/route.go
@@ -31,6 +31,8 @@ type Route struct {
 	name string
 	// Error resulted from building a route.
 	err error
+
+	buildVarsFunc BuildVarsFunc
 }
 
 // Match matches the route against the request.
@@ -360,6 +362,19 @@ func (r *Route) Schemes(schemes ...string) *Route {
 	return r.addMatcher(schemeMatcher(schemes))
 }
 
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
 // Subrouter ------------------------------------------------------------------
 
 // Subrouter creates a subrouter for the route.
@@ -422,17 +437,20 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
 	if r.regexp == nil {
 		return nil, errors.New("mux: route doesn't have a host or path")
 	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
 	var scheme, host, path string
-	var err error
 	if r.regexp.host != nil {
 		// Set a default scheme.
 		scheme = "http"
-		if host, err = r.regexp.host.url(pairs...); err != nil {
+		if host, err = r.regexp.host.url(values); err != nil {
 			return nil, err
 		}
 	}
 	if r.regexp.path != nil {
-		if path, err = r.regexp.path.url(pairs...); err != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
 			return nil, err
 		}
 	}
@@ -453,7 +471,11 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
 	if r.regexp == nil || r.regexp.host == nil {
 		return nil, errors.New("mux: route doesn't have a host")
 	}
-	host, err := r.regexp.host.url(pairs...)
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
 	if err != nil {
 		return nil, err
 	}
@@ -473,7 +495,11 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
 	if r.regexp == nil || r.regexp.path == nil {
 		return nil, errors.New("mux: route doesn't have a path")
 	}
-	path, err := r.regexp.path.url(pairs...)
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
 	if err != nil {
 		return nil, err
 	}
@@ -482,6 +508,26 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
 	}, nil
 }
 
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
 // ----------------------------------------------------------------------------
 // parentRoute
 // ----------------------------------------------------------------------------
@@ -490,6 +536,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
 type parentRoute interface {
 	getNamedRoutes() map[string]*Route
 	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
 }
 
 // getNamedRoutes returns the map where named routes are registered.
diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/example_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/example_test.go
deleted file mode 100644
index 04e1651b..00000000
--- a/server/Godeps/_workspace/src/github.com/ncw/swift/example_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright...
-
-// This example demonstrates opening a Connection and doing some basic operations.
-package swift_test
-
-import (
-	"fmt"
-
-	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/ncw/swift"
-)
-
-func ExampleConnection() {
-	// Create a v1 auth connection
-	c := swift.Connection{
-		// This should be your username
-		UserName: "user",
-		// This should be your api key
-		ApiKey: "key",
-		// This should be a v1 auth url, eg
-		// Rackspace US        https://auth.api.rackspacecloud.com/v1.0
-		// Rackspace UK        https://lon.auth.api.rackspacecloud.com/v1.0
-		// Memset Memstore UK  https://auth.storage.memset.com/v1.0
-		AuthUrl: "auth_url",
-	}
-
-	// Authenticate
-	err := c.Authenticate()
-	if err != nil {
-		panic(err)
-	}
-	// List all the containers
-	containers, err := c.ContainerNames(nil)
-	fmt.Println(containers)
-	// etc...
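The route.go hunks above thread the new hook through URL(), URLHost(), and URLPath() via prepareVars(), and buildVars() recurses into the parent route first, so a parent's function runs before a subroute's. A sketch of that ordering, under the same assumptions as the previous example:

```go
// Sketch: per the buildVars chain added above, the parent route's
// BuildVarsFunc runs before the subroute's when a URL is reversed.
package main

import (
	"fmt"

	"github.com/gorilla/mux" // stands in for the vendored copy
)

func main() {
	r := mux.NewRouter()
	sub := r.Host("{v1}.example.com").
		BuildVarsFunc(func(vars map[string]string) map[string]string {
			vars["v1"] = "api" // parent hook: applied first
			return vars
		}).
		Subrouter()
	route := sub.Path("/{v2}").
		BuildVarsFunc(func(vars map[string]string) map[string]string {
			vars["v2"] += "-v2" // subroute hook: applied second
			return vars
		})

	u, err := route.URL("v1", "www", "v2", "things")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // http://api.example.com/things-v2
}
```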
- - // ------ or alternatively create a v2 connection ------ - - // Create a v2 auth connection - c = swift.Connection{ - // This is the sub user for the storage - eg "admin" - UserName: "user", - // This should be your api key - ApiKey: "key", - // This should be a version2 auth url, eg - // Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 - // Memset Memstore v2 https://auth.storage.memset.com/v2.0 - AuthUrl: "v2_auth_url", - // Region to use - default is use first region if unset - Region: "LON", - // Name of the tenant - this is likely your username - Tenant: "jim", - } - - // as above... -} - -var container string - -func ExampleConnection_ObjectsWalk() { - objects := make([]string, 0) - err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - fmt.Println("Found all the objects", objects, err) -} - -func ExampleConnection_VersionContainerCreate() { - // Use the helper method to create the current and versions container. - if err := c.VersionContainerCreate("cds", "cd-versions"); err != nil { - fmt.Print(err.Error()) - } -} - -func ExampleConnection_VersionEnable() { - // Build the containers manually and enable them. - if err := c.ContainerCreate("movie-versions", nil); err != nil { - fmt.Print(err.Error()) - } - if err := c.ContainerCreate("movies", nil); err != nil { - fmt.Print(err.Error()) - } - if err := c.VersionEnable("movies", "movie-versions"); err != nil { - fmt.Print(err.Error()) - } - - // Access the primary container as usual with ObjectCreate(), ObjectPut(), etc. - // etc... -} - -func ExampleConnection_VersionDisable() { - // Disable versioning on a container. Note that this does not delete the versioning container. 
- c.VersionDisable("movies") -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go deleted file mode 100644 index 47560d57..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go +++ /dev/null @@ -1,213 +0,0 @@ -// Tests for swift metadata -package swift - -import ( - "testing" - "time" -) - -func TestHeadersToMetadata(t *testing.T) { -} - -func TestHeadersToAccountMetadata(t *testing.T) { -} - -func TestHeadersToContainerMetadata(t *testing.T) { -} - -func TestHeadersToObjectMetadata(t *testing.T) { -} - -func TestMetadataToHeaders(t *testing.T) { -} - -func TestMetadataToAccountHeaders(t *testing.T) { -} - -func TestMetadataToContainerHeaders(t *testing.T) { -} - -func TestMetadataToObjectHeaders(t *testing.T) { -} - -func TestNsToFloatString(t *testing.T) { - for _, d := range []struct { - ns int64 - fs string - }{ - {0, "0"}, - {1, "0.000000001"}, - {1000, "0.000001"}, - {1000000, "0.001"}, - {100000000, "0.1"}, - {1000000000, "1"}, - {10000000000, "10"}, - {12345678912, "12.345678912"}, - {12345678910, "12.34567891"}, - {12345678900, "12.3456789"}, - {12345678000, "12.345678"}, - {12345670000, "12.34567"}, - {12345600000, "12.3456"}, - {12345000000, "12.345"}, - {12340000000, "12.34"}, - {12300000000, "12.3"}, - {12000000000, "12"}, - {10000000000, "10"}, - {1347717491123123123, "1347717491.123123123"}, - } { - if nsToFloatString(d.ns) != d.fs { - t.Error("Failed", d.ns, "!=", d.fs) - } - if d.ns > 0 && nsToFloatString(-d.ns) != "-"+d.fs { - t.Error("Failed on negative", d.ns, "!=", d.fs) - } - } -} - -func TestFloatStringToNs(t *testing.T) { - for _, d := range []struct { - ns int64 - fs string - }{ - {0, "0"}, - {0, "0."}, - {0, ".0"}, - {0, "0.0"}, - {0, "0.0000000001"}, - {1, "0.000000001"}, - {1000, "0.000001"}, - {1000000, "0.001"}, - {100000000, "0.1"}, - {100000000, "0.10"}, - {100000000, "0.1000000001"}, - {1000000000, "1"}, - {1000000000, "1."}, - {1000000000, "1.0"}, - {10000000000, "10"}, - {12345678912, "12.345678912"}, - {12345678912, "12.3456789129"}, - {12345678912, "12.34567891299"}, - {12345678910, "12.34567891"}, - {12345678900, "12.3456789"}, - {12345678000, "12.345678"}, - {12345670000, "12.34567"}, - {12345600000, "12.3456"}, - {12345000000, "12.345"}, - {12340000000, "12.34"}, - {12300000000, "12.3"}, - {12000000000, "12"}, - {10000000000, "10"}, - // This is a typical value which has more bits in than a float64 - {1347717491123123123, "1347717491.123123123"}, - } { - ns, err := floatStringToNs(d.fs) - if err != nil { - t.Error("Failed conversion", err) - } - if ns != d.ns { - t.Error("Failed", d.fs, "!=", d.ns, "was", ns) - } - if d.ns > 0 { - ns, err := floatStringToNs("-" + d.fs) - if err != nil { - t.Error("Failed conversion", err) - } - if ns != -d.ns { - t.Error("Failed on negative", -d.ns, "!=", "-"+d.fs) - } - } - } - - // These are expected to produce errors - for _, fs := range []string{ - "", - " 1", - "- 1", - "- 1", - "1.-1", - "1.0.0", - "1x0", - } { - ns, err := floatStringToNs(fs) - if err == nil { - t.Error("Didn't produce expected error", fs, ns) - } - } - -} - -func TestGetModTime(t *testing.T) { - for _, d := range []struct { - ns string - t string - }{ - {"1354040105", "2012-11-27T18:15:05Z"}, - {"1354040105.", "2012-11-27T18:15:05Z"}, - {"1354040105.0", "2012-11-27T18:15:05Z"}, - {"1354040105.000000000000", "2012-11-27T18:15:05Z"}, - {"1354040105.123", "2012-11-27T18:15:05.123Z"}, - {"1354040105.123456", 
"2012-11-27T18:15:05.123456Z"}, - {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, - {"1354040105.123456789123", "2012-11-27T18:15:05.123456789Z"}, - {"0", "1970-01-01T00:00:00.000000000Z"}, - } { - expected, err := time.Parse(time.RFC3339, d.t) - if err != nil { - t.Error("Bad test", err) - } - m := Metadata{"mtime": d.ns} - actual, err := m.GetModTime() - if err != nil { - t.Error("Parse error", err) - } - if !actual.Equal(expected) { - t.Error("Expecting", expected, expected.UnixNano(), "got", actual, actual.UnixNano()) - } - } - for _, ns := range []string{ - "EMPTY", - "", - " 1", - "- 1", - "- 1", - "1.-1", - "1.0.0", - "1x0", - } { - m := Metadata{} - if ns != "EMPTY" { - m["mtime"] = ns - } - actual, err := m.GetModTime() - if err == nil { - t.Error("Expected error not produced") - } - if !actual.IsZero() { - t.Error("Expected output to be zero") - } - } -} - -func TestSetModTime(t *testing.T) { - for _, d := range []struct { - ns string - t string - }{ - {"1354040105", "2012-11-27T18:15:05Z"}, - {"1354040105", "2012-11-27T18:15:05.000000Z"}, - {"1354040105.123", "2012-11-27T18:15:05.123Z"}, - {"1354040105.123456", "2012-11-27T18:15:05.123456Z"}, - {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, - {"0", "1970-01-01T00:00:00.000000000Z"}, - } { - time, err := time.Parse(time.RFC3339, d.t) - if err != nil { - t.Error("Bad test", err) - } - m := Metadata{} - m.SetModTime(time) - if m["mtime"] != d.ns { - t.Error("mtime wrong", m, "should be", d.ns) - } - } -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go deleted file mode 100644 index f6fbadf2..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// See swift_test.go for requirements to run this test. -package rs_test - -import ( - "os" - "testing" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/ncw/swift/rs" -) - -var ( - c rs.RsConnection -) - -const ( - CONTAINER = "GoSwiftUnitTest" - OBJECT = "test_object" - CONTENTS = "12345" - CONTENT_SIZE = int64(len(CONTENTS)) - CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" -) - -// Test functions are run in order - this one must be first! 
-func TestAuthenticate(t *testing.T) { - UserName := os.Getenv("SWIFT_API_USER") - ApiKey := os.Getenv("SWIFT_API_KEY") - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - if UserName == "" || ApiKey == "" || AuthUrl == "" { - t.Fatal("SWIFT_API_USER, SWIFT_API_KEY and SWIFT_AUTH_URL not all set") - } - c = rs.RsConnection{} - c.UserName = UserName - c.ApiKey = ApiKey - c.AuthUrl = AuthUrl - err := c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -// Setup -func TestContainerCreate(t *testing.T) { - err := c.ContainerCreate(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } -} - -func TestCDNEnable(t *testing.T) { - headers, err := c.ContainerCDNEnable(CONTAINER, 0) - if err != nil { - t.Error(err) - } - if _, ok := headers["X-Cdn-Uri"]; !ok { - t.Error("Failed to enable CDN for container") - } -} - -func TestOnReAuth(t *testing.T) { - c2 := rs.RsConnection{} - c2.UserName = c.UserName - c2.ApiKey = c.ApiKey - c2.AuthUrl = c.AuthUrl - _, err := c2.ContainerCDNEnable(CONTAINER, 0) - if err != nil { - t.Fatalf("Failed to reauthenticate: %v", err) - } -} - -func TestCDNMeta(t *testing.T) { - headers, err := c.ContainerCDNMeta(CONTAINER) - if err != nil { - t.Error(err) - } - if _, ok := headers["X-Cdn-Uri"]; !ok { - t.Error("CDN is not enabled") - } -} - -func TestCDNDisable(t *testing.T) { - err := c.ContainerCDNDisable(CONTAINER) // files stick in CDN until TTL expires - if err != nil { - t.Error(err) - } -} - -// Teardown -func TestContainerDelete(t *testing.T) { - err := c.ContainerDelete(CONTAINER) - if err != nil { - t.Fatal(err) - } -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go deleted file mode 100644 index e8b1f437..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go +++ /dev/null @@ -1,409 +0,0 @@ -// This tests the swift package internals -// -// It does not require access to a swift server -// -// FIXME need to add more tests and to check URLs and parameters -package swift - -import ( - "fmt" - "io" - "net" - "net/http" - "testing" - - // "net/http/httputil" - // "os" -) - -const ( - TEST_ADDRESS = "localhost:5324" - AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0" - PROXY_URL = "http://" + TEST_ADDRESS + "/proxy" - USERNAME = "test" - APIKEY = "apikey" - AUTH_TOKEN = "token" -) - -// Globals -var ( - server *SwiftServer - c *Connection -) - -// SwiftServer implements a test swift server -type SwiftServer struct { - t *testing.T - checks []*Check -} - -// Used to check and reply to http transactions -type Check struct { - in Headers - out Headers - rx *string - tx *string - err *Error - url *string -} - -// Add a in check -func (check *Check) In(in Headers) *Check { - check.in = in - return check -} - -// Add an out check -func (check *Check) Out(out Headers) *Check { - check.out = out - return check -} - -// Add an Error check -func (check *Check) Error(StatusCode int, Text string) *Check { - check.err = newError(StatusCode, Text) - return check -} - -// Add a rx check -func (check *Check) Rx(rx string) *Check { - check.rx = &rx - return check -} - -// Add an tx check -func (check *Check) Tx(tx string) *Check { - check.tx = &tx - return check -} - -// Add an URL check -func (check *Check) Url(url string) *Check { - check.url = &url - return check -} - -// Add a check -func (s *SwiftServer) AddCheck(t *testing.T) *Check { - server.t = t - check := &Check{ 
- in: Headers{}, - out: Headers{}, - err: nil, - } - s.checks = append(s.checks, check) - return check -} - -// Responds to a request -func (s *SwiftServer) Respond(w http.ResponseWriter, r *http.Request) { - if len(s.checks) < 1 { - s.t.Fatal("Unexpected http transaction") - } - check := s.checks[0] - s.checks = s.checks[1:] - - // Check URL - if check.url != nil && *check.url != r.URL.String() { - s.t.Errorf("Expecting URL %q but got %q", *check.url, r.URL) - } - - // Check headers - for k, v := range check.in { - actual := r.Header.Get(k) - if actual != v { - s.t.Errorf("Expecting header %q=%q but got %q", k, v, actual) - } - } - // Write output headers - h := w.Header() - for k, v := range check.out { - h.Set(k, v) - } - // Return an error if required - if check.err != nil { - http.Error(w, check.err.Text, check.err.StatusCode) - } else { - if check.tx != nil { - _, err := w.Write([]byte(*check.tx)) - if err != nil { - s.t.Error("Write failed", err) - } - } - } -} - -// Checks to see all responses are used up -func (s *SwiftServer) Finished() { - if len(s.checks) > 0 { - s.t.Error("Unused checks", s.checks) - } -} - -func handle(w http.ResponseWriter, r *http.Request) { - // out, _ := httputil.DumpRequest(r, true) - // os.Stdout.Write(out) - server.Respond(w, r) -} - -func NewSwiftServer() *SwiftServer { - server := &SwiftServer{} - http.HandleFunc("/", handle) - go http.ListenAndServe(TEST_ADDRESS, nil) - fmt.Print("Waiting for server to start ") - for { - fmt.Print(".") - conn, err := net.Dial("tcp", TEST_ADDRESS) - if err == nil { - conn.Close() - fmt.Println(" Started") - break - } - } - return server -} - -func init() { - server = NewSwiftServer() - c = &Connection{ - UserName: USERNAME, - ApiKey: APIKEY, - AuthUrl: AUTH_URL, - } -} - -// Check the error is a swift error -func checkError(t *testing.T, err error, StatusCode int, Text string) { - if err == nil { - t.Fatal("No error returned") - } - err2, ok := err.(*Error) - if !ok { - t.Fatal("Bad error type") - } - if err2.StatusCode != StatusCode { - t.Fatalf("Bad status code, expecting %d got %d", StatusCode, err2.StatusCode) - } - if err2.Text != Text { - t.Fatalf("Bad error string, expecting %q got %q", Text, err2.Text) - } -} - -// FIXME copied from swift_test.go -func compareMaps(t *testing.T, a, b map[string]string) { - if len(a) != len(b) { - t.Error("Maps different sizes", a, b) - } - for ka, va := range a { - if vb, ok := b[ka]; !ok || va != vb { - t.Error("Difference in key", ka, va, b[ka]) - } - } - for kb, vb := range b { - if va, ok := a[kb]; !ok || vb != va { - t.Error("Difference in key", kb, vb, a[kb]) - } - } -} - -func TestInternalError(t *testing.T) { - e := newError(404, "Not Found!") - if e.StatusCode != 404 || e.Text != "Not Found!" { - t.Fatal("Bad error") - } - if e.Error() != "Not Found!" 
{ - t.Fatal("Bad error") - } - -} - -func testCheckClose(c io.Closer, e error) (err error) { - err = e - defer checkClose(c, &err) - return -} - -// Make a closer which returns the error of our choice -type myCloser struct { - err error -} - -func (c *myCloser) Close() error { - return c.err -} - -func TestInternalCheckClose(t *testing.T) { - if testCheckClose(&myCloser{nil}, nil) != nil { - t.Fatal("bad 1") - } - if testCheckClose(&myCloser{nil}, ObjectCorrupted) != ObjectCorrupted { - t.Fatal("bad 2") - } - if testCheckClose(&myCloser{ObjectNotFound}, nil) != ObjectNotFound { - t.Fatal("bad 3") - } - if testCheckClose(&myCloser{ObjectNotFound}, ObjectCorrupted) != ObjectCorrupted { - t.Fatal("bad 4") - } -} - -func TestInternalParseHeaders(t *testing.T) { - resp := &http.Response{StatusCode: 200} - if c.parseHeaders(resp, nil) != nil { - t.Error("Bad 1") - } - if c.parseHeaders(resp, authErrorMap) != nil { - t.Error("Bad 1") - } - - resp = &http.Response{StatusCode: 299} - if c.parseHeaders(resp, nil) != nil { - t.Error("Bad 1") - } - - resp = &http.Response{StatusCode: 199, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 199, "HTTP Error: 199: BOOM") - - resp = &http.Response{StatusCode: 300, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 300, "HTTP Error: 300: BOOM") - - resp = &http.Response{StatusCode: 404, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 404, "HTTP Error: 404: BOOM") - if c.parseHeaders(resp, ContainerErrorMap) != ContainerNotFound { - t.Error("Bad 1") - } - if c.parseHeaders(resp, objectErrorMap) != ObjectNotFound { - t.Error("Bad 1") - } -} - -func TestInternalReadHeaders(t *testing.T) { - resp := &http.Response{Header: http.Header{}} - compareMaps(t, readHeaders(resp), Headers{}) - - resp = &http.Response{Header: http.Header{ - "one": []string{"1"}, - "two": []string{"2"}, - }} - compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) - - // FIXME this outputs a log which we should test and check - resp = &http.Response{Header: http.Header{ - "one": []string{"1", "11", "111"}, - "two": []string{"2"}, - }} - compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) -} - -func TestInternalStorage(t *testing.T) { - // FIXME -} - -// ------------------------------------------------------------ - -func TestInternalAuthenticate(t *testing.T) { - server.AddCheck(t).In(Headers{ - "User-Agent": DefaultUserAgent, - "X-Auth-Key": APIKEY, - "X-Auth-User": USERNAME, - }).Out(Headers{ - "X-Storage-Url": PROXY_URL, - "X-Auth-Token": AUTH_TOKEN, - }).Url("/v1.0") - defer server.Finished() - - err := c.Authenticate() - if err != nil { - t.Fatal(err) - } - if c.StorageUrl != PROXY_URL { - t.Error("Bad storage url") - } - if c.AuthToken != AUTH_TOKEN { - t.Error("Bad auth token") - } - if !c.Authenticated() { - t.Error("Didn't authenticate") - } -} - -func TestInternalAuthenticateDenied(t *testing.T) { - server.AddCheck(t).Error(400, "Bad request") - server.AddCheck(t).Error(401, "DENIED") - defer server.Finished() - c.UnAuthenticate() - err := c.Authenticate() - if err != AuthorizationFailed { - t.Fatal("Expecting AuthorizationFailed", err) - } - // FIXME - // if c.Authenticated() { - // t.Fatal("Expecting not authenticated") - // } -} - -func TestInternalAuthenticateBad(t *testing.T) { - server.AddCheck(t).Out(Headers{ - "X-Storage-Url": PROXY_URL, - }) - defer server.Finished() - err := c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not 
authenticated") - } - - server.AddCheck(t).Out(Headers{ - "X-Auth-Token": AUTH_TOKEN, - }) - err = c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not authenticated") - } - - server.AddCheck(t) - err = c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not authenticated") - } - - server.AddCheck(t).Out(Headers{ - "X-Storage-Url": PROXY_URL, - "X-Auth-Token": AUTH_TOKEN, - }) - err = c.Authenticate() - if err != nil { - t.Fatal(err) - } - if !c.Authenticated() { - t.Fatal("Expecting authenticated") - } -} - -func testContainerNames(t *testing.T, rx string, expected []string) { - server.AddCheck(t).In(Headers{ - "User-Agent": DefaultUserAgent, - "X-Auth-Token": AUTH_TOKEN, - }).Tx(rx).Url("/proxy") - containers, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers) != len(expected) { - t.Fatal("Wrong number of containers", len(containers), rx, len(expected), expected) - } - for i := range containers { - if containers[i] != expected[i] { - t.Error("Bad container", containers[i], expected[i]) - } - } -} -func TestInternalContainerNames(t *testing.T) { - defer server.Finished() - testContainerNames(t, "", []string{}) - testContainerNames(t, "one", []string{"one"}) - testContainerNames(t, "one\n", []string{"one"}) - testContainerNames(t, "one\ntwo\nthree\n", []string{"one", "two", "three"}) -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go deleted file mode 100644 index a6922b25..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go +++ /dev/null @@ -1,1284 +0,0 @@ -// This tests the swift packagae -// -// It can be used with a real swift server which should be set up in -// the environment variables SWIFT_API_USER, SWIFT_API_KEY and -// SWIFT_AUTH_URL -// In case those variables are not defined, a fake Swift server -// is used instead - see Testing in README.md for more info -// -// The functions are designed to run in order and create things the -// next function tests. This means that if it goes wrong it is likely -// errors will propagate. You may need to tidy up the CONTAINER to -// get it to run cleanly. 
-package swift_test - -import ( - "archive/tar" - "bytes" - "crypto/md5" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/http" - "os" - "sync" - "testing" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/ncw/swift" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/ncw/swift/swifttest" -) - -var ( - c swift.Connection - srv *swifttest.SwiftServer - m1 = swift.Metadata{"Hello": "1", "potato-Salad": "2"} - m2 = swift.Metadata{"hello": "", "potato-salad": ""} - skipVersionTests = false -) - -const ( - TEST_ADDRESS = "localhost:6543" - AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0" - CONTAINER = "GoSwiftUnitTest" - VERSIONS_CONTAINER = "GoSwiftUnitTestVersions" - CURRENT_CONTAINER = "GoSwiftUnitTestCurrent" - OBJECT = "test_object" - OBJECT2 = "test_object2" - CONTENTS = "12345" - CONTENTS2 = "54321" - CONTENT_SIZE = int64(len(CONTENTS)) - CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" -) - -type someTransport struct{ http.Transport } - -func TestTransport(t *testing.T) { - var err error - UserName := os.Getenv("SWIFT_API_USER") - ApiKey := os.Getenv("SWIFT_API_KEY") - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - if UserName == "" || ApiKey == "" || AuthUrl == "" { - srv, err = swifttest.NewSwiftServer(TEST_ADDRESS) - if err != nil { - t.Fatal("Failed to create server", err) - } - UserName = "swifttest" - ApiKey = "swifttest" - AuthUrl = AUTH_URL - } - tr := &someTransport{Transport: http.Transport{MaxIdleConnsPerHost: 2048}} - ct := swift.Connection{ - UserName: UserName, - ApiKey: ApiKey, - AuthUrl: AuthUrl, - Tenant: os.Getenv("SWIFT_TENANT"), - TenantId: os.Getenv("SWIFT_TENANT_ID"), - Transport: tr, - ConnectTimeout: 60 * time.Second, - Timeout: 60 * time.Second, - } - err = ct.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !ct.Authenticated() { - t.Fatal("Not authenticated") - } - if srv != nil { - srv.Close() - } -} - -// The following Test functions are run in order - this one must come before the others! 
-func TestAuthenticate(t *testing.T) { - var err error - UserName := os.Getenv("SWIFT_API_USER") - ApiKey := os.Getenv("SWIFT_API_KEY") - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - if UserName == "" || ApiKey == "" || AuthUrl == "" { - srv, err = swifttest.NewSwiftServer(TEST_ADDRESS) - if err != nil { - t.Fatal("Failed to create server", err) - } - UserName = "swifttest" - ApiKey = "swifttest" - AuthUrl = AUTH_URL - } - c = swift.Connection{ - UserName: UserName, - ApiKey: ApiKey, - AuthUrl: AuthUrl, - Tenant: os.Getenv("SWIFT_TENANT"), - TenantId: os.Getenv("SWIFT_TENANT_ID"), - } - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -// Attempt to trigger a race in authenticate -// -// Run with -race to test -func TestAuthenticateRace(t *testing.T) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - err := c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } - }() - } - wg.Wait() -} - -// Test a connection can be serialized and unserialized with JSON -func TestSerializeConnectionJson(t *testing.T) { - serializedConnection, err := json.Marshal(c) - if err != nil { - t.Fatalf("Failed to serialize connection: %v", err) - } - c2 := new(swift.Connection) - err = json.Unmarshal(serializedConnection, &c2) - if err != nil { - t.Fatalf("Failed to unserialize connection: %v", err) - } - if !c2.Authenticated() { - t.Fatal("Should be authenticated") - } - _, _, err = c2.Account() - if err != nil { - t.Fatalf("Failed to use unserialized connection: %v", err) - } -} - -// Test a connection can be serialized and unserialized with XML -func TestSerializeConnectionXml(t *testing.T) { - serializedConnection, err := xml.Marshal(c) - if err != nil { - t.Fatalf("Failed to serialize connection: %v", err) - } - c2 := new(swift.Connection) - err = xml.Unmarshal(serializedConnection, &c2) - if err != nil { - t.Fatalf("Failed to unserialize connection: %v", err) - } - if !c2.Authenticated() { - t.Fatal("Should be authenticated") - } - _, _, err = c2.Account() - if err != nil { - t.Fatalf("Failed to use unserialized connection: %v", err) - } -} - -// Test the reauthentication logic -func TestOnReAuth(t *testing.T) { - c2 := c - c2.UnAuthenticate() - _, _, err := c2.Account() - if err != nil { - t.Fatalf("Failed to reauthenticate: %v", err) - } -} -func TestAccount(t *testing.T) { - info, headers, err := c.Account() - if err != nil { - t.Fatal(err) - } - if headers["X-Account-Container-Count"] != fmt.Sprintf("%d", info.Containers) { - t.Error("Bad container count") - } - if headers["X-Account-Bytes-Used"] != fmt.Sprintf("%d", info.BytesUsed) { - t.Error("Bad bytes count") - } - if headers["X-Account-Object-Count"] != fmt.Sprintf("%d", info.Objects) { - t.Error("Bad objects count") - } - //fmt.Println(info) - //fmt.Println(headers) -} - -func compareMaps(t *testing.T, a, b map[string]string) { - if len(a) != len(b) { - t.Error("Maps different sizes", a, b) - } - for ka, va := range a { - if vb, ok := b[ka]; !ok || va != vb { - t.Error("Difference in key", ka, va, b[ka]) - } - } - for kb, vb := range b { - if va, ok := a[kb]; !ok || vb != va { - t.Error("Difference in key", kb, vb, a[kb]) - } - } -} - -func TestAccountUpdate(t *testing.T) { - err := c.AccountUpdate(m1.AccountHeaders()) - if err != nil { - t.Fatal(err) - } - - _, headers, err := c.Account() - if err != nil { - t.Fatal(err) - } - m := 
headers.AccountMetadata() - delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set - compareMaps(t, m, map[string]string{"hello": "1", "potato-salad": "2"}) - - err = c.AccountUpdate(m2.AccountHeaders()) - if err != nil { - t.Fatal(err) - } - - _, headers, err = c.Account() - if err != nil { - t.Fatal(err) - } - m = headers.AccountMetadata() - delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set - compareMaps(t, m, map[string]string{}) - - //fmt.Println(c.Account()) - //fmt.Println(headers) - //fmt.Println(headers.AccountMetadata()) - //fmt.Println(c.AccountUpdate(m2.AccountHeaders())) - //fmt.Println(c.Account()) -} - -func TestContainerCreate(t *testing.T) { - err := c.ContainerCreate(CONTAINER, m1.ContainerHeaders()) - if err != nil { - t.Fatal(err) - } -} - -func TestContainer(t *testing.T) { - info, headers, err := c.Container(CONTAINER) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ContainerMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) - if CONTAINER != info.Name { - t.Error("Bad container count") - } - if headers["X-Container-Bytes-Used"] != fmt.Sprintf("%d", info.Bytes) { - t.Error("Bad bytes count") - } - if headers["X-Container-Object-Count"] != fmt.Sprintf("%d", info.Count) { - t.Error("Bad objects count") - } - //fmt.Println(info) - //fmt.Println(headers) -} - -func TestContainersAll(t *testing.T) { - containers1, err := c.ContainersAll(nil) - if err != nil { - t.Fatal(err) - } - containers2, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainersAllWithLimit(t *testing.T) { - containers1, err := c.ContainersAll(&swift.ContainersOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - containers2, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainerUpdate(t *testing.T) { - err := c.ContainerUpdate(CONTAINER, m2.ContainerHeaders()) - if err != nil { - t.Fatal(err) - } - _, headers, err := c.Container(CONTAINER) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ContainerMetadata(), map[string]string{}) - //fmt.Println(headers) -} - -func TestContainerNames(t *testing.T) { - containers, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - // fmt.Printf("container %q\n", CONTAINER) - ok := false - for _, container := range containers { - if container == CONTAINER { - ok = true - break - } - } - if !ok { - t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) - } - // fmt.Println(containers) -} - -func TestContainerNamesAll(t *testing.T) { - containers1, err := c.ContainerNamesAll(nil) - if err != nil { - t.Fatal(err) - } - containers2, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainerNamesAllWithLimit(t *testing.T) { - containers1, err := c.ContainerNamesAll(&swift.ContainersOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - containers2, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != 
len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestObjectPutString(t *testing.T) { - err := c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error(err) - } - if info.ContentType != "application/octet-stream" { - t.Error("Bad content type", info.ContentType) - } - if info.Bytes != CONTENT_SIZE { - t.Error("Bad length") - } - if info.Hash != CONTENT_MD5 { - t.Error("Bad length") - } -} - -func TestObjectPutBytes(t *testing.T) { - err := c.ObjectPutBytes(CONTAINER, OBJECT, []byte(CONTENTS), "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error(err) - } - if info.ContentType != "application/octet-stream" { - t.Error("Bad content type", info.ContentType) - } - if info.Bytes != CONTENT_SIZE { - t.Error("Bad length") - } - if info.Hash != CONTENT_MD5 { - t.Error("Bad length") - } -} - -func TestObjectPutMimeType(t *testing.T) { - err := c.ObjectPutString(CONTAINER, "test.jpg", CONTENTS, "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, "test.jpg") - if err != nil { - t.Error(err) - } - if info.ContentType != "image/jpeg" { - t.Error("Bad content type", info.ContentType) - } - - // Tidy up - err = c.ObjectDelete(CONTAINER, "test.jpg") - if err != nil { - t.Error(err) - } -} - -func TestObjectCreate(t *testing.T) { - out, err := c.ObjectCreate(CONTAINER, OBJECT2, true, "", "", nil) - if err != nil { - t.Fatal(err) - } - buf := &bytes.Buffer{} - hash := md5.New() - out2 := io.MultiWriter(out, buf, hash) - for i := 0; i < 100; i++ { - fmt.Fprintf(out2, "%d %s\n", i, CONTENTS) - } - err = out.Close() - if err != nil { - t.Error(err) - } - expected := buf.String() - contents, err := c.ObjectGetString(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } - if contents != expected { - t.Error("Contents wrong") - } - - // Test writing on closed file - n, err := out.Write([]byte{0}) - if err == nil || n != 0 { - t.Error("Expecting error and n == 0 writing on closed file", err, n) - } - - // Now with hash instead - out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, fmt.Sprintf("%x", hash.Sum(nil)), "", nil) - if err != nil { - t.Fatal(err) - } - _, err = out.Write(buf.Bytes()) - if err != nil { - t.Error(err) - } - err = out.Close() - if err != nil { - t.Error(err) - } - contents, err = c.ObjectGetString(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } - if contents != expected { - t.Error("Contents wrong") - } - - // Now with bad hash - out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, CONTENT_MD5, "", nil) - if err != nil { - t.Fatal(err) - } - // FIXME: work around bug which produces 503 not 422 for empty corrupted files - fmt.Fprintf(out, "Sausage") - err = out.Close() - if err != swift.ObjectCorrupted { - t.Error("Expecting object corrupted not", err) - } - - // Tidy up - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } -} - -func TestObjectGetString(t *testing.T) { - contents, err := c.ObjectGetString(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if contents != CONTENTS { - t.Error("Contents wrong") - } - //fmt.Println(contents) -} - -func TestObjectGetBytes(t *testing.T) { - contents, err := c.ObjectGetBytes(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if string(contents) != CONTENTS { - t.Error("Contents wrong") - } 
- //fmt.Println(contents) -} - -func TestObjectOpen(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - n, err := io.Copy(&buf, file) - if err != nil { - t.Fatal(err) - } - if n != CONTENT_SIZE { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - if buf.String() != CONTENTS { - t.Error("Contents wrong") - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenPartial(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - n, err := io.CopyN(&buf, file, 1) - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - if buf.String() != CONTENTS[:1] { - t.Error("Contents wrong") - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenLength(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - // FIXME ideally this would check both branches of the Length() code - n, err := file.Length() - if err != nil { - t.Fatal(err) - } - if n != CONTENT_SIZE { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenSeek(t *testing.T) { - - plan := []struct { - whence int - offset int64 - result int64 - }{ - {-1, 0, 0}, - {-1, 0, 1}, - {-1, 0, 2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 1, 1}, - {0, 2, 2}, - {1, 0, 3}, - {1, -2, 2}, - {1, 1, 4}, - {2, -1, 4}, - {2, -3, 2}, - {2, -2, 3}, - {2, -5, 0}, - {2, -4, 1}, - } - - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - - for _, p := range plan { - if p.whence >= 0 { - result, err := file.Seek(p.offset, p.whence) - if err != nil { - t.Fatal(err, p) - } - if result != p.result { - t.Fatal("Seek result was", result, "expecting", p.result, p) - } - - } - var buf bytes.Buffer - n, err := io.CopyN(&buf, file, 1) - if err != nil { - t.Fatal(err, p) - } - if n != 1 { - t.Fatal("Wrong length", n, p) - } - actual := buf.String() - expected := CONTENTS[p.result : p.result+1] - if actual != expected { - t.Error("Contents wrong, expecting", expected, "got", actual, p) - } - } - - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectUpdate(t *testing.T) { - err := c.ObjectUpdate(CONTAINER, OBJECT, m1.ObjectHeaders()) - if err != nil { - t.Fatal(err) - } -} - -func checkTime(t *testing.T, when time.Time, low, high int) { - dt := time.Now().Sub(when) - if dt < time.Duration(low)*time.Second || dt > time.Duration(high)*time.Second { - t.Errorf("Time is wrong: dt=%q, when=%q", dt, when) - } -} - -func TestObject(t *testing.T) { - object, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) - if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) -} - -func TestObjectUpdate2(t *testing.T) { - err := c.ObjectUpdate(CONTAINER, OBJECT, m2.ObjectHeaders()) - if err != nil { - t.Fatal(err) - } - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - //fmt.Println(headers, headers.ObjectMetadata()) - compareMaps(t, headers.ObjectMetadata(), 
map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestContainers(t *testing.T) { - containers, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - ok := false - for _, container := range containers { - if container.Name == CONTAINER { - ok = true - // Container may or may not have the file contents in it - // Swift updates may be behind - if container.Count == 0 && container.Bytes == 0 { - break - } - if container.Count == 1 && container.Bytes == CONTENT_SIZE { - break - } - t.Errorf("Bad size of Container %q: %q", CONTAINER, container) - break - } - } - if !ok { - t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) - } - //fmt.Println(containers) -} - -func TestObjectNames(t *testing.T) { - objects, err := c.ObjectNames(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesAll(t *testing.T) { - objects, err := c.ObjectNamesAll(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesAllWithLimit(t *testing.T) { - objects, err := c.ObjectNamesAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectsWalk(t *testing.T) { - objects := make([]string, 0) - err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(CONTAINER, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjects(t *testing.T) { - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 { - t.Fatal("Should only be 1 object") - } - object := objects[0] - if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - // fmt.Println(objects) -} - -func TestObjectsDirectory(t *testing.T) { - err := c.ObjectPutString(CONTAINER, "directory", "", "application/directory") - if err != nil { - t.Fatal(err) - } - defer c.ObjectDelete(CONTAINER, "directory") - - // Look for the directory object and check we aren't confusing - // it with a pseudo directory object - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 2 { - t.Fatal("Should only be 2 objects") - } - found := false - for i := range objects { - object := objects[i] - if object.Name == "directory" { - found = true - if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "d41d8cd98f00b204e9800998ecf8427e" || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - } - } - if !found { - t.Error("Didn't find directory object") - } - // fmt.Println(objects) -} - -func TestObjectsPseudoDirectory(t *testing.T) { - err := 
c.ObjectPutString(CONTAINER, "directory/puppy.jpg", "cute puppy", "") - if err != nil { - t.Fatal(err) - } - defer c.ObjectDelete(CONTAINER, "directory/puppy.jpg") - - // Look for the pseudo directory - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 2 { - t.Fatal("Should only be 2 objects", objects) - } - found := false - for i := range objects { - object := objects[i] - if object.Name == "directory/" { - found = true - if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "" || object.PseudoDirectory != true || object.SubDir != "directory/" && object.LastModified.IsZero() { - t.Error("Bad object info", object) - } - } - } - if !found { - t.Error("Didn't find directory object", objects) - } - - // Look in the pseudo directory now - objects, err = c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "directory/"}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 { - t.Fatal("Should only be 1 object", objects) - } - object := objects[0] - if object.Name != "directory/puppy.jpg" || object.Bytes != 10 || object.ContentType != "image/jpeg" || object.Hash != "87a12ea22fca7f54f0cefef1da535489" || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - // fmt.Println(objects) -} - -func TestObjectsAll(t *testing.T) { - objects, err := c.ObjectsAll(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0].Name != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectsAllWithLimit(t *testing.T) { - objects, err := c.ObjectsAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0].Name != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesWithPath(t *testing.T) { - objects, err := c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: ""}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Bad listing with path", objects) - } - // fmt.Println(objects) - objects, err = c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "Downloads/"}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 0 { - t.Error("Bad listing with path", objects) - } - // fmt.Println(objects) -} - -func TestObjectCopy(t *testing.T) { - _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, nil) - if err != nil { - t.Fatal(err) - } - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } -} - -func TestObjectCopyWithMetadata(t *testing.T) { - m := swift.Metadata{} - m["copy-special-metadata"] = "hello" - m["hello"] = "3" - h := m.ObjectHeaders() - h["Content-Type"] = "image/jpeg" - _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, h) - if err != nil { - t.Fatal(err) - } - // Re-read the metadata to see if it is correct - _, headers, err := c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } - if headers["Content-Type"] != "image/jpeg" { - t.Error("Didn't change content type") - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "3", "potato-salad": "", "copy-special-metadata": "hello"}) - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } -} - -func TestObjectMove(t *testing.T) { - err := c.ObjectMove(CONTAINER, OBJECT, CONTAINER, OBJECT2) - if 
err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT) - _, _, err = c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } - - err = c.ObjectMove(CONTAINER, OBJECT2, CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT2) - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestObjectUpdateContentType(t *testing.T) { - err := c.ObjectUpdateContentType(CONTAINER, OBJECT, "text/potato") - if err != nil { - t.Fatal(err) - } - // Re-read the metadata to see if it is correct - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if headers["Content-Type"] != "text/potato" { - t.Error("Didn't change content type") - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestVersionContainerCreate(t *testing.T) { - if err := c.VersionContainerCreate(CURRENT_CONTAINER, VERSIONS_CONTAINER); err != nil { - if err == swift.Forbidden { - t.Log("Server doesn't support Versions - skipping test") - skipVersionTests = true - return - } - t.Fatal(err) - } -} - -func TestVersionObjectAdd(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - // Version 1 - if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS, ""); err != nil { - t.Fatal(err) - } - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS { - t.Error("Contents wrong") - } - - // Version 2 - if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { - t.Fatal(err) - } - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS2 { - t.Error("Contents wrong") - } - - // Version 3 - if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { - t.Fatal(err) - } -} - -func TestVersionObjectList(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - list, err := c.VersionObjectList(VERSIONS_CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - - if len(list) != 2 { - t.Error("Version list should return 2 objects") - } - - //fmt.Print(list) -} - -func TestVersionObjectDelete(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - // Delete Version 3 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - - // Delete Version 2 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - - // Contents should be reverted to Version 1 - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS { - t.Error("Contents wrong") - } -} - -// cleanUpContainer deletes everything in the container and then the -// container. It expects the container to be empty and if it wasn't -// it logs an error. 
-func cleanUpContainer(t *testing.T, container string) { - objects, err := c.Objects(container, nil) - if err != nil { - t.Error(err, container) - } else { - if len(objects) != 0 { - t.Error("Container not empty", container) - } - for _, object := range objects { - t.Log("Deleting spurious", object.Name) - err = c.ObjectDelete(container, object.Name) - if err != nil { - t.Error(err, container) - } - } - } - - if err := c.ContainerDelete(container); err != nil { - t.Error(err, container) - } -} - -func TestVersionDeleteContent(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - } else { - // Delete Version 1 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - } - cleanUpContainer(t, VERSIONS_CONTAINER) - cleanUpContainer(t, CURRENT_CONTAINER) -} - -// Check for non-existence after delete -// May have to do it a few times to wait for swift to be consistent. -func testExistenceAfterDelete(t *testing.T, container, object string) { - for i := 10; i >= 0; i-- { - _, _, err := c.Object(container, object) - if err == swift.ObjectNotFound { - break - } - if i == 0 { - t.Fatalf("Expecting object %q/%q to be not found: err=%v", container, object, err) - } - time.Sleep(1 * time.Second) - } -} - -func TestObjectDelete(t *testing.T) { - err := c.ObjectDelete(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT) - err = c.ObjectDelete(CONTAINER, OBJECT) - if err != swift.ObjectNotFound { - t.Fatal("Expecting Object not found", err) - } -} - -func TestBulkDelete(t *testing.T) { - result, err := c.BulkDelete(CONTAINER, []string{OBJECT}) - if err == swift.Forbidden { - t.Log("Server doesn't support BulkDelete - skipping test") - return - } - if err != nil { - t.Fatal(err) - } - if result.NumberNotFound != 1 { - t.Error("Expected 1, actual:", result.NumberNotFound) - } - if result.NumberDeleted != 0 { - t.Error("Expected 0, actual:", result.NumberDeleted) - } - err = c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - result, err = c.BulkDelete(CONTAINER, []string{OBJECT2, OBJECT}) - if err != nil { - t.Fatal(err) - } - if result.NumberNotFound != 1 { - t.Error("Expected 1, actual:", result.NumberNotFound) - } - if result.NumberDeleted != 1 { - t.Error("Expected 1, actual:", result.NumberDeleted) - } - t.Log("Errors:", result.Errors) -} - -func TestBulkUpload(t *testing.T) { - buffer := new(bytes.Buffer) - ds := tar.NewWriter(buffer) - var files = []struct{ Name, Body string }{ - {OBJECT, CONTENTS}, - {OBJECT2, CONTENTS2}, - } - for _, file := range files { - hdr := &tar.Header{ - Name: file.Name, - Size: int64(len(file.Body)), - } - if err := ds.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err := ds.Write([]byte(file.Body)); err != nil { - t.Fatal(err) - } - } - if err := ds.Close(); err != nil { - t.Fatal(err) - } - - result, err := c.BulkUpload(CONTAINER, buffer, swift.UploadTar, nil) - if err == swift.Forbidden { - t.Log("Server doesn't support BulkUpload - skipping test") - return - } - if err != nil { - t.Fatal(err) - } - if result.NumberCreated != 2 { - t.Error("Expected 2, actual:", result.NumberCreated) - } - t.Log("Errors:", result.Errors) - - _, _, err = c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error("Expecting object to be found") - } - _, _, err = c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Error("Expecting object to be found") - } - c.ObjectDelete(CONTAINER, OBJECT) - c.ObjectDelete(CONTAINER,
OBJECT2) -} - -func TestObjectDifficultName(t *testing.T) { - const name = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/` - err := c.ObjectPutString(CONTAINER, name, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - objects, err := c.ObjectNamesAll(CONTAINER, nil) - if err != nil { - t.Error(err) - } - found := false - for _, object := range objects { - if object == name { - found = true - break - } - } - if !found { - t.Errorf("Couldn't find %q in listing %q", name, objects) - } - err = c.ObjectDelete(CONTAINER, name) - if err != nil { - t.Fatal(err) - } -} - -func TestContainerDelete(t *testing.T) { - err := c.ContainerDelete(CONTAINER) - if err != nil { - t.Fatal(err) - } - err = c.ContainerDelete(CONTAINER) - if err != swift.ContainerNotFound { - t.Fatal("Expecting container not found", err) - } - _, _, err = c.Container(CONTAINER) - if err != swift.ContainerNotFound { - t.Fatal("Expecting container not found", err) - } -} - -func TestUnAuthenticate(t *testing.T) { - c.UnAuthenticate() - if c.Authenticated() { - t.Fatal("Shouldn't be authenticated") - } - // Test re-authenticate - err := c.Authenticate() - if err != nil { - t.Fatal("ReAuth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go deleted file mode 100644 index 2348617b..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// This tests TimeoutReader - -package swift - -import ( - "io" - "io/ioutil" - "sync" - "testing" - "time" -) - -// An io.ReadCloser for testing -type testReader struct { - sync.Mutex - n int - delay time.Duration - closed bool -} - -// Returns n bytes with at time.Duration delay -func newTestReader(n int, delay time.Duration) *testReader { - return &testReader{ - n: n, - delay: delay, - } -} - -// Returns 1 byte at a time after delay -func (t *testReader) Read(p []byte) (n int, err error) { - if t.n <= 0 { - return 0, io.EOF - } - time.Sleep(t.delay) - p[0] = 'A' - t.Lock() - t.n-- - t.Unlock() - return 1, nil -} - -// Close the channel -func (t *testReader) Close() error { - t.Lock() - t.closed = true - t.Unlock() - return nil -} - -func TestTimeoutReaderNoTimeout(t *testing.T) { - test := newTestReader(3, 10*time.Millisecond) - cancelled := false - cancel := func() { - cancelled = true - } - tr := newTimeoutReader(test, 100*time.Millisecond, cancel) - b, err := ioutil.ReadAll(tr) - if err != nil || string(b) != "AAA" { - t.Fatalf("Bad read %s %s", err, b) - } - if cancelled { - t.Fatal("Cancelled when shouldn't have been") - } - if test.n != 0 { - t.Fatal("Didn't read all") - } - if test.closed { - t.Fatal("Shouldn't be closed") - } - tr.Close() - if !test.closed { - t.Fatal("Should be closed") - } -} - -func TestTimeoutReaderTimeout(t *testing.T) { - // Return those bytes slowly so we get an idle timeout - test := newTestReader(3, 100*time.Millisecond) - cancelled := false - cancel := func() { - cancelled = true - } - tr := newTimeoutReader(test, 10*time.Millisecond, cancel) - _, err := ioutil.ReadAll(tr) - if err != TimeoutError { - t.Fatal("Expecting TimeoutError, got", err) - } - if !cancelled { - t.Fatal("Not cancelled when should have been") - } - test.Lock() - n := test.n - test.Unlock() - if n == 0 { - t.Fatal("Read all") - } - if n != 3 { - t.Fatal("Didn't read any") - } - if test.closed { - t.Fatal("Shouldn't be closed") - } - tr.Close() 
- if !test.closed { - t.Fatal("Should be closed") - } -} diff --git a/server/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go b/server/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go deleted file mode 100644 index 8b879d44..00000000 --- a/server/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// This tests WatchdogReader - -package swift - -import ( - "io/ioutil" - "testing" - "time" -) - -// Uses testReader from timeout_reader_test.go - -func testWatchdogReaderTimeout(t *testing.T, initialTimeout, watchdogTimeout time.Duration, expectedTimeout bool) { - test := newTestReader(3, 10*time.Millisecond) - timer := time.NewTimer(initialTimeout) - firedChan := make(chan bool) - started := make(chan bool) - go func() { - started <- true - select { - case <-timer.C: - firedChan <- true - } - }() - <-started - wr := newWatchdogReader(test, watchdogTimeout, timer) - b, err := ioutil.ReadAll(wr) - if err != nil || string(b) != "AAA" { - t.Fatalf("Bad read %s %s", err, b) - } - fired := false - select { - case fired = <-firedChan: - default: - } - if expectedTimeout { - if !fired { - t.Fatal("Timer should have fired") - } - } else { - if fired { - t.Fatal("Timer should not have fired") - } - } -} - -func TestWatchdogReaderNoTimeout(t *testing.T) { - testWatchdogReaderTimeout(t, 100*time.Millisecond, 100*time.Millisecond, false) -} - -func TestWatchdogReaderTimeout(t *testing.T) { - testWatchdogReaderTimeout(t, 5*time.Millisecond, 5*time.Millisecond, true) -} - -func TestWatchdogReaderNoTimeoutShortInitial(t *testing.T) { - testWatchdogReaderTimeout(t, 5*time.Millisecond, 100*time.Millisecond, false) -} - -func TestWatchdogReaderTimeoutLongInitial(t *testing.T) { - testWatchdogReaderTimeout(t, 100*time.Millisecond, 5*time.Millisecond, true) -} diff --git a/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore new file mode 100644 index 00000000..f9d9cd8a --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/.gitignore @@ -0,0 +1,11 @@ +_obj +_test +*.6 +*.out +_testmain.go +\#* +.\#* +*.log +_cgo* +*.o +*.a diff --git a/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING new file mode 100644 index 00000000..d7849fd8 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2011 by Krzysztof Kowalik + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md new file mode 100644 index 00000000..e3d025d5 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/README.md @@ -0,0 +1,21 @@ +# Pure Go UUID implementation + +This package provides immutable UUID structs and the functions +NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +and 5 UUIDs as specified in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt). + +## Installation + +Use the `go` tool: + + $ go get github.com/nu7hatch/gouuid + +## Usage + +See [documentation and examples](http://godoc.org/github.com/nu7hatch/gouuid) +for more information. + +## Copyright + +Copyright (C) 2011 by Krzysztof Kowalik . See [COPYING](https://github.com/nu7hatch/gouuid/tree/master/COPYING) +file for details. diff --git a/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go new file mode 100644 index 00000000..ac9623b7 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid/uuid.go @@ -0,0 +1,173 @@ +// This package provides immutable UUID structs and the functions +// NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +// and 5 UUIDs as specified in RFC 4122. +// +// Copyright (C) 2011 by Krzysztof Kowalik +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "hash" + "regexp" +) + +// The UUID reserved variants. +const ( + ReservedNCS byte = 0x80 + ReservedRFC4122 byte = 0x40 + ReservedMicrosoft byte = 0x20 + ReservedFuture byte = 0x00 +) + +// The following standard UUIDs are for use with NewV3() or NewV5(). +var ( + NamespaceDNS, _ = ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = ParseHex("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = ParseHex("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// Pattern used to parse hex string representation of the UUID. +// FIXME: do something to consider both brackets at one time, +// current one allows to parse string with only one opening +// or closing bracket. +const hexPattern = "^(urn\\:uuid\\:)?\\{?([a-z0-9]{8})-([a-z0-9]{4})-" + + "([1-5][a-z0-9]{3})-([a-z0-9]{4})-([a-z0-9]{12})\\}?$" + +var re = regexp.MustCompile(hexPattern) + +// A UUID representation compliant with specification in +// RFC 4122 document. +type UUID [16]byte + +// ParseHex creates a UUID object from given hex string +// representation. Function accepts UUID string in following +// formats: +// +// uuid.ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +// uuid.ParseHex("{6ba7b814-9dad-11d1-80b4-00c04fd430c8}") +// uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8") +// +func ParseHex(s string) (u *UUID, err error) { + md := re.FindStringSubmatch(s) + if md == nil { + err = errors.New("Invalid UUID string") + return + } + hash := md[2] + md[3] + md[4] + md[5] + md[6] + b, err := hex.DecodeString(hash) + if err != nil { + return + } + u = new(UUID) + copy(u[:], b) + return +} + +// Parse creates a UUID object from given bytes slice. 
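+// +// A minimal usage sketch (illustrative only; the hex literal below is the +// NamespaceDNS value defined above): +// +//	u, err := ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8") +//	if err == nil { +//		u2, _ := Parse(u[:]) // rebuild the same UUID from its raw 16 bytes +//		_ = u2 +//	}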
+func Parse(b []byte) (u *UUID, err error) { + if len(b) != 16 { + err = errors.New("Given slice is not valid UUID sequence") + return + } + u = new(UUID) + copy(u[:], b) + return +} + +// Generate a UUID based on the MD5 hash of a namespace identifier +// and a name. +func NewV3(ns *UUID, name []byte) (u *UUID, err error) { + if ns == nil { + err = errors.New("Invalid namespace UUID") + return + } + u = new(UUID) + // Set all bits to MD5 hash generated from namespace and name. + u.setBytesFromHash(md5.New(), ns[:], name) + u.setVariant(ReservedRFC4122) + u.setVersion(3) + return +} + +// Generate a random UUID. +func NewV4() (u *UUID, err error) { + u = new(UUID) + // Set all bits to randomly (or pseudo-randomly) chosen values. + _, err = rand.Read(u[:]) + if err != nil { + return + } + u.setVariant(ReservedRFC4122) + u.setVersion(4) + return +} + +// Generate a UUID based on the SHA-1 hash of a namespace identifier +// and a name. +func NewV5(ns *UUID, name []byte) (u *UUID, err error) { + u = new(UUID) + // Set all bits to truncated SHA1 hash generated from namespace + // and name. + u.setBytesFromHash(sha1.New(), ns[:], name) + u.setVariant(ReservedRFC4122) + u.setVersion(5) + return +} + +// Hash the namespace and name with the supplied hash (MD5 for V3, +// SHA-1 for V5) and copy the first 16 bytes of the digest into the UUID. +func (u *UUID) setBytesFromHash(hash hash.Hash, ns, name []byte) { + hash.Write(ns[:]) + hash.Write(name) + copy(u[:], hash.Sum([]byte{})[:16]) +} + +// Set the variant bits of the clock_seq_hi_and_reserved octet +// according to the requested reserved variant. +func (u *UUID) setVariant(v byte) { + switch v { + case ReservedNCS: + u[8] = (u[8] | ReservedNCS) & 0xBF + case ReservedRFC4122: + u[8] = (u[8] | ReservedRFC4122) & 0x7F + case ReservedMicrosoft: + u[8] = (u[8] | ReservedMicrosoft) & 0x3F + } +} + +// Variant returns the UUID Variant, which determines the internal +// layout of the UUID. This will be one of the constants: ReservedNCS, +// ReservedRFC4122, ReservedMicrosoft, ReservedFuture. +func (u *UUID) Variant() byte { + if u[8]&ReservedNCS == ReservedNCS { + return ReservedNCS + } else if u[8]&ReservedRFC4122 == ReservedRFC4122 { + return ReservedRFC4122 + } else if u[8]&ReservedMicrosoft == ReservedMicrosoft { + return ReservedMicrosoft + } + return ReservedFuture +} + +// Set the four most significant bits (bits 12 through 15) of the +// time_hi_and_version field to the 4-bit version number. +func (u *UUID) setVersion(v byte) { + u[6] = (u[6] & 0xF) | (v << 4) +} + +// Version returns a version number of the algorithm used to +// generate the UUID sequence. +func (u *UUID) Version() uint { + return uint(u[6] >> 4) +} + +// String returns the canonical textual form of the UUID sequence.
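+// +// For example (a sketch; V4 UUIDs are random, so the exact value differs on +// every run): +// +//	u, _ := NewV4() +//	fmt.Println(u) // e.g. "f47ac10b-58cc-4372-a567-0e02b2c3d479"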
+func (u *UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/context/.gitignore b/server/Godeps/_workspace/src/github.com/root-gg/context/.gitignore deleted file mode 100644 index 485dee64..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/context/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea diff --git a/server/Godeps/_workspace/src/github.com/root-gg/context/README.md b/server/Godeps/_workspace/src/github.com/root-gg/context/README.md deleted file mode 100644 index f69d9101..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/context/README.md +++ /dev/null @@ -1,2 +0,0 @@ -ROOT-GG Application context -=========================== diff --git a/server/Godeps/_workspace/src/github.com/root-gg/context/context.go b/server/Godeps/_workspace/src/github.com/root-gg/context/context.go deleted file mode 100644 index e4c493f3..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/context/context.go +++ /dev/null @@ -1,269 +0,0 @@ -package context - -import ( - "bytes" - "errors" - "fmt" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" - "sync" - "time" -) - -var Running = errors.New("running") -var Success = errors.New("success") -var Canceled = errors.New("canceled") -var Timedout = errors.New("timedout") - -type Context struct { - parent *Context - name string - elapsed utils.SplitTime - splits []*utils.SplitTime - done chan struct{} - children []*Context - timeout time.Duration - timer *time.Timer - status error - lock sync.RWMutex - values map[interface{}]interface{} -} - -func NewContext(name string) (ctx *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - ctx = new(Context) - ctx.status = Running - ctx.elapsed = *utils.NewSplitTime("") - ctx.elapsed.Start() - ctx.name = name - ctx.done = make(chan struct{}) - ctx.children = make([]*Context, 0) - ctx.values = make(map[interface{}]interface{}) - return -} - -func NewContextWithTimeout(name string, timeout time.Duration) (ctx *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - ctx = NewContext(name) - ctx.timeout = timeout - ctx.timer = time.NewTimer(timeout) - go func() { - select { - case <-ctx.timer.C: - ctx.Finalize(Timedout) - case <-ctx.Done(): - ctx.timer.Stop() - } - }() - return -} - -func (ctx *Context) Fork(name string) (fork *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - fork = NewContext(name) - fork.parent = ctx - ctx.children = append(ctx.children, fork) - return -} - -func (ctx *Context) ForkWithTimeout(name string, timeout time.Duration) (fork *Context) { - if name == "" { - _, _, name = utils.GetCaller(2) - _, name = utils.ParseFunction(name) - } - fork = NewContextWithTimeout(name, timeout) - fork.parent = ctx - ctx.children = append(ctx.children, fork) - return -} - -func (ctx *Context) Name() string { - return ctx.name -} - -func (ctx *Context) Done() (done <-chan struct{}) { - done = ctx.done - return -} - -func (ctx *Context) Wait() { - if ctx.status == Running { - <-ctx.done - } -} - -func (ctx *Context) waitAllChildren(root bool) { - for _, child := range ctx.children { - child.waitAllChildren(false) - } - if !root { - ctx.Wait() - } -} - -func (ctx *Context) WaitAllChildren() { - ctx.waitAllChildren(true) - return -} - -func (ctx *Context) Status() (status error) { - if 
ctx.status == nil { - status = Success - } else { - status = ctx.status - } - return ctx.status -} - -func (ctx *Context) Finalize(err error) { - ctx.lock.Lock() - defer ctx.lock.Unlock() - if ctx.status != Running { - return - } - ctx.status = err - ctx.elapsed.Stop() - close(ctx.done) -} - -func (ctx *Context) Cancel() { - ctx.Finalize(Canceled) - for _, child := range ctx.Children() { - child.Cancel() - } -} - -func (ctx *Context) AutoCancel() *Context { - go func() { - <-ctx.Done() - ctx.Cancel() - }() - return ctx -} - -func (ctx *Context) DetachChild(child *Context) { - for i := 0; i < len(ctx.children); i++ { - if ctx.children[i] == child { - ctx.children = append(ctx.children[:i], ctx.children[i+1:]...) - } - } -} - -func (ctx *Context) AutoDetach() *Context { - go func() { - <-ctx.Done() - if ctx.parent != nil { - ctx.parent.DetachChild(ctx) - } - }() - return ctx -} - -func (ctx *Context) AutoDetachChild(child *Context) { - go func() { - <-child.Done() - ctx.DetachChild(child) - }() -} - -func (ctx *Context) allChildren(children []*Context) []*Context { - children = append(children, ctx.children...) - for _, child := range ctx.children { - children = child.allChildren(children) - } - return children -} - -func (ctx *Context) AllChildren() []*Context { - return ctx.allChildren([]*Context{}) -} - -func (ctx *Context) Children() []*Context { - return ctx.children -} - -func (ctx *Context) Set(key interface{}, value interface{}) { - ctx.values[key] = value -} - -func (ctx *Context) Get(key interface{}) (interface{}, bool) { - if value, ok := ctx.values[key]; ok { - return value, true - } else { - if ctx.parent != nil { - return ctx.parent.Get(key) - } - } - return nil, false -} - -func (ctx *Context) StartDate() *time.Time { - return ctx.elapsed.StartDate() -} - -func (ctx *Context) EndDate() *time.Time { - return ctx.elapsed.StopDate() -} - -func (ctx *Context) Elapsed() time.Duration { - return ctx.elapsed.Elapsed() -} - -func (ctx *Context) Deadline() time.Time { - return ctx.StartDate().Add(ctx.timeout) -} - -func (ctx *Context) Remaining() time.Duration { - return ctx.Deadline().Sub(time.Now()) -} - -func (ctx *Context) Time(name string) (split *utils.SplitTime) { - if ctx.splits == nil { - ctx.splits = make([]*utils.SplitTime, 0) - } - split = utils.NewSplitTime(name) - ctx.splits = append(ctx.splits, split) - split.Start() - return -} - -func (ctx *Context) Timers() []*utils.SplitTime { - return ctx.splits -} - -func (ctx *Context) string(depth int) string { - str := bytes.NewBufferString("") - var pad string - for i := 0; i < depth; i++ { - pad += " " - } - str.WriteString(pad) - if depth > 0 { - str.WriteString("`->") - } - str.WriteString(fmt.Sprintf("%s : status %s, elapsed %s\n", ctx.name, ctx.Status(), ctx.Elapsed().String())) - if ctx.splits != nil { - for _, split := range ctx.splits { - str.WriteString(pad) - str.WriteString(" - ") - str.WriteString(split.String()) - str.WriteString("\n") - } - } - for _, child := range ctx.Children() { - str.WriteString(child.string(depth + 1)) - } - return str.String() -} - -func (ctx *Context) String() string { - return ctx.string(0) -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/context/context_test.go b/server/Godeps/_workspace/src/github.com/root-gg/context/context_test.go deleted file mode 100644 index ca1722ff..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/context/context_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package context - -import ( - "errors" - "fmt" - 
"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" - "testing" - "time" -) - -func TestMain(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - child.Fork("fork2") - if child.Status() != Running { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Running) - } - child.Finalize(nil) - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } - children := root.AllChildren() - if len(children) != 2 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 2) - } -} - -func TestDefaultName(t *testing.T) { - root := NewContext("") - defaultName := "TestDefaultName" - if root.Name() != defaultName { - t.Errorf("Invalid child default name %s instead of %s", root.Name, defaultName) - } - child := root.Fork("") - if child.Name() != defaultName { - t.Errorf("Invalid child default name %s instead of %s", child.Name, defaultName) - } -} - -func TestDates(t *testing.T) { - root := NewContext("ROOT") - fmt.Printf("StartDate : %s\n", root.StartDate().String()) - fmt.Printf("Running since : %s\n", root.Elapsed().String()) - if root.EndDate() != nil { - t.Error("EndDate on running context") - } - root.Finalize(Success) - fmt.Printf("EndDate : %s\n", root.StartDate().String()) - fmt.Printf("Has run : %s\n", root.Elapsed().String()) -} - -func TestTimers(t *testing.T) { - root := NewContext("ROOT") - root.Time("t1").Stop() - root.Time("t2") - timers := root.Timers() - if len(timers) != 2 { - t.Errorf("Invalid timer count %d instead of %d", len(root.Timers()), 2) - } - if timers[0].Status() != utils.Stopped { - t.Errorf("Invalid timer %s status %s instead of %s", timers[0].Name(), timers[0].Status(), utils.Stopped) - } - if timers[1].Status() != utils.Running { - t.Errorf("Invalid timer %s status %s instead of %s", timers[1].Name(), timers[1].Status(), utils.Running) - } -} - -func TestFinalize(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - go func() { child.Finalize(Success) }() - child.Wait() - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } -} - -func TestWaitAllChildren(t *testing.T) { - root := NewContext("ROOT") - child1 := root.Fork("fork1") - child2 := child1.Fork("fork2") - child3 := child2.Fork("fork3") - go func() { - time.Sleep(100 * time.Millisecond) - child1.Finalize(Success) - time.Sleep(100 * time.Millisecond) - child3.Finalize(Success) - time.Sleep(100 * time.Millisecond) - child2.Finalize(Success) - }() - root.WaitAllChildren() - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } - } -} - -func TestStatusOverride(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - var err = errors.New("error") - go func() { child.Finalize(err) }() - child.Wait() - child.Finalize(Success) - if child.Status() != err { - t.Errorf("Invalid child status %s instead of %s", child.Status(), err) - } -} - -func TestTimeoutOk(t *testing.T) { - root := NewContext("ROOT") - child := root.ForkWithTimeout("", 200*time.Millisecond) - go func() { - time.Sleep(100 * time.Millisecond) - child.Finalize(Success) - }() - <-child.Done() - if child.Status() != Success { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Success) - } -} 
- -func TestTimeoutKo(t *testing.T) { - root := NewContext("ROOT") - child := root.ForkWithTimeout("", 100*time.Millisecond) - go func() { - time.Sleep(200 * time.Millisecond) - child.Finalize(Success) - }() - <-child.Done() - if child.Status() != Timedout { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } -} - -func TestTimeoutDates(t *testing.T) { - root := NewContextWithTimeout("", 100*time.Millisecond) - fmt.Printf("Deadline is : %s\n", root.Deadline().String()) - fmt.Printf("Remaining time : %s\n", root.Remaining().String()) - child := root.Fork("") - if child.Deadline() != *child.StartDate() { - t.Errorf("Invalid deadline for non timed context : %s\n", child.Deadline().String()) - } - if child.Remaining().Seconds() > 0 { - t.Errorf("Invalid remaining for non timed context : %s\n", child.Remaining().String()) - } -} - -func TestCancel(t *testing.T) { - root := NewContext("ROOT") - root.Fork("").Fork("").Fork("") - root.Cancel() - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if child.Status() != Canceled { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Timedout) - } - } -} - -func TestAutoCancel(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1").AutoCancel() - child.Fork("").Fork("").Fork("") - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children := child.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - for _, child := range children { - if child.Status() != Canceled { - t.Errorf("Invalid child status %s instead of %s", child.Status(), Canceled) - } - } -} - -func TestDetach(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("") - child.Fork("").Fork("") - children := root.AllChildren() - if len(children) != 3 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 3) - } - root.DetachChild(child) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } -} - -func TestAutoDetach(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1").AutoDetach() - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestDetachChild(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - root.DetachChild(child) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestAutoDetachChild(t *testing.T) { - root := NewContext("ROOT") - child := root.Fork("fork1") - root.AutoDetachChild(child) - children := root.AllChildren() - if len(children) != 1 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 1) - } - child.Finalize(Success) - time.Sleep(100 * time.Millisecond) - children = root.AllChildren() - if len(children) != 0 { - t.Errorf("Invalid childen count %d instead of %d", len(children), 0) - } -} - -func TestValue(t *testing.T) { - root := NewContext("ROOT") - 
root.Set("foo", "bar") - value, ok := root.Get("foo") - if !ok { - t.Error("Missing value for key \"foo\"") - } - if value.(string) != "bar" { - t.Error("Invalid value \"%s\" for key \"foo\" sould be \"bar\"", value) - } - child := root.Fork("fork1") - value, ok = child.Get("foo") - if !ok { - t.Error("Missing value for key \"foo\" in child context") - } - if value.(string) != "bar" { - t.Error("Invalid value \"%s\" for key \"foo\" child context sould be \"bar\"", value) - } -} - -func TestMissingValue(t *testing.T) { - root := NewContext("ROOT") - root.Set("go", "lang") - child := root.Fork("scala") - child.Set("sca", "la") - child2 := child.Fork("java") - child2.Get("ja") - value, ok := child.Get("foo") - if ok { - t.Error("Missing key \"ja\" should be missing") - } - if value != nil { - t.Error("Missing value \"%s\" for key \"foo\" should be missing", value) - } -} - -func TestValueOverride(t *testing.T) { - root := NewContext("ROOT") - root.Set("foo", "bar") - child := root.Fork("") - child.Set("foo", "baz") - value, ok := root.Get("foo") - if !ok { - t.Error("Missing value for key foo") - } - if value.(string) != "bar" { - t.Error("Invalid value \"%s\" for key foo sould be \"bar\"", value) - } - value, ok = child.Get("foo") - if !ok { - t.Error("Missing value for key foo in child context") - } - if value.(string) != "baz" { - t.Error("Invalid value \"%s\" for key foo child context sould be \"baz\"", value) - } -} - -func TestDisplay(t *testing.T) { - root := NewContext("ROOT") - fork1 := root.Fork("fork1") - fork1.Fork("fork11") - fork1.Fork("fork12").Fork("fork121") - fork1.Finalize(Success) - fork1.Cancel() - fork2 := root.Fork("fork2") - fork2.Fork("fork21") - fork2.Fork("fork22").Fork("fork221") - fork2.Time("t1").Stop() - fork2.Time("t2").Stop() - fork2.Time("t3") - fmt.Println(root.String()) -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore b/server/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore new file mode 100644 index 00000000..a388e694 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/.gitignore @@ -0,0 +1,2 @@ +.idea +juliet diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml b/server/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml new file mode 100644 index 00000000..70eed362 --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip \ No newline at end of file diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE b/server/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE new file mode 100644 index 00000000..6c01cb3e --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/LICENCE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) <2015> + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/README.md b/server/Godeps/_workspace/src/github.com/root-gg/juliet/README.md
new file mode 100644
index 00000000..7eb7f11f
--- /dev/null
+++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/README.md
@@ -0,0 +1,60 @@
+
+Juliet is a lightweight middleware chaining helper that passes a Context (map) object
+from one middleware to the next.
+
+This is a fork of [Stack](https://github.com/alexedwards/stack) by Alex Edwards,
+which is inspired by [Alice](https://github.com/justinas/alice) by Justinas Stankevicius.
+
+### Write a ContextMiddleware
+```
+    // Write a ContextMiddleware
+    func middleware(ctx *juliet.Context, next http.Handler) http.Handler {
+        return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+            // Play with the context
+            ctx.Set("key", "value")
+
+            // Pass the request to the next middleware / handler
+            next.ServeHTTP(resp, req)
+        })
+    }
+
+    // To create a new chain
+    chain := juliet.NewChain(middleware1, middleware2)
+
+    // To append a middleware at the end of the chain
+    chain = chain.Append(middleware3, middleware4)
+
+    // To prepend a middleware at the beginning of a chain
+    chain = juliet.NewChain(firstMiddleware).AppendChain(chain)
+
+    // Classic middleware without context can be added to the chain using the Adapt function
+    func middlewareWithoutContext(next http.Handler) http.Handler {
+        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            // middleware logic
+            next.ServeHTTP(w, r)
+        })
+    }
+
+    chain = chain.Append(juliet.Adapt(middlewareWithoutContext))
+
+    // Write a ContextHandler
+    func handler(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+        // Play with the context
+        value, _ := ctx.Get("key")
+
+        // Write the HTTP response
+        resp.Write([]byte(fmt.Sprintf("value is %v\n", value)))
+    }
+
+    // Execute a middleware chain
+    http.Handle("/", chain.Then(handler))
+
+    // Classic http.Handler without context
+    http.Handle("/404", chain.ThenHandler(http.NotFoundHandler()))
+
+    // Classic http.HandlerFunc without context
+    func pingHandler(w http.ResponseWriter, r *http.Request) {
+        w.Write([]byte("pong"))
+    }
+    http.Handle("/ping", chain.ThenHandlerFunc(pingHandler))
+```
\ No newline at end of file
diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/context.go b/server/Godeps/_workspace/src/github.com/root-gg/juliet/context.go
new file mode 100644
index 00000000..15cb1320
--- /dev/null
+++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/context.go
@@ -0,0 +1,57 @@
+package juliet
+
+import (
+	"fmt"
+)
+
+// Context holds a map[interface{}]interface{} to pass along the middleware chain.
+type Context struct {
+	values map[interface{}]interface{}
+}
+
+// NewContext creates a new context instance.
+func NewContext() (ctx *Context) {
+	ctx = new(Context)
+	ctx.values = make(map[interface{}]interface{})
+	return
+}
+
+// Get returns the value matching the key from the context.
+func (ctx *Context) Get(key interface{}) (value interface{}, ok bool) {
+	value, ok = ctx.values[key]
+	return
+}
+
+// Set adds a value to the context or overrides an existing value.
+func (ctx *Context) Set(key interface{}, val interface{}) {
+	ctx.values[key] = val
+}
+
+// Delete removes a value from the context.
+func (ctx *Context) Delete(key interface{}) {
+	delete(ctx.values, key)
+}
+
+// Clear removes all values from the context.
+func (ctx *Context) Clear() {
+	for key := range ctx.values {
+		delete(ctx.values, key)
+	}
+}
+
+// Copy creates a new copy of the context.
+func (ctx *Context) Copy() *Context {
+	nc := NewContext()
+	for key, value := range ctx.values {
+		nc.values[key] = value
+	}
+	return nc
+}
+
+// String returns a string representation of the context values.
+func (ctx *Context) String() (str string) {
+	for key, value := range ctx.values {
+		str += fmt.Sprintf("%v => %v\n", key, value)
+	}
+	return
+}
diff --git a/server/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go b/server/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go
new file mode 100644
index 00000000..589f3e36
--- /dev/null
+++ b/server/Godeps/_workspace/src/github.com/root-gg/juliet/juliet.go
@@ -0,0 +1,164 @@
+package juliet
+
+import (
+	"net/http"
+)
+
+// ContextMiddleware is a constructor that closes a Context into a middleware.
+type ContextMiddleware func(ctx *Context, next http.Handler) http.Handler
+
+// ContextHandler is a constructor that closes a Context into an http.Handler.
+type ContextHandler func(ctx *Context) http.Handler
+
+// ContextHandlerFunc is a constructor that closes a Context into an http.HandlerFunc.
+type ContextHandlerFunc func(ctx *Context, resp http.ResponseWriter, req *http.Request)
+
+// Chain is a wrapper for a ContextMiddleware instance,
+// linking to the previous middleware in the chain.
+type Chain struct {
+	parent     *Chain
+	middleware ContextMiddleware
+}
+
+// NewChain creates a new ContextMiddleware chain.
+func NewChain(cm ...ContextMiddleware) (chain *Chain) {
+	chain = new(Chain)
+	if len(cm) > 0 {
+		chain.middleware = cm[0]
+		if len(cm) > 1 {
+			chain = chain.Append(cm[1:]...)
+		}
+	}
+	return
+}
+
+// append adds a single ContextMiddleware to the chain.
+func (chain *Chain) append(cm ContextMiddleware) (newChain *Chain) {
+	newChain = NewChain(cm)
+	newChain.parent = chain
+	return newChain
+}
+
+// Append adds one or more ContextMiddleware to the chain.
+func (chain *Chain) Append(cms ...ContextMiddleware) (newChain *Chain) {
+	newChain = chain
+	for _, cm := range cms {
+		newChain = newChain.append(cm)
+	}
+
+	return newChain
+}
+
+// Adapt adds context to a middleware so it can be added to the chain.
+func Adapt(fn func(http.Handler) http.Handler) ContextMiddleware {
+	return func(ctx *Context, h http.Handler) http.Handler {
+		return fn(h)
+	}
+}
+
+// head returns the top/first middleware of the Chain.
+func (chain *Chain) head() (head *Chain) {
+	// Find the head of the chain
+	head = chain
+	for head.parent != nil {
+		head = head.parent
+	}
+	return
+}
+
+// copy duplicates the whole chain of ContextMiddleware.
+func (chain *Chain) copy() (newChain *Chain) {
+	newChain = NewChain(chain.middleware)
+	if chain.parent != nil {
+		newChain.parent = chain.parent.copy()
+	}
+	return
+}
+
+// AppendChain duplicates a chain and links it to the current chain.
+// Appending to the old chain does not alter the new one.
+func (chain *Chain) AppendChain(tail *Chain) (newChain *Chain) {
+	// Copy the chain to attach
+	newChain = tail.copy()
+
+	// Attach the chain to extend to the new tail
+	newChain.head().parent = chain
+
+	// Return the new tail
+	return
+}
+
+// Then adds a ContextHandlerFunc to the end of the chain
+// and returns an http.Handler compliant ChainHandler.
+func (chain *Chain) Then(fn ContextHandlerFunc) (ch *ChainHandler) {
+	ch = newHandler(chain, adaptContextHandlerFunc(fn))
+	return
+}
+
+// ThenHandler adds an http.Handler to the end of the chain
+// and returns an http.Handler compliant ChainHandler.
+func (chain *Chain) ThenHandler(handler http.Handler) (ch *ChainHandler) {
+	ch = newHandler(chain, adaptHandler(handler))
+	return
+}
+
+// ThenHandlerFunc adds an http.HandlerFunc to the end of the chain
+// and returns an http.Handler compliant ChainHandler.
+func (chain *Chain) ThenHandlerFunc(fn func(http.ResponseWriter, *http.Request)) (ch *ChainHandler) {
+	ch = newHandler(chain, adaptHandlerFunc(fn))
+	return
+}
+
+// ChainHandler holds a chain and a final handler.
+// It satisfies the http.Handler interface and can be
+// served directly by a net/http server.
+type ChainHandler struct {
+	chain   *Chain
+	handler ContextHandler
+}
+
+// newHandler creates a new ChainHandler from a chain and a final handler.
+func newHandler(chain *Chain, handler ContextHandler) (ch *ChainHandler) {
+	ch = new(ChainHandler)
+	ch.chain = chain
+	ch.handler = handler
+	return
+}
+
+// ServeHTTP builds the chain of handlers in order, closing over the context along the way, and executes it.
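Before the ServeHTTP implementation below, here is a minimal end-to-end sketch of how these pieces compose: build a chain once, serve it, and let every request get its own fresh Context. This is an illustrative sketch, not part of the diff; the requestID middleware and hello handler are hypothetical names, and the import assumes juliet's upstream path.

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/root-gg/juliet"
)

// requestID is a hypothetical ContextMiddleware: it stores a value in
// the per-request Context before handing control to the next handler.
func requestID(ctx *juliet.Context, next http.Handler) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		ctx.Set("id", 42)
		next.ServeHTTP(resp, req)
	})
}

// hello is a hypothetical ContextHandlerFunc: it reads the value set upstream.
func hello(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
	id, _ := ctx.Get("id")
	fmt.Fprintf(resp, "request %v\n", id)
}

func main() {
	// Then returns a *ChainHandler, which satisfies http.Handler.
	handler := juliet.NewChain(requestID).Then(hello)

	// Exercise the chain without opening a socket.
	req, _ := http.NewRequest("GET", "/", nil)
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)
	fmt.Print(rec.Body.String()) // prints: request 42
}
```

As ServeHTTP below shows, the Context is created per request inside ServeHTTP, so values set while handling one request never leak into another.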
+func (ch *ChainHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + ctx := NewContext() + + // Build the context handler chain + handler := ch.handler(ctx) + chain := ch.chain + for chain != nil { + if chain.middleware != nil { + handler = chain.middleware(ctx, handler) + } + chain = chain.parent + } + + handler.ServeHTTP(resp, req) +} + +// Adapt a ContextHandlerFunc into a contextHandler +func adaptContextHandlerFunc(fn ContextHandlerFunc) ContextHandler { + return func(ctx *Context) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fn(ctx, w, r) + }) + } +} + +// Adapt a http.Handler into a contextHandler +func adaptHandler(h http.Handler) ContextHandler { + return func(ctx *Context) http.Handler { + return h + } +} + +// Adapt a http.HandlerFunc into a contextHandler +func adaptHandlerFunc(fn func(w http.ResponseWriter, r *http.Request)) ContextHandler { + return adaptHandler(http.HandlerFunc(fn)) +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go b/server/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go deleted file mode 100644 index 90887c34..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/logger/logger_test.go +++ /dev/null @@ -1,474 +0,0 @@ -package logger - -import ( - "bytes" - "fmt" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" - "io/ioutil" - "os" - "path" - "testing" - "time" -) - -var logMessage string = "This is a log message\n" - -func TestNew(t *testing.T) { - logger := NewLogger() - if logger.MinLevel != MinLevel { - t.Errorf("Invalid timer default level %s instead of %s", logger.MinLevel, MinLevel) - } - logger.Log(INFO, logMessage) -} - -func TestLogger(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(0) - logger.Log(INFO, logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != logMessage { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), logMessage) - } -} - -func TestAutoNewLine(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(0) - logger.Log(INFO, "This is a log message") - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != logMessage { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), logMessage) - } -} - -func TestPrefix(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - prefix := "prefix" - logger := NewLogger().SetOutput(buffer).SetFlags(0).SetPrefix(prefix) - expected := fmt.Sprintf("[%s] %s", prefix, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestDateFormat(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Fdate).SetDateFormat("01/02/2006") - expected := fmt.Sprintf("[%s] %s", time.Now().Format("01/02/2006"), logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), 
expected) - } -} - -func TestShortFile(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFile) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d] %s", path.Base(file), line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestLongFile(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FlongFile) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d] %s", file, line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestShortFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFunction) - expected := fmt.Sprintf("[%s] %s", "TestShortFunction", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestLongFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FlongFunction) - expected := fmt.Sprintf("[%s] %s", "github.com/root-gg/logger.TestLongFunction", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestFileAndFunction(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFile | FshortFunction) - file, line, _ := utils.GetCaller(1) - expected := fmt.Sprintf("[%s:%d TestFileAndFunction] %s", path.Base(file), line+2, logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestCallDepth(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(FshortFunction).SetCallDepth(1) - expected := fmt.Sprintf("[%s] %s", "Log", logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestDebug(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(DEBUG) - expected := fmt.Sprintf("[%s] %s", levels[DEBUG], logMessage) - logger.Debug(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), 
expected) - } - buffer.Reset() - logger.Debugf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(DEBUG) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestInfo(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(INFO) - expected := fmt.Sprintf("[%s] %s", levels[INFO], logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Infof("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(INFO) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestWarning(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(WARNING) - expected := fmt.Sprintf("[%s] %s", levels[WARNING], logMessage) - logger.Warning(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Warningf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(WARNING) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestCritical(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(CRITICAL) - expected := fmt.Sprintf("[%s] %s", levels[CRITICAL], logMessage) - logger.Critical(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - buffer.Reset() - logger.Criticalf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - logIf := logger.LogIf(CRITICAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestFatal(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel).SetMinLevel(FATAL) - expected := fmt.Sprintf("[%s] %s", levels[FATAL], logMessage) - var exitcode int = 0 - exiter = func(code int) { - exitcode = code - } - logger.Fatal(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != 
expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - if exitcode != 1 { - t.Errorf("Invalid exit code %d instead %d", exitcode, 1) - } - exitcode = 0 - buffer.Reset() - logger.Fatalf("%s", logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } - if exitcode != 1 { - t.Errorf("Invalid exit code %d instead %d", exitcode, 1) - } - logIf := logger.LogIf(FATAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestFixedSizeLevel(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(Flevel | FfixedSizeLevel) - expected := fmt.Sprintf("[%-8s] %s", levels[INFO], logMessage) - logger.Info(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} - -func TestMinLevel(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetMinLevel(FATAL) - buffer.Reset() - logger.Debug(logMessage) - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf := logger.LogIf(DEBUG) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Info(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(INFO) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Warning(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(WARNING) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - buffer.Reset() - logger.Critical(logMessage) - output, err = ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if len(output) > 0 { - t.Errorf("Invalid logger output when level < MinLevel") - } - logIf = logger.LogIf(CRITICAL) - if logIf != false { - t.Errorf("Invalid LogIf %t instead of %t", logIf, false) - } - logIf = logger.LogIf(FATAL) - if logIf != true { - t.Errorf("Invalid LogIf %t instead of %t", logIf, true) - } -} - -func TestMinLevelFromString(t *testing.T) { - logger := NewLogger() - logger.SetMinLevelFromString("DEBUG") - if logger.MinLevel != DEBUG { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, DEBUG) - } - logger.SetMinLevelFromString("INVALID") - if logger.MinLevel != DEBUG { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, DEBUG) - } - logger.SetMinLevelFromString("INFO") - if logger.MinLevel != INFO { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, INFO) - } - logger.SetMinLevelFromString("WARNING") - if logger.MinLevel != WARNING { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, WARNING) - } - 
logger.SetMinLevelFromString("CRITICAL") - if logger.MinLevel != CRITICAL { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, CRITICAL) - } - logger.SetMinLevelFromString("FATAL") - if logger.MinLevel != FATAL { - t.Errorf("Invalid min level %s instead of %s", logger.MinLevel, FATAL) - } -} - -func TestError(t *testing.T) { - devNull, err := os.Open(os.DevNull) - if err != nil { - t.Errorf("Unable to open %s : %s", os.DevNull, err) - } - logger := NewLogger().SetOutput(devNull) - err = logger.EWarning("Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.EWarningf("Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } - err = logger.ECritical("Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.ECriticalf("Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } - err = logger.Error(DEBUG, "Oops!") - if err.Error() != "Oops!" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops!") - } - err = logger.Errorf(DEBUG, "Oops : %s", "it's broken") - if err.Error() != "Oops : it's broken" { - t.Errorf("Invalid error message \"%s\" instead of \"%s\"", err.Error(), "Oops : it's broken") - } -} - -func TestCopy(t *testing.T) { - logger1 := NewLogger().SetPrefix("logger1") - logger2 := logger1.Copy().SetPrefix("logger2") - if logger1.Prefix != "logger1" { - t.Errorf("Invalid logger prefix %t instead of %t", logger1.Prefix, "logger1") - } - if logger2.Prefix != "logger2" { - t.Errorf("Invalid logger prefix %t instead of %t", logger2.Prefix, "logger2") - } -} - -type TestData struct { - Foo string -} - -func TestDump(t *testing.T) { - buffer := bytes.NewBuffer([]byte{}) - logger := NewLogger().SetOutput(buffer).SetFlags(0) - logger.Dump(INFO, TestData{"bar"}) - expected := "{\n \"Foo\": \"bar\"\n}\n" - output, err := ioutil.ReadAll(buffer) - if err != nil { - t.Errorf("Unable to read logger output : %s", err) - } - if string(output) != expected { - t.Errorf("Invalid log message \"%s\" instead of \"%s\"", string(output), expected) - } -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go deleted file mode 100644 index 80fd9622..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/bytes_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestBytesToString(t *testing.T) { - - // Test for all units - testBytes := BytesToString(123) // Should get : 123 B - testKiloBytes := BytesToString(4755) // Should get : 4.64 KB - testMegaBytes := BytesToString(6541615) // Should get : 6.24 MB - testGigaBytes := BytesToString(2571257332) // Should get : 2.39 GB - - if testBytes != "123 B" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(123)", testBytes, "123 B") - } else if testKiloBytes != "4.64 KB" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(4755)", testBytes, "4.64 KB") - } else if testMegaBytes != "6.24 MB" { - t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(6541615)", testBytes, "6.24 MB") - } else if testGigaBytes != "2.39 GB" { - 
t.Errorf("Unexpected return for %s, got %s, expecting %s", "BytesToString(2571257332)", testBytes, "2.39 GB") - } -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go deleted file mode 100644 index e2f1020a..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/caller_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package utils - -import ( - "fmt" - "path" - "testing" -) - -func TestGetCaller(t *testing.T) { - file, line, function := GetCaller(1) - filename := path.Base(file) - if filename != "caller_test.go" { - t.Errorf("Invalid file name %s instead of %s", filename, "caller_test.go") - } - if line != 10 { - t.Errorf("Invalid line %d instead of %d", line, 10) - } - if function != "github.com/root-gg/utils.TestGetCaller" { - t.Errorf("Invalid function %s instead of %s", function, "github.com/root-gg/utils.TestGetCaller") - } - fmt.Printf("%s:%d : %s\n", file, line, function) - return -} - -func TestParseFunction(t *testing.T) { - _, _, fct := GetCaller(1) - pkg, function := ParseFunction(fct) - if pkg != "github.com/root-gg/utils" { - t.Errorf("Invalid package name %s instead of %s", pkg, "github.com/root-gg/utils") - } - if function != "TestParseFunction" { - t.Errorf("Invalid package name %s instead of %s", function, "TestParseFunction") - } -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go deleted file mode 100644 index bfec4ee3..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/dumper_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import ( - "testing" -) - -type TestDumper struct { - Foo string -} - -func TestDump(t *testing.T) { - Dump(TestDumper{"bar"}) -} - -func TestSdump(t *testing.T) { - dump := Sdump(TestDumper{"bar"}) - expected := "{\n \"Foo\": \"bar\"\n}" - if dump != expected { - t.Errorf("Invalid dump got %s instead of %s", dump, expected) - } -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go deleted file mode 100644 index ddbdb727..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/json_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package utils - -import "testing" - -type TestJson struct { - Foo string -} - -func TestToJson(t *testing.T) { - data := TestJson{"bar"} - json, err := ToJson(data) - if err != nil { - t.Errorf("Unable to serialize %v to json : %s", data, err) - } - expected := "{\"Foo\":\"bar\"}" - if string(json) != expected { - t.Errorf("Invalid dump got %s instead of %s", string(json), expected) - } -} - -func TestToJsonString(t *testing.T) { - data := TestJson{"bar"} - json, err := ToJsonString(data) - if err != nil { - t.Errorf("Unable to serialize %v to json : %s", data, err) - } - expected := "{\"Foo\":\"bar\"}" - if json != expected { - t.Errorf("Invalid dump got %s instead of %s", json, expected) - } -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum.go index cd36fcd1..b9f920b0 100644 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum.go +++ b/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum.go @@ -4,6 +4,7 @@ import ( "crypto/md5" "fmt" "io" + "os" ) func Md5sum(str string) (md5sum string, err error) { @@ -15,3 +16,19 @@ func Md5sum(str string) (md5sum string, err error) { md5sum 
= fmt.Sprintf("%x", h.Sum(nil)) return } + +func FileMd5sum(filePath string) (md5sum string, err error) { + file, err := os.Open(filePath) + if err != nil { + return + } + defer file.Close() + + h := md5.New() + if _, err = io.Copy(h, file); err != nil { + return + } + + md5sum = fmt.Sprintf("%x", h.Sum(nil)) + return +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go deleted file mode 100644 index 008cf427..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/md5sum_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestMd5sum(t *testing.T) { - md5sum, err := Md5sum("Lorem ipsum dolor sit amet") - if err != nil { - t.Errorf("Unable to compute md5sum : %s", err) - } - sum := "fea80f2db003d4ebc4536023814aa885" - if md5sum != sum { - t.Errorf("Invalid md5sum got %s instead of %s", md5sum, sum) - } - return -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/net.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/net.go new file mode 100644 index 00000000..5617559f --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/utils/net.go @@ -0,0 +1,23 @@ +package utils + +import ( + "net" +) + +func NtoI(ip net.IP) (ipInt uint32) { + ip = ip.To4() + ipInt |= uint32(ip[0]) << 24 + ipInt |= uint32(ip[1]) << 16 + ipInt |= uint32(ip[2]) << 8 + ipInt |= uint32(ip[3]) + return +} + +func ItoN(ipInt uint32) net.IP { + bytes := make([]byte, 4) + bytes[0] = byte(ipInt >> 24 & 0xFF) + bytes[1] = byte(ipInt >> 16 & 0xFF) + bytes[2] = byte(ipInt >> 8 & 0xFF) + bytes[3] = byte(ipInt & 0xFF) + return net.IP(bytes) +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go deleted file mode 100644 index fe7198e4..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/reflect_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package utils - -import ( - "testing" -) - -type TestReflect struct { - Foo string - Map map[string]string -} - -func TestAssign(t *testing.T) { - values := make(map[string]interface{}) - values["Foo"] = "bar" - values["Map"] = map[string]string{"go": "pher"} - values["Ja"] = "va" - test := new(TestReflect) - Assign(test, values) - if test.Foo != "bar" { - t.Errorf("Invalid dume got %s instead of %s", test.Foo, "bar") - } - if test.Map == nil { - t.Error("Missing value for Map") - } - if v, ok := test.Map["go"]; ok { - if v != "pher" { - t.Errorf("Invalid dume got %s instead of %s", v, "pher") - } - } else { - t.Error("Missing value for map key \"go\"") - } - return -} - -func TestToInterfaceArray(t *testing.T) { - ToInterfaceArray([]int{1, 2, 3, 4, 5, 6}) -} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/strings.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/strings.go new file mode 100644 index 00000000..8e31e84e --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/utils/strings.go @@ -0,0 +1,8 @@ +package utils + +func Chomp(str string) string { + if str[len(str)-1] == '\n' { + str = str[:len(str)-1] + } + return str +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/time.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/time.go new file mode 100644 index 00000000..1eafc53c --- /dev/null +++ b/server/Godeps/_workspace/src/github.com/root-gg/utils/time.go @@ -0,0 +1,12 @@ +package utils + +import "time" + +func 
TruncateDuration(d time.Duration, precision time.Duration) time.Duration { + if d == 0 { + return time.Duration(0) + } + p := float64(precision) + n := float64(int(float64(d)/p)) * p + return time.Duration(n) +} diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/timer.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/timer.go index 91eaf7c0..80f97891 100644 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/timer.go +++ b/server/Godeps/_workspace/src/github.com/root-gg/utils/timer.go @@ -13,6 +13,7 @@ var Uninitalized = errors.New("uninitalized") type SplitTime struct { name string start *time.Time + split *time.Time stop *time.Time } @@ -37,6 +38,22 @@ func (split *SplitTime) StartDate() *time.Time { return split.start } +func (split *SplitTime) Split() (elapsed time.Duration) { + if split.start != nil { + if split.stop == nil { + now := time.Now() + if split.split == nil { + elapsed = now.Sub(*split.start) + } else { + elapsed = now.Sub(*split.split) + } + split.split = &now + return + } + } + return +} + func (split *SplitTime) Stop() { if split.stop == nil { now := time.Now() diff --git a/server/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go b/server/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go deleted file mode 100644 index 0ad05cf9..00000000 --- a/server/Godeps/_workspace/src/github.com/root-gg/utils/timer_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package utils - -import ( - "fmt" - "testing" - "time" -) - -func TestNewTimer(t *testing.T) { - timer := NewSplitTime("main") - if timer.Name() != "main" { - t.Errorf("Invalid timer name %s instead of %s", timer.Name(), "main") - } -} - -func TestTimerStatus(t *testing.T) { - timer := NewSplitTime("timer") - if timer.Status() != Uninitalized { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Uninitalized) - } - timer.Start() - if timer.Status() != Running { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Running) - } - timer.Stop() - if timer.Status() != Stopped { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Stopped) - } -} - -func TestTimerDates(t *testing.T) { - timer := NewSplitTime("timer") - if timer.StartDate() != nil { - t.Error("Start date on uninitalized timer : %s", timer.StartDate().String()) - } - if timer.StopDate() != nil { - t.Error("Stop date on uninitalized timer : %s", timer.StopDate().String()) - } - timer.Start() - if timer.StartDate() == nil { - t.Error("Missing start date on running timer") - } - if timer.StopDate() != nil { - t.Error("Stop date on running timer : %s", timer.StopDate().String()) - } - timer.Stop() - if timer.StartDate() == nil { - t.Error("Missing start date on stopped timer") - } - if timer.StopDate() == nil { - t.Error("Missing stop date on stopped timer") - } -} - -func TestTimerImmutability(t *testing.T) { - timer := NewSplitTime("timer") - timer.Start() - startDate1 := timer.StartDate() - timer.Start() - startDate2 := timer.StartDate() - if startDate1 != startDate2 { - t.Errorf("Non immutable start date : %s != %s", startDate1.String(), startDate2.String()) - } - timer.Stop() - stopDate1 := timer.StopDate() - timer.Stop() - stopDate2 := timer.StopDate() - if stopDate1 != stopDate2 { - t.Errorf("Non immutable stop date : %s != %s", stopDate1.String(), stopDate2.String()) - } - timer.Start() - if timer.Status() != Stopped { - t.Errorf("Non immutable timer status %s instead of %s", timer.Status(), Stopped) - } - startDate3 := timer.StartDate() - if startDate1 != startDate3 { - 
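The small utils helpers added in this diff (NtoI/ItoN, TruncateDuration, Chomp) are easiest to verify with a quick sketch. The example below is illustrative only, and assumes the package is imported by its upstream path rather than the vendored Godeps one.

```
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/root-gg/utils"
)

func main() {
	// NtoI packs an IPv4 address into a big-endian uint32; ItoN reverses it.
	ip := net.ParseIP("192.168.1.1")
	n := utils.NtoI(ip)
	fmt.Println(n)             // 3232235777
	fmt.Println(utils.ItoN(n)) // 192.168.1.1

	// TruncateDuration rounds a duration down to a multiple of the precision.
	d := 1234567890 * time.Nanosecond
	fmt.Println(utils.TruncateDuration(d, time.Millisecond)) // 1.234s

	// Chomp strips a single trailing newline. Note it indexes
	// str[len(str)-1], so it assumes a non-empty input string.
	fmt.Printf("%q\n", utils.Chomp("plik\n")) // "plik"
}
```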
t.Errorf("Non immutable start date : %s != %s", startDate1.String(), startDate3.String()) - } -} - -func TestTimerElapsed(t *testing.T) { - timer := NewSplitTime("timer") - if timer.Elapsed() != time.Duration(0) { - t.Errorf("Invalid uninitialized timer elapsed time %s", timer.Elapsed().String()) - } - timer.Start() - if timer.Elapsed() <= time.Duration(0) { - t.Errorf("Invalid running timer elapsed time %s", timer.Elapsed().String()) - } - timer.Stop() - if timer.Elapsed() <= time.Duration(0) { - t.Errorf("Invalid stopped timer elapsed time %s", timer.Elapsed().String()) - } -} - -func TestTimerStopUninitalizedTimer(t *testing.T) { - timer := NewSplitTime("timer") - timer.Stop() - if timer.Status() != Stopped { - t.Errorf("Invalid timer status %s instead of %s", timer.Status(), Stopped) - } - if timer.Elapsed() != time.Duration(0) { - t.Errorf("Invalid uninitialized stopped timer elapsed time %s", timer.Elapsed().String()) - } -} - -func TestTimerString(t *testing.T) { - timer := NewSplitTime("timer") - fmt.Println(timer.String()) - timer.Start() - fmt.Println(timer.String()) - timer.Stop() - fmt.Println(timer.String()) -} diff --git a/server/Godeps/_workspace/src/golang.org/x/net/LICENSE b/server/Godeps/_workspace/src/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/server/Godeps/_workspace/src/golang.org/x/net/PATENTS b/server/Godeps/_workspace/src/golang.org/x/net/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/server/Godeps/_workspace/src/golang.org/x/net/context/context.go b/server/Godeps/_workspace/src/golang.org/x/net/context/context.go
new file mode 100644
index 00000000..11bd8d34
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/net/context/context.go
@@ -0,0 +1,447 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between them
+// must propagate the Context, optionally replacing it with a modified copy
+// created using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+//	func DoSomething(ctx context.Context, arg Arg) error {
+//		// ... use ctx ...
+//	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//	// Stream generates values with DoSomething and sends them to out
+	//	// until DoSomething returns an error or ctx.Done is closed.
+	//	func Stream(ctx context.Context, out chan<- Value) error {
+	//		for {
+	//			v, err := DoSomething(ctx)
+	//			if err != nil {
+	//				return err
+	//			}
+	//			select {
+	//			case <-ctx.Done():
+	//				return ctx.Err()
+	//			case out <- v:
+	//			}
+	//		}
+	//	}
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	//	// Package user defines a User type that's stored in Contexts.
+	//	package user
+	//
+	//	import "golang.org/x/net/context"
+	//
+	//	// User is the type of value stored in the Contexts.
+	//	type User struct {...}
+	//
+	//	// key is an unexported type for keys defined in this package.
+	//	// This prevents collisions with keys defined in other packages.
+	//	type key int
+	//
+	//	// userKey is the key for user.User values in Contexts. It is
+	//	// unexported; clients use user.NewContext and user.FromContext
+	//	// instead of using this key directly.
+	//	var userKey key = 0
+	//
+	//	// NewContext returns a new Context that carries value u.
+	//	func NewContext(ctx context.Context, u *User) context.Context {
+	//		return context.WithValue(ctx, userKey, u)
+	//	}
+	//
+	//	// FromContext returns the User value stored in ctx, if any.
+	//	func FromContext(ctx context.Context) (*User, bool) {
+	//		u, ok := ctx.Value(userKey).(*User)
+	//		return u, ok
+	//	}
+	Value(key interface{}) interface{}
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
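+//
+// After Done is closed, a caller can distinguish the two terminal states
+// (sketch):
+//
+//	switch ctx.Err() {
+//	case context.Canceled:
+//		// canceled explicitly via a CancelFunc
+//	case context.DeadlineExceeded:
+//		// the deadline or timeout elapsed
+//	}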
+var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. 
+func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. 
+ + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 00000000..e3170e33 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 00000000..56bcbadb --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build !go1.5
+
+package ctxhttp
+
+import "net/http"
+
+type requestCanceler interface {
+	CancelRequest(*http.Request)
+}
+
+func canceler(client *http.Client, req *http.Request) func() {
+	rc, ok := client.Transport.(requestCanceler)
+	if !ok {
+		return func() {}
+	}
+	return func() {
+		rc.CancelRequest(req)
+	}
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000..baf036c2
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
+	cancel := canceler(client, req)
+
+	type responseAndError struct {
+		resp *http.Response
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	go func() {
+		resp, err := client.Do(req)
+		result <- responseAndError{resp, err}
+	}()
+
+	var resp *http.Response
+
+	select {
+	case <-ctx.Done():
+		cancel()
+		return nil, ctx.Err()
+	case r := <-result:
+		var err error
+		resp, err = r.resp, r.err
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	c := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			cancel()
+		case <-c:
+			// The response's Body is closed.
+		}
+	}()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+	return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
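+//
+// It exists so that the ctx-watching goroutine started in Do can be
+// released once the caller either closes the response body or hits a
+// read error.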
+type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/server/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml new file mode 100644 index 00000000..a035125c --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/server/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..46aa2b12 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/server/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE new file mode 100644 index 00000000..d02f24fd --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. 
All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/server/Godeps/_workspace/src/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000..0d514173
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/bitbucket/bitbucket.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/bitbucket/bitbucket.go new file mode 100644 index 00000000..d4064e64 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/bitbucket/bitbucket.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitbucket provides constants for using OAuth2 to access Bitbucket. +package bitbucket + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is Bitbucket's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://bitbucket.org/site/oauth2/authorize", + TokenURL: "https://bitbucket.org/site/oauth2/access_token", +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 00000000..63ef5230 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 00000000..abe683a7 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as the "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. 
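+//
+// A minimal usage sketch (the endpoint URL and credentials are placeholders):
+//
+//	conf := &clientcredentials.Config{
+//		ClientID:     "my-client-id",
+//		ClientSecret: "my-client-secret",
+//		TokenURL:     "https://provider.example.com/oauth2/token",
+//	}
+//	client := conf.Client(context.Background())
+//	resp, err := client.Get("https://provider.example.com/api/resource")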
+// +// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4 +package clientcredentials + +import ( + "net/http" + "net/url" + "strings" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal" +) + +// tokenFromInternal maps an *internal.Token struct into +// an *oauth2.Token struct. +func tokenFromInternal(t *internal.Token) *oauth2.Token { + if t == nil { + return nil + } + tk := &oauth2.Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + } + return tk.WithExtra(t.Raw) +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is +// returned along with an error. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} + +// Client Credentials Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// Token uses client credentials to retrieve a token. +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"client_credentials"}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context and the +// client ID and client secret. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by using a new client credentials request. 
+// tokens received this way do not include a refresh token +func (c *tokenSource) Token() (*oauth2.Token, error) { + return retrieveToken(c.ctx, c.conf, url.Values{ + "grant_type": {"client_credentials"}, + "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")), + }) +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 00000000..8b66a953 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go new file mode 100644 index 00000000..aa5fa4bc --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 00000000..733344cb --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,86 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) 
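+	// Sorting gives a canonical, order-independent key for the aeTokens
+	// cache below, so equivalent scope sets share one cached token.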
+ sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 00000000..4f42c8b3 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm_hook.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 00000000..633611cc --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000..f8f372cb --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go @@ -0,0 +1,155 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/jwt" + "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. 
+// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineVM { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+	const f = "application_default_credentials.json"
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	var d struct {
+		// Common fields
+		Type     string
+		ClientID string `json:"client_id"`
+
+		// User Credential fields
+		ClientSecret string `json:"client_secret"`
+		RefreshToken string `json:"refresh_token"`
+
+		// Service Account fields
+		ClientEmail  string `json:"client_email"`
+		PrivateKeyID string `json:"private_key_id"`
+		PrivateKey   string `json:"private_key"`
+	}
+	if err := json.Unmarshal(b, &d); err != nil {
+		return nil, err
+	}
+	switch d.Type {
+	case "authorized_user":
+		cfg := &oauth2.Config{
+			ClientID:     d.ClientID,
+			ClientSecret: d.ClientSecret,
+			Scopes:       append([]string{}, scopes...), // copy
+			Endpoint:     Endpoint,
+		}
+		tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+		return cfg.TokenSource(ctx, tok), nil
+	case "service_account":
+		cfg := &jwt.Config{
+			Email:      d.ClientEmail,
+			PrivateKey: []byte(d.PrivateKey),
+			Scopes:     append([]string{}, scopes...), // copy
+			TokenURL:   JWTTokenURL,
+		}
+		return cfg.TokenSource(ctx), nil
+	case "":
+		return nil, errors.New("missing 'type' field in credentials")
+	default:
+		return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+	}
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
new file mode 100644
index 00000000..c9af1f18
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google

+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/jwt"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+	TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials".
Download the Web application credentials in the +// JSON format and provide the contents of the file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" page under "APIs & Auth" for your +// project at https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var key struct { + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(jsonKey, &key); err != nil { + return nil, err + } + return &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + Scopes: scope, + TokenURL: JWTTokenURL, + }, nil +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. 
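+//
+// For example (sketch; ctx is any context.Context):
+//
+//	client := oauth2.NewClient(ctx, google.ComputeTokenSource(""))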
+func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 00000000..0304eafb --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
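+//
+// Sketch (jsonKey holds the service account key file contents; the
+// audience URL is a placeholder):
+//
+//	ts, err := google.JWTAccessTokenSourceFromJSON(jsonKey, "https://service.example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	tok, err := ts.Token()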
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000..5518baec --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
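+//
+// For example (sketch; assumes `gcloud auth login` has already been run):
+//
+//	conf, err := google.NewSDKConfig("")
+//	if err != nil {
+//		// handle error
+//	}
+//	client := conf.Client(ctx) // *http.Client authorized as the active account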
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. 
+var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + usr, err := user.Current() + if err == nil { + return usr.HomeDir + } + return os.Getenv("HOME") +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 00000000..fbe1028d --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000..b095814d --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,221 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://api.dropbox.com/",
+	"https://api.instagram.com/",
+	"https://api.netatmo.net/",
+	"https://api.odnoklassniki.ru/",
+	"https://api.pushbullet.com/",
+	"https://api.soundcloud.com/",
+	"https://api.twitch.tv/",
+	"https://app.box.com/",
+	"https://connect.stripe.com/",
+	"https://login.microsoftonline.com/",
+	"https://login.salesforce.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://oauth.vk.com/",
+	"https://slack.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://test.salesforce.com/",
+	"https://user.gini.net/",
+	"https://www.douban.com/",
+	"https://www.googleapis.com/",
+	"https://www.linkedin.com/",
+	"https://www.strava.com/oauth/",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it either in URL param or Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
+	return true
+}
+
+func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
+	hc, err := ContextClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	v.Set("client_id", ClientID)
+	bustedAuth := !providerAuthHeaderWorks(TokenURL)
+	if bustedAuth && ClientSecret != "" {
+		v.Set("client_secret", ClientSecret)
+	}
+	req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	if !bustedAuth {
+		req.SetBasicAuth(ClientID, ClientSecret)
+	}
+	r, err := hc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if code := r.StatusCode; code < 200 || code > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+	}
+
+	var token *Token
+	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	switch content {
+	case "application/x-www-form-urlencoded", "text/plain":
+		vals, err := url.ParseQuery(string(body))
+		if err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  vals.Get("access_token"),
+			TokenType:    vals.Get("token_type"),
+			RefreshToken: vals.Get("refresh_token"),
+			Raw:          vals,
+		}
+		e := vals.Get("expires_in")
+		if e == "" {
+			// TODO(jbd): Facebook's OAuth2 implementation is broken and
+			// returns expires_in field in expires. Remove the fallback to expires,
+			// when Facebook fixes their implementation.
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			Raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	return token, nil
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 00000000..e2bba2a8
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 00000000..b46edb27 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides encoding and decoding utilities for +// signed JWS messages. +package jws + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). 
+	Exp   int64  `json:"exp"`           // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`           // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"` // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Reverting time back for machines whose time is not perfectly in sync.
+	// If client machine's time is in the future according
+	// to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64Encode(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+		return nil, errors.New("jws: invalid token received")
+	}
+	decoded, err := base64Decode(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &ClaimSet{}
+	err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+	return c, err
+}
+
+// Signer returns a signature for the given data.
+type Signer func(data []byte) (sig []byte, err error)
+
+// EncodeWithSigner encodes a header and claim set with the provided signer.
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	cs, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, cs)
+	sig, err := sg([]byte(ss))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
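+// The result is in the JWS compact form header.claims.signature, with
+// each part base64url-encoded (padding stripped).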
+// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
+	sg := func(data []byte) (sig []byte, err error) {
+		h := sha256.New()
+		h.Write([]byte(data))
+		return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+	}
+	return EncodeWithSigner(header, c, sg)
+}
+
+// base64Encode returns a Base64url encoded version of the input with any
+// trailing "=" stripped.
+func base64Encode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url encoded string.
+func base64Decode(s string) ([]byte, error) {
+	// add back missing padding
+	switch len(s) % 4 {
+	case 1:
+		s += "==="
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 00000000..d752c737
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,153 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/jws"
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// Email is the OAuth client identifier used when communicating with
+	// the configured OAuth provider.
+	Email string
+
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. The provided
+	// private key is used to sign JWT payloads.
+	// PEM containers with a passphrase are not supported.
+	// Use the following command to convert a PKCS 12 file into a PEM.
+	//
+	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+	//
+	PrivateKey []byte
+
+	// Subject is the optional user to impersonate.
+	Subject string
+
+	// Scopes optionally specifies a list of requested permission scopes.
+	Scopes []string
+
+	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
+	TokenURL string
+
+	// Expires optionally specifies how long the token is valid for.
+	Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
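+//
+// A minimal sketch (the token URL and scope are illustrative):
+//
+//	conf := &Config{
+//		Email:      "service-account@example.com",
+//		PrivateKey: pemBytes,
+//		Scopes:     []string{"https://provider.example.com/auth/scope"},
+//		TokenURL:   "https://provider.example.com/oauth2/token",
+//	}
+//	client := conf.Client(oauth2.NoContext)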
+// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 00000000..a8a0883d --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. 
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..9e0f47a9 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. 
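+	//
+	// For example (sketch):
+	//
+	//	url := conf.AuthCodeURL("state-token", AccessTypeOffline)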
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
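+//
+// A typical redirect handler does (sketch; error handling elided):
+//
+//	if r.FormValue("state") != expectedState {
+//		// reject the request
+//	}
+//	tok, err := conf.Exchange(oauth2.NoContext, r.FormValue("code"))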
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. 
+// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go new file mode 100644 index 00000000..c147d203 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. +package odnoklassniki + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", + TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go new file mode 100644 index 00000000..04965e36 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package paypal provides constants for using OAuth2 to access PayPal. +package paypal + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", +} + +// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. 
+var SandboxEndpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/token.go
new file mode 100644
index 00000000..5519e8d4
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
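+// For example, a provider that also returns an ID token in the response
+// makes it available as tok.Extra("id_token").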
+func (t *Token) Extra(key string) interface{} {
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+
+	vals, ok := t.raw.(url.Values)
+	if !ok {
+		return nil
+	}
+
+	v := vals.Get(key)
+	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+	case 0: // Contains no "."; try to parse as int
+		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+			return i
+		}
+	case 1: // Contains a single "."; try to parse as float
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+	}
+
+	return v
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..92ac7e25
--- /dev/null
+++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired,
+// it tries to refresh/fetch a new token.
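+//
+// Direct use looks like this (sketch; most code should prefer
+// Config.Client):
+//
+//	client := &http.Client{
+//		Transport: &Transport{Source: StaticTokenSource(tok)},
+//	}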
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/server/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/server/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 00000000..10210569 --- /dev/null +++ b/server/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. +package vk + +import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/LICENSE b/server/Godeps/_workspace/src/google.golang.org/api/LICENSE new file mode 100644 index 00000000..263aa7a0 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/server/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go new file mode 100644 index 00000000..752c4b41 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/server/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go new file mode 100644 index 00000000..dd7bcd2e --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. 
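+//
+// For example (sketch), given
+//
+//	type S struct {
+//		A string `json:"a,omitempty"`
+//	}
+//
+// MarshalJSON(S{}, []string{"A"}) produces {"a":""}, whereas plain
+// json.Marshal would omit the empty field.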
+func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. 
+	if f.Type.Kind() == reflect.Interface && v.IsNil() {
+		return false
+	}
+
+	_, ok := mustInclude[f.Name]
+	return ok || !isEmptyValue(v)
+}
+
+// isEmptyValue reports whether v is the empty value for its type. This
+// implementation is based on that of the encoding/json package, but its
+// correctness does not depend on it being identical. What's important is that
+// this function return false in situations where v should not be sent as part
+// of a PATCH operation.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/server/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go
new file mode 100644
index 00000000..985947b7
--- /dev/null
+++ b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go
@@ -0,0 +1,164 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/googleapi"
+)
+
+const sniffBuffSize = 512
+
+func NewContentSniffer(r io.Reader) *ContentSniffer {
+	return &ContentSniffer{r: r}
+}
+
+// ContentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
+type ContentSniffer struct {
+	r     io.Reader
+	start []byte // buffer for the sniffed bytes.
+	err   error  // set to any error encountered while reading bytes to be sniffed.
+
+	ctype   string // set on first sniff.
+	sniffed bool   // set to true on first sniff.
+}
+
+func (sct *ContentSniffer) Read(p []byte) (n int, err error) {
+	// Ensure that the content type is sniffed before any data is consumed from Reader.
+	_, _ = sct.ContentType()
+
+	if len(sct.start) > 0 {
+		n := copy(p, sct.start)
+		sct.start = sct.start[n:]
+		return n, nil
+	}
+
+	// We may have read some bytes into start while sniffing, even if the read ended in an error.
+	// We should first return those bytes, then the error.
+	if sct.err != nil {
+		return 0, sct.err
+	}
+
+	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
+	return sct.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (sct *ContentSniffer) ContentType() (string, bool) {
+	if sct.sniffed {
+		return sct.ctype, sct.ctype != ""
+	}
+	sct.sniffed = true
+	// If ReadAll hits EOF, it returns err==nil.
+	sct.start, sct.err = ioutil.ReadAll(io.LimitReader(sct.r, sniffBuffSize))
+
+	// Don't try to detect the content type based on possibly incomplete data.
+	if sct.err != nil {
+		return "", false
+	}
+
+	sct.ctype = http.DetectContentType(sct.start)
+	return sct.ctype, true
+}
+
+// IncludeMedia combines an existing HTTP body with media content to create a multipart/related HTTP body.
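+// The multipart body is streamed through an io.Pipe, so it is produced
+// lazily as the returned body is read.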
+// +// bodyp is an in/out parameter. It should initially point to the +// reader of the application/json (or whatever) payload to send in the +// API request. It's updated to point to the multipart body reader. +// +// ctypep is an in/out parameter. It should initially point to the +// content type of the bodyp, usually "application/json". It's updated +// to the "multipart/related" content type, with random boundary. +// +// The return value is a function that can be used to close the bodyp Reader with an error. +func IncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) func() { + var mediaType string + media, mediaType = getMediaType(media) + + body, bodyType := *bodyp, *ctypep + + pr, pw := io.Pipe() + mpw := multipart.NewWriter(pw) + *bodyp = pr + *ctypep = "multipart/related; boundary=" + mpw.Boundary() + go func() { + w, err := mpw.CreatePart(typeHeader(bodyType)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, body) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err)) + return + } + + w, err = mpw.CreatePart(typeHeader(mediaType)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, media) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err)) + return + } + mpw.Close() + pw.Close() + }() + return func() { pw.CloseWithError(errAborted) } +} + +var errAborted = errors.New("googleapi: upload aborted") + +func getMediaType(media io.Reader) (io.Reader, string) { + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := NewContentSniffer(media) + typ, ok := sniffer.ContentType() + if !ok { + // TODO(mcgreevy): Remove this default. It maintains the semantics of the existing code, + // but should not be relied on. + typ = "application/octet-stream" + } + return sniffer, typ +} + +// DetectMediaType detects and returns the content type of the provided media. +// If the type can not be determined, "application/octet-stream" is returned. +func DetectMediaType(media io.ReaderAt) string { + if typer, ok := media.(googleapi.ContentTyper); ok { + return typer.ContentType() + } + + typ := "application/octet-stream" + buf := make([]byte, 1024) + n, err := media.ReadAt(buf, 0) + buf = buf[:n] + if err == nil || err == io.EOF { + typ = http.DetectContentType(buf) + } + return typ +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + h.Set("Content-Type", contentType) + return h +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go new file mode 100644 index 00000000..dfad3f41 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go @@ -0,0 +1,31 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import "net/url" + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Set sets the key to value. +// It replaces any existing values. 
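+// For example:
+//
+//	params := make(URLParams)
+//	params.Set("alt", "json")
+//	// params.Encode() == "alt=json"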
+func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000..bb488af2 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,473 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp" + "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. + // When using a resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + Version = "0.5" + + // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // uploadPause determines the delay between failed upload attempts + uploadPause = 1 * time.Second +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. 
+type ErrorItem struct {
+	// Reason is the typed error code. For example: "some_example".
+	Reason string `json:"reason"`
+	// Message is the human-readable description of the error.
+	Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+	if len(e.Errors) == 0 && e.Message == "" {
+		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+	}
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+	if e.Message != "" {
+		fmt.Fprintf(&buf, "%s", e.Message)
+	}
+	if len(e.Errors) == 0 {
+		return strings.TrimSpace(buf.String())
+	}
+	if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+		return buf.String()
+	}
+	fmt.Fprintln(&buf, "\nMore details:")
+	for _, v := range e.Errors {
+		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+	}
+	return buf.String()
+}
+
+type errorReply struct {
+	Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err == nil {
+		jerr := new(errorReply)
+		err = json.Unmarshal(slurp, jerr)
+		if err == nil && jerr.Error != nil {
+			if jerr.Error.Code == 0 {
+				jerr.Error.Code = res.StatusCode
+			}
+			jerr.Error.Body = string(slurp)
+			return jerr.Error
+		}
+	}
+	return &Error{
+		Code:   res.StatusCode,
+		Body:   string(slurp),
+		Header: res.Header,
+	}
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+	if err == nil {
+		return false
+	}
+	ae, ok := err.(*Error)
+	return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+func CheckMediaResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+	res.Body.Close()
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+	buf := new(bytes.Buffer)
+	if wrap {
+		buf.Write([]byte(`{"data": `))
+	}
+	err := json.NewEncoder(buf).Encode(v)
+	if err != nil {
+		return nil, err
+	}
+	if wrap {
+		buf.Write([]byte(`}`))
+	}
+	return buf, nil
+}
+
+// endingWithErrorReader reads from r until it returns an error. If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+	r io.Reader
+	e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+	n, err = er.r.Read(p)
+	if err == io.EOF && er.e != nil {
+		err = er.e
+	}
+	return
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+	n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+	*w.n += int64(len(p))
+	return len(p), nil
+}
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// ResumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type ResumableUpload struct {
+	Client *http.Client
+	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+	URI       string
+	UserAgent string // User-Agent for header of the request
+	// Media is the object being uploaded.
+	Media io.ReaderAt
+	// MediaType defines the media type, e.g. "image/jpeg".
+	MediaType string
+	// ContentLength is the full size of the object being uploaded.
+	ContentLength int64
+
+	mu       sync.Mutex // guards progress
+	progress int64      // number of bytes uploaded so far
+
+	// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
+	Callback func(int64)
+}
+
+var (
+	// rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
+	rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
+	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
+	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
+	chunkSize int64 = 1 << 18
+)
+
+// Progress returns the number of bytes uploaded at this point.
+func (rx *ResumableUpload) Progress() int64 {
+	rx.mu.Lock()
+	defer rx.mu.Unlock()
+	return rx.progress
+}
+
+func (rx *ResumableUpload) transferStatus(ctx context.Context) (int64, *http.Response, error) {
+	req, _ := http.NewRequest("POST", rx.URI, nil)
+	req.ContentLength = 0
+	req.Header.Set("User-Agent", rx.UserAgent)
+	req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
+	res, err := ctxhttp.Do(ctx, rx.Client, req)
+	if err != nil || res.StatusCode != statusResumeIncomplete {
+		return 0, res, err
+	}
+	var start int64
+	if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
+		start, err = strconv.ParseInt(m[1], 10, 64)
+		if err != nil {
+			return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
+		}
+		start += 1 // Start at the next byte
+	}
+	return start, res, nil
+}
+
+type chunk struct {
+	body io.Reader
+	size int64
+	err  error
+}
+
+func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
+	start, res, err := rx.transferStatus(ctx)
+	if err != nil || res.StatusCode != statusResumeIncomplete {
+		if err == context.Canceled {
+			return &http.Response{StatusCode: http.StatusRequestTimeout}, err
+		}
+		return res, err
+	}
+
+	for {
+		select { // Check for cancellation
+		case <-ctx.Done():
+			res.StatusCode = http.StatusRequestTimeout
+			return res, ctx.Err()
+		default:
+		}
+		reqSize := rx.ContentLength - start
+		if reqSize > chunkSize {
+			reqSize = chunkSize
+		}
+		r := io.NewSectionReader(rx.Media, start, reqSize)
+		req, _ := http.NewRequest("POST", rx.URI, r)
+		req.ContentLength = reqSize
+		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
+		req.Header.Set("Content-Type", rx.MediaType)
+		req.Header.Set("User-Agent", rx.UserAgent)
+		res, err = ctxhttp.Do(ctx, rx.Client, req)
+		start += reqSize
+		if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
+			rx.mu.Lock()
+			rx.progress = start // keep track of number of bytes sent so far
+			rx.mu.Unlock()
+			if rx.Callback != nil {
+				rx.Callback(start)
+			}
+		}
+		if err != nil || res.StatusCode != statusResumeIncomplete {
+			break
+		}
+	}
+	return res, err
+}
+
+var sleep = time.Sleep // override in unit tests
+
+// Upload starts the process of a resumable upload with a cancellable context.
+// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
+// It is called from the auto-generated API code and is not visible to the user.
+// rx is private to the auto-generated API code.
+func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
+	var res *http.Response
+	var err error
+	for {
+		res, err = rx.transferChunks(ctx)
+		if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
+			return res, err
+		}
+		select { // Check for cancellation
+		case <-ctx.Done():
+			res.StatusCode = http.StatusRequestTimeout
+			return res, ctx.Err()
+		default:
+		}
+		sleep(uploadPause)
+	}
+	return res, err
+}
+
+func ResolveRelative(basestr, relstr string) string {
+	u, _ := url.Parse(basestr)
+	rel, _ := url.Parse(relstr)
+	u = u.ResolveReference(rel)
+	us := u.String()
+	us = strings.Replace(us, "%7B", "{", -1)
+	us = strings.Replace(us, "%7D", "}", -1)
+	return us
+}
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+	r := http.Request{
+		URL: &url.URL{
+			Scheme: "http",
+			Opaque: "//opaque",
+		},
+	}
+	b := &bytes.Buffer{}
+	r.Write(b)
+	has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+	u.Opaque = "//" + u.Host + u.Path
+	if !has4860Fix {
+		u.Opaque = u.Scheme + ":" + u.Opaque
+	}
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+	expanded, err := uritemplates.Expand(u.Path, expansions)
+	if err == nil {
+		u.Path = expanded
+		SetOpaque(u)
+	}
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+	if res == nil || res.Body == nil {
+		return
+	}
+	// Justification for 3 byte reads: two for up to "\r\n" after
+	// a JSON/XML document, and then 1 to see EOF if we haven't yet.
+	// TODO(bradfitz): detect Go 1.3+ and skip these reads.
+	// See https://codereview.appspot.com/58240043
+	// and https://codereview.appspot.com/49570044
+	buf := make([]byte, 1)
+	for i := 0; i < 3; i++ {
+		_, err := res.Body.Read(buf)
+		if err != nil {
+			break
+		}
+	}
+	res.Body.Close()
+
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+	s, _ := t["type"].(string)
+	return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 00000000..de9c88cb --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
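The googleapi Field type and CombineFields helper added above are what the generated call builders use to populate the "fields" query parameter. A minimal standalone sketch (hypothetical usage, not part of this diff; the import path mirrors the vendored layout used throughout this tree):

	package main

	import (
		"fmt"

		googleapi "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/googleapi"
	)

	func main() {
		// Combine two partial-response selectors into the single
		// comma-separated value expected by the "fields" parameter.
		f := googleapi.CombineFields([]googleapi.Field{"nextPageToken", "items(id,updated)"})
		fmt.Println(f) // prints: nextPageToken,items(id,updated)
	}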
diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
new file mode 100644
index 00000000..8a84813f
--- /dev/null
+++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+//     template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+//     values := make(map[string]interface{})
+//     values["user"] = "jtacoma"
+//     values["repo"] = "uritemplates"
+//     expanded, _ := template.Expand(values)
+//     fmt.Println(expanded)
+//
+package uritemplates
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+	reserved   = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+	validname  = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+	hex        = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+	dst := make([]byte, len(src)*3)
+	for i, b := range src {
+		buf := dst[i*3 : i*3+3]
+		buf[0] = 0x25
+		buf[1] = hex[b/16]
+		buf[2] = hex[b%16]
+	}
+	return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+	if allowReserved {
+		escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+	} else {
+		escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+	}
+	return escaped
+}
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+	raw   string
+	parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+	template = new(UriTemplate)
+	template.raw = rawtemplate
+	split := strings.Split(rawtemplate, "{")
+	template.parts = make([]templatePart, len(split)*2-1)
+	for i, s := range split {
+		if i == 0 {
+			if strings.Contains(s, "}") {
+				err = errors.New("unexpected }")
+				break
+			}
+			template.parts[i].raw = s
+		} else {
+			subsplit := strings.Split(s, "}")
+			if len(subsplit) != 2 {
+				err = errors.New("malformed template")
+				break
+			}
+			expression := subsplit[0]
+			template.parts[i*2-1], err = parseExpression(expression)
+			if err != nil {
+				break
+			}
+			template.parts[i*2].raw = subsplit[1]
+		}
+	}
+	if err != nil {
+		template = nil
+	}
+	return template, err
+}
+
+type templatePart struct {
+	raw           string
+	terms         []templateTerm
+	first         string
+	sep           string
+	named         bool
+	ifemp         string
+	allowReserved bool
+}
+
+type templateTerm struct {
+	name     string
+	explode  bool
+	truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+	switch expression[0] {
+	case '+':
+		result.sep = ","
+		result.allowReserved = true
+		expression = expression[1:]
+	case '.':
+		result.first = "."
+		result.sep = "."
+		expression = expression[1:]
+	case '/':
+		result.first = "/"
+		result.sep = "/"
+		expression = expression[1:]
+	case ';':
+		result.first = ";"
+		result.sep = ";"
+		result.named = true
+		expression = expression[1:]
+	case '?':
+		result.first = "?"
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + 
self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 00000000..399ef462 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 00000000..eca1ea25 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000..a02b4b07 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
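+// For example, Float64s{1.5, 2} marshals as ["1.5","2"] rather than [1.5,2].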
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. 
+func String(v string) *string { return &v } diff --git a/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-api.json b/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-api.json new file mode 100644 index 00000000..1b59fae2 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-api.json @@ -0,0 +1,294 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/I-Kz7nVLqL3C3aFzaPv8bMiNptU\"", + "discoveryVersion": "v1", + "id": "oauth2:v2", + "name": "oauth2", + "version": "v2", + "revision": "20150319", + "title": "Google OAuth2 API", + "description": "Lets you access OAuth2 protocol related APIs.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://developers.google.com/accounts/docs/OAuth2", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/", + "basePath": "/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/plus.login": { + "description": "Know your basic profile info and list of people in your circles." 
+ }, + "https://www.googleapis.com/auth/plus.me": { + "description": "Know who you are on Google" + }, + "https://www.googleapis.com/auth/userinfo.email": { + "description": "View your email address" + }, + "https://www.googleapis.com/auth/userinfo.profile": { + "description": "View your basic profile info" + } + } + } + }, + "schemas": { + "Jwk": { + "id": "Jwk", + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "object", + "properties": { + "alg": { + "type": "string", + "default": "RS256" + }, + "e": { + "type": "string" + }, + "kid": { + "type": "string" + }, + "kty": { + "type": "string", + "default": "RSA" + }, + "n": { + "type": "string" + }, + "use": { + "type": "string", + "default": "sig" + } + } + } + } + } + }, + "Tokeninfo": { + "id": "Tokeninfo", + "type": "object", + "properties": { + "access_type": { + "type": "string", + "description": "The access type granted with this token. It can be offline or online." + }, + "audience": { + "type": "string", + "description": "Who is the intended audience for this token. In general the same as issued_to." + }, + "email": { + "type": "string", + "description": "The email address of the user. Present only if the email scope is present in the request." + }, + "expires_in": { + "type": "integer", + "description": "The expiry time of the token, as number of seconds left until expiry.", + "format": "int32" + }, + "issued_to": { + "type": "string", + "description": "To whom was the token issued to. In general the same as audience." + }, + "scope": { + "type": "string", + "description": "The space separated list of scopes granted to this token." + }, + "token_handle": { + "type": "string", + "description": "The token handle associated with this token." + }, + "user_id": { + "type": "string", + "description": "The obfuscated user id." + }, + "verified_email": { + "type": "boolean", + "description": "Boolean flag which is true if the email address is verified. Present only if the email scope is present in the request." + } + } + }, + "Userinfoplus": { + "id": "Userinfoplus", + "type": "object", + "properties": { + "email": { + "type": "string", + "description": "The user's email address." + }, + "family_name": { + "type": "string", + "description": "The user's last name." + }, + "gender": { + "type": "string", + "description": "The user's gender." + }, + "given_name": { + "type": "string", + "description": "The user's first name." + }, + "hd": { + "type": "string", + "description": "The hosted domain e.g. example.com if the user is Google apps user." + }, + "id": { + "type": "string", + "description": "The obfuscated ID of the user." + }, + "link": { + "type": "string", + "description": "URL of the profile page." + }, + "locale": { + "type": "string", + "description": "The user's preferred locale." + }, + "name": { + "type": "string", + "description": "The user's full name." + }, + "picture": { + "type": "string", + "description": "URL of the user's picture image." + }, + "verified_email": { + "type": "boolean", + "description": "Boolean flag which is true if the email address is verified. 
Always verified because we only return the user's primary email address.", + "default": "true" + } + } + } + }, + "methods": { + "getCertForOpenIdConnect": { + "id": "oauth2.getCertForOpenIdConnect", + "path": "oauth2/v2/certs", + "httpMethod": "GET", + "response": { + "$ref": "Jwk" + } + }, + "tokeninfo": { + "id": "oauth2.tokeninfo", + "path": "oauth2/v2/tokeninfo", + "httpMethod": "POST", + "parameters": { + "access_token": { + "type": "string", + "location": "query" + }, + "id_token": { + "type": "string", + "location": "query" + }, + "token_handle": { + "type": "string", + "location": "query" + } + }, + "response": { + "$ref": "Tokeninfo" + } + } + }, + "resources": { + "userinfo": { + "methods": { + "get": { + "id": "oauth2.userinfo.get", + "path": "oauth2/v2/userinfo", + "httpMethod": "GET", + "response": { + "$ref": "Userinfoplus" + }, + "scopes": [ + "https://www.googleapis.com/auth/plus.login", + "https://www.googleapis.com/auth/plus.me", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile" + ] + } + }, + "resources": { + "v2": { + "resources": { + "me": { + "methods": { + "get": { + "id": "oauth2.userinfo.v2.me.get", + "path": "userinfo/v2/me", + "httpMethod": "GET", + "response": { + "$ref": "Userinfoplus" + }, + "scopes": [ + "https://www.googleapis.com/auth/plus.login", + "https://www.googleapis.com/auth/plus.me", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile" + ] + } + } + } + } + } + } + } + } +} diff --git a/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-gen.go b/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-gen.go new file mode 100644 index 00000000..73fa7706 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2/oauth2-gen.go @@ -0,0 +1,793 @@ +// Package oauth2 provides access to the Google OAuth2 API. +// +// See https://developers.google.com/accounts/docs/OAuth2 +// +// Usage example: +// +// import "google.golang.org/api/oauth2/v2" +// ... +// oauth2Service, err := oauth2.New(oauthHttpClient) +package oauth2 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + ctxhttp "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp" + gensupport "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/gensupport" + googleapi "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "oauth2:v2" +const apiName = "oauth2" +const apiVersion = "v2" +const basePath = "https://www.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // Know your basic profile info and list of people in your circles. 
+ PlusLoginScope = "https://www.googleapis.com/auth/plus.login" + + // Know who you are on Google + PlusMeScope = "https://www.googleapis.com/auth/plus.me" + + // View your email address + UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email" + + // View your basic profile info + UserinfoProfileScope = "https://www.googleapis.com/auth/userinfo.profile" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Userinfo = NewUserinfoService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Userinfo *UserinfoService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewUserinfoService(s *Service) *UserinfoService { + rs := &UserinfoService{s: s} + rs.V2 = NewUserinfoV2Service(s) + return rs +} + +type UserinfoService struct { + s *Service + + V2 *UserinfoV2Service +} + +func NewUserinfoV2Service(s *Service) *UserinfoV2Service { + rs := &UserinfoV2Service{s: s} + rs.Me = NewUserinfoV2MeService(s) + return rs +} + +type UserinfoV2Service struct { + s *Service + + Me *UserinfoV2MeService +} + +func NewUserinfoV2MeService(s *Service) *UserinfoV2MeService { + rs := &UserinfoV2MeService{s: s} + return rs +} + +type UserinfoV2MeService struct { + s *Service +} + +type Jwk struct { + Keys []*JwkKeys `json:"keys,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Jwk) MarshalJSON() ([]byte, error) { + type noMethod Jwk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type JwkKeys struct { + Alg string `json:"alg,omitempty"` + + E string `json:"e,omitempty"` + + Kid string `json:"kid,omitempty"` + + Kty string `json:"kty,omitempty"` + + N string `json:"n,omitempty"` + + Use string `json:"use,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alg") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *JwkKeys) MarshalJSON() ([]byte, error) { + type noMethod JwkKeys + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type Tokeninfo struct { + // AccessType: The access type granted with this token. It can be + // offline or online. + AccessType string `json:"access_type,omitempty"` + + // Audience: Who is the intended audience for this token. In general the + // same as issued_to. + Audience string `json:"audience,omitempty"` + + // Email: The email address of the user. 
Present only if the email scope + // is present in the request. + Email string `json:"email,omitempty"` + + // ExpiresIn: The expiry time of the token, as number of seconds left + // until expiry. + ExpiresIn int64 `json:"expires_in,omitempty"` + + // IssuedTo: To whom was the token issued to. In general the same as + // audience. + IssuedTo string `json:"issued_to,omitempty"` + + // Scope: The space separated list of scopes granted to this token. + Scope string `json:"scope,omitempty"` + + // TokenHandle: The token handle associated with this token. + TokenHandle string `json:"token_handle,omitempty"` + + // UserId: The obfuscated user id. + UserId string `json:"user_id,omitempty"` + + // VerifiedEmail: Boolean flag which is true if the email address is + // verified. Present only if the email scope is present in the request. + VerifiedEmail bool `json:"verified_email,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccessType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Tokeninfo) MarshalJSON() ([]byte, error) { + type noMethod Tokeninfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type Userinfoplus struct { + // Email: The user's email address. + Email string `json:"email,omitempty"` + + // FamilyName: The user's last name. + FamilyName string `json:"family_name,omitempty"` + + // Gender: The user's gender. + Gender string `json:"gender,omitempty"` + + // GivenName: The user's first name. + GivenName string `json:"given_name,omitempty"` + + // Hd: The hosted domain e.g. example.com if the user is Google apps + // user. + Hd string `json:"hd,omitempty"` + + // Id: The obfuscated ID of the user. + Id string `json:"id,omitempty"` + + // Link: URL of the profile page. + Link string `json:"link,omitempty"` + + // Locale: The user's preferred locale. + Locale string `json:"locale,omitempty"` + + // Name: The user's full name. + Name string `json:"name,omitempty"` + + // Picture: URL of the user's picture image. + Picture string `json:"picture,omitempty"` + + // VerifiedEmail: Boolean flag which is true if the email address is + // verified. Always verified because we only return the user's primary + // email address. + // + // Default: true + VerifiedEmail *bool `json:"verified_email,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Userinfoplus) MarshalJSON() ([]byte, error) {
+	type noMethod Userinfoplus
+	raw := noMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// method id "oauth2.getCertForOpenIdConnect":
+
+type GetCertForOpenIdConnectCall struct {
+	s            *Service
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// GetCertForOpenIdConnect:
+func (s *Service) GetCertForOpenIdConnect() *GetCertForOpenIdConnectCall {
+	c := &GetCertForOpenIdConnectCall{s: s, urlParams_: make(gensupport.URLParams)}
+	return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+// Overrides userIp if both are provided.
+func (c *GetCertForOpenIdConnectCall) QuotaUser(quotaUser string) *GetCertForOpenIdConnectCall {
+	c.urlParams_.Set("quotaUser", quotaUser)
+	return c
+}
+
+// UserIP sets the optional parameter "userIp": IP address of the site
+// where the request originates. Use this if you want to enforce
+// per-user limits.
+func (c *GetCertForOpenIdConnectCall) UserIP(userIP string) *GetCertForOpenIdConnectCall {
+	c.urlParams_.Set("userIp", userIP)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GetCertForOpenIdConnectCall) Fields(s ...googleapi.Field) *GetCertForOpenIdConnectCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GetCertForOpenIdConnectCall) IfNoneMatch(entityTag string) *GetCertForOpenIdConnectCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GetCertForOpenIdConnectCall) Context(ctx context.Context) *GetCertForOpenIdConnectCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/certs")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.SetOpaque(req.URL)
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "oauth2.getCertForOpenIdConnect" call.
+// Exactly one of *Jwk or error will be non-nil. Any non-2xx status code
+// is an error. Response headers are in either
+// *Jwk.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was
+// returned.
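+//
+// A minimal call-site sketch (svc is assumed to be a *Service obtained
+// from New with an authenticated *http.Client):
+//
+//	jwk, err := svc.GetCertForOpenIdConnect().Do()
+//	if err != nil {
+//		// handle the error, possibly a *googleapi.Error
+//	}
+//	_ = jwk // use the returned JSON Web Key set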
+func (c *GetCertForOpenIdConnectCall) Do() (*Jwk, error) { + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Jwk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "httpMethod": "GET", + // "id": "oauth2.getCertForOpenIdConnect", + // "path": "oauth2/v2/certs", + // "response": { + // "$ref": "Jwk" + // } + // } + +} + +// method id "oauth2.tokeninfo": + +type TokeninfoCall struct { + s *Service + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Tokeninfo: +func (s *Service) Tokeninfo() *TokeninfoCall { + c := &TokeninfoCall{s: s, urlParams_: make(gensupport.URLParams)} + return c +} + +// AccessToken sets the optional parameter "access_token": +func (c *TokeninfoCall) AccessToken(accessToken string) *TokeninfoCall { + c.urlParams_.Set("access_token", accessToken) + return c +} + +// IdToken sets the optional parameter "id_token": +func (c *TokeninfoCall) IdToken(idToken string) *TokeninfoCall { + c.urlParams_.Set("id_token", idToken) + return c +} + +// QuotaUser sets the optional parameter "quotaUser": Available to use +// for quota purposes for server-side applications. Can be any arbitrary +// string assigned to a user, but should not exceed 40 characters. +// Overrides userIp if both are provided. +func (c *TokeninfoCall) QuotaUser(quotaUser string) *TokeninfoCall { + c.urlParams_.Set("quotaUser", quotaUser) + return c +} + +// TokenHandle sets the optional parameter "token_handle": +func (c *TokeninfoCall) TokenHandle(tokenHandle string) *TokeninfoCall { + c.urlParams_.Set("token_handle", tokenHandle) + return c +} + +// UserIP sets the optional parameter "userIp": IP address of the site +// where the request originates. Use this if you want to enforce +// per-user limits. +func (c *TokeninfoCall) UserIP(userIP string) *TokeninfoCall { + c.urlParams_.Set("userIp", userIP) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TokeninfoCall) Fields(s ...googleapi.Field) *TokeninfoCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TokeninfoCall) Context(ctx context.Context) *TokeninfoCall { + c.ctx_ = ctx + return c +} + +func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/tokeninfo") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "oauth2.tokeninfo" call. +// Exactly one of *Tokeninfo or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either
+// *Tokeninfo.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *TokeninfoCall) Do() (*Tokeninfo, error) {
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Tokeninfo{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "httpMethod": "POST",
+	//   "id": "oauth2.tokeninfo",
+	//   "parameters": {
+	//     "access_token": {
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "id_token": {
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "token_handle": {
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "oauth2/v2/tokeninfo",
+	//   "response": {
+	//     "$ref": "Tokeninfo"
+	//   }
+	// }
+
+}
+
+// method id "oauth2.userinfo.get":
+
+type UserinfoGetCall struct {
+	s            *Service
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get:
+func (r *UserinfoService) Get() *UserinfoGetCall {
+	c := &UserinfoGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	return c
+}
+
+// QuotaUser sets the optional parameter "quotaUser": Available to use
+// for quota purposes for server-side applications. Can be any arbitrary
+// string assigned to a user, but should not exceed 40 characters.
+// Overrides userIp if both are provided.
+func (c *UserinfoGetCall) QuotaUser(quotaUser string) *UserinfoGetCall {
+	c.urlParams_.Set("quotaUser", quotaUser)
+	return c
+}
+
+// UserIP sets the optional parameter "userIp": IP address of the site
+// where the request originates. Use this if you want to enforce
+// per-user limits.
+func (c *UserinfoGetCall) UserIP(userIP string) *UserinfoGetCall {
+	c.urlParams_.Set("userIp", userIP)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UserinfoGetCall) Fields(s ...googleapi.Field) *UserinfoGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *UserinfoGetCall) IfNoneMatch(entityTag string) *UserinfoGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
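+//
+// A hypothetical usage sketch (ctx and svc are assumed to exist):
+//
+//	user, err := svc.Userinfo.Get().Context(ctx).Do()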
+func (c *UserinfoGetCall) Context(ctx context.Context) *UserinfoGetCall { + c.ctx_ = ctx + return c +} + +func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/userinfo") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "oauth2.userinfo.get" call. +// Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Userinfoplus.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UserinfoGetCall) Do() (*Userinfoplus, error) { + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Userinfoplus{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "httpMethod": "GET", + // "id": "oauth2.userinfo.get", + // "path": "oauth2/v2/userinfo", + // "response": { + // "$ref": "Userinfoplus" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/plus.login", + // "https://www.googleapis.com/auth/plus.me", + // "https://www.googleapis.com/auth/userinfo.email", + // "https://www.googleapis.com/auth/userinfo.profile" + // ] + // } + +} + +// method id "oauth2.userinfo.v2.me.get": + +type UserinfoV2MeGetCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: +func (r *UserinfoV2MeService) Get() *UserinfoV2MeGetCall { + c := &UserinfoV2MeGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// QuotaUser sets the optional parameter "quotaUser": Available to use +// for quota purposes for server-side applications. Can be any arbitrary +// string assigned to a user, but should not exceed 40 characters. +// Overrides userIp if both are provided. +func (c *UserinfoV2MeGetCall) QuotaUser(quotaUser string) *UserinfoV2MeGetCall { + c.urlParams_.Set("quotaUser", quotaUser) + return c +} + +// UserIP sets the optional parameter "userIp": IP address of the site +// where the request originates. Use this if you want to enforce +// per-user limits. +func (c *UserinfoV2MeGetCall) UserIP(userIP string) *UserinfoV2MeGetCall { + c.urlParams_.Set("userIp", userIP) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *UserinfoV2MeGetCall) Fields(s ...googleapi.Field) *UserinfoV2MeGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *UserinfoV2MeGetCall) IfNoneMatch(entityTag string) *UserinfoV2MeGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UserinfoV2MeGetCall) Context(ctx context.Context) *UserinfoV2MeGetCall { + c.ctx_ = ctx + return c +} + +func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "userinfo/v2/me") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "oauth2.userinfo.v2.me.get" call. +// Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Userinfoplus.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UserinfoV2MeGetCall) Do() (*Userinfoplus, error) { + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Userinfoplus{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "httpMethod": "GET", + // "id": "oauth2.userinfo.v2.me.get", + // "path": "userinfo/v2/me", + // "response": { + // "$ref": "Userinfoplus" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/plus.login", + // "https://www.googleapis.com/auth/plus.me", + // "https://www.googleapis.com/auth/userinfo.email", + // "https://www.googleapis.com/auth/userinfo.profile" + // ] + // } + +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/LICENSE b/server/Godeps/_workspace/src/google.golang.org/cloud/LICENSE new file mode 100644 index 00000000..a4c5efd8 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
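The file added next vendors the GCE metadata client under the Godeps workspace. As a hedged sketch of how server code could consume it once vendored (the helper below is hypothetical; nothing in this diff actually calls the package):

	import (
		"log"

		"github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata"
	)

	// logGCEContext logs the GCE project and zone when plikd runs on a
	// GCE instance, and silently does nothing anywhere else.
	func logGCEContext() {
		if !metadata.OnGCE() {
			return
		}
		proj, err := metadata.ProjectID()
		if err != nil {
			log.Printf("metadata: %v", err)
			return
		}
		zone, err := metadata.Zone()
		if err != nil {
			log.Printf("metadata: %v", err)
			return
		}
		log.Printf("running on GCE: project=%s zone=%s", proj, zone)
	}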
diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 00000000..bf3245f6 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,327 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag. This func is otherwise equivalent to Get. +func getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. 
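+	//
+	// A hedged sketch of that override in a test (the stub address is
+	// hypothetical): serve the expected paths from a local stub, then,
+	// before exercising this package:
+	//
+	//	os.Setenv("GCE_METADATA_HOST", "127.0.0.1:8089")
+	//
+	// Every request built below then targets the stub instead of the
+	// fixed IP.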
+ host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = "169.254.169.254" + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := metaClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + + // We use the DNS name of the metadata service here instead of the IP address + // because we expect that to fail faster in the not-on-GCE case. + res, err := metaClient.Get("http://metadata.google.internal") + if err != nil { + return false + } + onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" + return onGCE.v +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + suffix += "?wait_for_change=true&last_etag=" + for { + val, etag, err := getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. 
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go new file mode 100644 index 00000000..a1375dd6 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go @@ -0,0 +1,128 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" + "sync" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. +func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. +type Transport struct { + // Base represents the actual http.RoundTripper + // the requests will be delegated to. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. 
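+// RoundTrippers must not modify the request they receive, which is why
+// RoundTrip above rewrites the User-Agent header only on a copy. A
+// hedged sketch of wiring this Transport into a client (the base
+// transport choice is an assumption, not something this diff makes):
+//
+//	client := &http.Client{
+//		Transport: &Transport{Base: http.DefaultTransport},
+//	}
+//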
+// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. +func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go new file mode 100644 index 00000000..9cb9be52 --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. +// source: datastore_v1.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. + +It is generated from these files: + datastore_v1.proto + +It has these top-level messages: + PartitionId + Key + Value + Property + Entity + EntityResult + Query + KindExpression + PropertyReference + PropertyExpression + PropertyOrder + Filter + CompositeFilter + PropertyFilter + GqlQuery + GqlQueryArg + QueryResultBatch + Mutation + MutationResult + ReadOptions + LookupRequest + LookupResponse + RunQueryRequest + RunQueryResponse + BeginTransactionRequest + BeginTransactionResponse + RollbackRequest + RollbackResponse + CommitRequest + CommitResponse + AllocateIdsRequest + AllocateIdsResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Specifies what data the 'entity' field contains. +// A ResultType is either implied (for example, in LookupResponse.found it +// is always FULL) or specified by context (for example, in message +// QueryResultBatch, field 'entity_result_type' specifies a ResultType +// for all the values in field 'entity_result'). +type EntityResult_ResultType int32 + +const ( + EntityResult_FULL EntityResult_ResultType = 1 + EntityResult_PROJECTION EntityResult_ResultType = 2 + // The entity may have no key. + // A property value may have meaning 18. 
+ EntityResult_KEY_ONLY EntityResult_ResultType = 3 +) + +var EntityResult_ResultType_name = map[int32]string{ + 1: "FULL", + 2: "PROJECTION", + 3: "KEY_ONLY", +} +var EntityResult_ResultType_value = map[string]int32{ + "FULL": 1, + "PROJECTION": 2, + "KEY_ONLY": 3, +} + +func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { + p := new(EntityResult_ResultType) + *p = x + return p +} +func (x EntityResult_ResultType) String() string { + return proto.EnumName(EntityResult_ResultType_name, int32(x)) +} +func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") + if err != nil { + return err + } + *x = EntityResult_ResultType(value) + return nil +} + +type PropertyExpression_AggregationFunction int32 + +const ( + PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 +) + +var PropertyExpression_AggregationFunction_name = map[int32]string{ + 1: "FIRST", +} +var PropertyExpression_AggregationFunction_value = map[string]int32{ + "FIRST": 1, +} + +func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { + p := new(PropertyExpression_AggregationFunction) + *p = x + return p +} +func (x PropertyExpression_AggregationFunction) String() string { + return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) +} +func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") + if err != nil { + return err + } + *x = PropertyExpression_AggregationFunction(value) + return nil +} + +type PropertyOrder_Direction int32 + +const ( + PropertyOrder_ASCENDING PropertyOrder_Direction = 1 + PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +) + +var PropertyOrder_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var PropertyOrder_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { + p := new(PropertyOrder_Direction) + *p = x + return p +} +func (x PropertyOrder_Direction) String() string { + return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +} +func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") + if err != nil { + return err + } + *x = PropertyOrder_Direction(value) + return nil +} + +type CompositeFilter_Operator int32 + +const ( + CompositeFilter_AND CompositeFilter_Operator = 1 +) + +var CompositeFilter_Operator_name = map[int32]string{ + 1: "AND", +} +var CompositeFilter_Operator_value = map[string]int32{ + "AND": 1, +} + +func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { + p := new(CompositeFilter_Operator) + *p = x + return p +} +func (x CompositeFilter_Operator) String() string { + return proto.EnumName(CompositeFilter_Operator_name, int32(x)) +} +func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") + if err != nil { + return err + } + *x = CompositeFilter_Operator(value) + return nil +} + +type PropertyFilter_Operator int32 + +const ( + PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 + PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 + 
PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 + PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 + PropertyFilter_EQUAL PropertyFilter_Operator = 5 + PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +) + +var PropertyFilter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 11: "HAS_ANCESTOR", +} +var PropertyFilter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "HAS_ANCESTOR": 11, +} + +func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { + p := new(PropertyFilter_Operator) + *p = x + return p +} +func (x PropertyFilter_Operator) String() string { + return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +} +func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") + if err != nil { + return err + } + *x = PropertyFilter_Operator(value) + return nil +} + +// The possible values for the 'more_results' field. +type QueryResultBatch_MoreResultsType int32 + +const ( + QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 + QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 + // results after the limit. + QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +) + +var QueryResultBatch_MoreResultsType_name = map[int32]string{ + 1: "NOT_FINISHED", + 2: "MORE_RESULTS_AFTER_LIMIT", + 3: "NO_MORE_RESULTS", +} +var QueryResultBatch_MoreResultsType_value = map[string]int32{ + "NOT_FINISHED": 1, + "MORE_RESULTS_AFTER_LIMIT": 2, + "NO_MORE_RESULTS": 3, +} + +func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { + p := new(QueryResultBatch_MoreResultsType) + *p = x + return p +} +func (x QueryResultBatch_MoreResultsType) String() string { + return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +} +func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") + if err != nil { + return err + } + *x = QueryResultBatch_MoreResultsType(value) + return nil +} + +type ReadOptions_ReadConsistency int32 + +const ( + ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 + ReadOptions_STRONG ReadOptions_ReadConsistency = 1 + ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +) + +var ReadOptions_ReadConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "STRONG", + 2: "EVENTUAL", +} +var ReadOptions_ReadConsistency_value = map[string]int32{ + "DEFAULT": 0, + "STRONG": 1, + "EVENTUAL": 2, +} + +func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { + p := new(ReadOptions_ReadConsistency) + *p = x + return p +} +func (x ReadOptions_ReadConsistency) String() string { + return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +} +func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") + if err != nil { + return err + } + *x = ReadOptions_ReadConsistency(value) + return nil +} + +type BeginTransactionRequest_IsolationLevel int32 + +const ( + BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 + // conflict if their mutations conflict. 
For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). + BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 +) + +var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ + 0: "SNAPSHOT", + 1: "SERIALIZABLE", +} +var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ + "SNAPSHOT": 0, + "SERIALIZABLE": 1, +} + +func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { + p := new(BeginTransactionRequest_IsolationLevel) + *p = x + return p +} +func (x BeginTransactionRequest_IsolationLevel) String() string { + return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) +} +func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") + if err != nil { + return err + } + *x = BeginTransactionRequest_IsolationLevel(value) + return nil +} + +type CommitRequest_Mode int32 + +const ( + CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 + CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +) + +var CommitRequest_Mode_name = map[int32]string{ + 1: "TRANSACTIONAL", + 2: "NON_TRANSACTIONAL", +} +var CommitRequest_Mode_value = map[string]int32{ + "TRANSACTIONAL": 1, + "NON_TRANSACTIONAL": 2, +} + +func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { + p := new(CommitRequest_Mode) + *p = x + return p +} +func (x CommitRequest_Mode) String() string { + return proto.EnumName(CommitRequest_Mode_name, int32(x)) +} +func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") + if err != nil { + return err + } + *x = CommitRequest_Mode(value) + return nil +} + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. +// +// Partition dimension: +// A dimension may be unset. +// A dimension's value must never be "". +// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +// If the value of any dimension matches regex "__.*__", +// the partition is reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented contexts. +// +// Dataset ID: +// A dataset id's value must never be "". +// A dataset id's value must match +// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +type PartitionId struct { + // The dataset ID. + DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` + // The namespace. 
+ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} + +func (m *PartitionId) GetDatasetId() string { + if m != nil && m.DatasetId != nil { + return *m.DatasetId + } + return "" +} + +func (m *PartitionId) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition id or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a dataset + // (usually implicitly specified by the project) and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a root entity, the second element identifies + // a child of the root entity, the third element a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's ancestors. + // An entity path is always fully complete: ALL of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. A path can never + // be empty. + PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPathElement() []*Key_PathElement { + if m != nil { + return m.PathElement + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// At most one of name or ID may be set. +// If either is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex "__.*__" is reserved/read-only. + // A kind must not contain more than 500 characters. + // Cannot be "". + Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` + // The ID of the entity. + // Never equal to zero. Values less than zero are discouraged and will not + // be supported in the future. + Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` + // The name of the entity. + // A name matching regex "__.*__" is reserved/read-only. + // A name must not be more than 500 characters. + // Cannot be "". 
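+	//
+	// A hedged construction sketch for a complete one-element path (the
+	// kind and name here are hypothetical; proto.String is the usual
+	// proto2 field helper):
+	//
+	//	key := &Key{
+	//		PathElement: []*Key_PathElement{
+	//			{Kind: proto.String("Upload"), Name: proto.String("someUploadID")},
+	//		},
+	//	}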
+ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} + +func (m *Key_PathElement) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A message that can hold any of the supported value types and associated +// metadata. +// +// At most one of the Value fields may be set. +// If none are set the value is "null". +// +type Value struct { + // A boolean value. + BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` + // An integer value. + IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` + // A double value. + DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` + // A timestamp value. + TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` + // A key value. + KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` + // A blob key value. + BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` + // A UTF-8 encoded string value. + StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` + // A blob value. + BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` + // An entity value. + // May have no key. + // May have a key with an incomplete key path. + // May have a reserved/read-only key. + EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` + // A list value. + // Cannot contain another list value. + // Cannot also have a meaning and indexing set. + ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` + // The meaning field is reserved and should not be used. + Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
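+	//
+	// For example (a hedged sketch; longText is assumed to exceed the
+	// 500-character indexing limit), an unindexed string value:
+	//
+	//	v := &Value{
+	//		StringValue: proto.String(longText),
+	//		Indexed:     proto.Bool(false),
+	//	}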
+ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} + +const Default_Value_Indexed bool = true + +func (m *Value) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampMicrosecondsValue() int64 { + if m != nil && m.TimestampMicrosecondsValue != nil { + return *m.TimestampMicrosecondsValue + } + return 0 +} + +func (m *Value) GetKeyValue() *Key { + if m != nil { + return m.KeyValue + } + return nil +} + +func (m *Value) GetBlobKeyValue() string { + if m != nil && m.BlobKeyValue != nil { + return *m.BlobKeyValue + } + return "" +} + +func (m *Value) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Value) GetBlobValue() []byte { + if m != nil { + return m.BlobValue + } + return nil +} + +func (m *Value) GetEntityValue() *Entity { + if m != nil { + return m.EntityValue + } + return nil +} + +func (m *Value) GetListValue() []*Value { + if m != nil { + return m.ListValue + } + return nil +} + +func (m *Value) GetMeaning() int32 { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return 0 +} + +func (m *Value) GetIndexed() bool { + if m != nil && m.Indexed != nil { + return *m.Indexed + } + return Default_Value_Indexed +} + +// An entity property. +type Property struct { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +type Entity struct { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The entity's properties. + // Each property's name must be unique for its entity. 
+ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} + +func (m *Entity) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +// The result of fetching an entity from the datastore. +type EntityResult struct { + // The resulting entity. + Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityResult) Reset() { *m = EntityResult{} } +func (m *EntityResult) String() string { return proto.CompactTextString(m) } +func (*EntityResult) ProtoMessage() {} + +func (m *EntityResult) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +// A query. +type Query struct { + // The projection to return. If not set the entire entity is returned. + Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` + // The kinds to query (if empty, returns entities from all kinds). + Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` + // The filter to apply (optional). + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + // The order to apply to the query results (if empty, order is unspecified). + Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` + // The properties to group by (if empty, no grouping is applied to the + // result set). + GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` + // A starting point for the query results. Optional. Query cursors are + // returned in query result batches. + StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` + // An ending point for the query results. Optional. Query cursors are + // returned in query result batches. + EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The number of results to skip. Applies before limit, but after all other + // constraints (optional, defaults to 0). + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + // The maximum number of results to return. Applies after all other + // constraints. Optional. 
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 + +func (m *Query) GetProjection() []*PropertyExpression { + if m != nil { + return m.Projection + } + return nil +} + +func (m *Query) GetKind() []*KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Query) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetOrder() []*PropertyOrder { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetGroupBy() []*PropertyReference { + if m != nil { + return m.GroupBy + } + return nil +} + +func (m *Query) GetStartCursor() []byte { + if m != nil { + return m.StartCursor + } + return nil +} + +func (m *Query) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +// A representation of a kind. +type KindExpression struct { + // The name of the kind. + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} + +func (m *KindExpression) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A reference to a property relative to the kind expressions. +// exactly. +type PropertyReference struct { + // The name of the property. + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} + +func (m *PropertyReference) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A representation of a property in a projection. +type PropertyExpression struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The aggregation function to apply to the property. Optional. + // Can only be used when grouping by at least one property. Must + // then be set on all properties in the projection that are not + // being grouped by. 
+ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } +func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } +func (*PropertyExpression) ProtoMessage() {} + +func (m *PropertyExpression) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { + if m != nil && m.AggregationFunction != nil { + return *m.AggregationFunction + } + return PropertyExpression_FIRST +} + +// The desired order for a specific property. +type PropertyOrder struct { + // The property to order by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The direction to order by. + Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +func (*PropertyOrder) ProtoMessage() {} + +const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING + +func (m *PropertyOrder) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_PropertyOrder_Direction +} + +// A holder for any type of filter. Exactly one field should be specified. +type Filter struct { + // A composite filter. + CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` + // A filter on a property. + PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} + +func (m *Filter) GetCompositeFilter() *CompositeFilter { + if m != nil { + return m.CompositeFilter + } + return nil +} + +func (m *Filter) GetPropertyFilter() *PropertyFilter { + if m != nil { + return m.PropertyFilter + } + return nil +} + +// A filter that merges the multiple other filters using the given operation. +type CompositeFilter struct { + // The operator for combining multiple filters. + Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. 
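+	// A minimal sketch of combining two existing filters f1 and f2
+	// (hypothetical values) with the only defined operator, AND:
+	//
+	//	f := &Filter{CompositeFilter: &CompositeFilter{
+	//		Operator: CompositeFilter_AND.Enum(),
+	//		Filter:   []*Filter{f1, f2},
+	//	}}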
+ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeFilter) ProtoMessage() {} + +func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return CompositeFilter_AND +} + +func (m *CompositeFilter) GetFilter() []*Filter { + if m != nil { + return m.Filter + } + return nil +} + +// A filter on a specific property. +type PropertyFilter struct { + // The property to filter by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The operator to filter by. + Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"` + // The value to compare the property to. + Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +func (*PropertyFilter) ProtoMessage() {} + +func (m *PropertyFilter) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return PropertyFilter_LESS_THAN +} + +func (m *PropertyFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A GQL query. +type GqlQuery struct { + QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` + // When false, the query string must not contain a literal. + AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` + // A named argument must set field GqlQueryArg.name. + // No two named arguments may have the same name. + // For each non-reserved named binding site in the query string, + // there must be a named argument with that name, + // but not necessarily the inverse. + NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` + // Numbered binding site @1 references the first numbered argument, + // effectively using 1-based indexing, rather than the usual 0. + // A numbered argument must NOT set field GqlQueryArg.name. + // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. 
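+	// For example, a query string whose only binding site is @1 must be
+	// accompanied by exactly one numbered argument.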
+ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQuery) Reset() { *m = GqlQuery{} } +func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +func (*GqlQuery) ProtoMessage() {} + +const Default_GqlQuery_AllowLiteral bool = false + +func (m *GqlQuery) GetQueryString() string { + if m != nil && m.QueryString != nil { + return *m.QueryString + } + return "" +} + +func (m *GqlQuery) GetAllowLiteral() bool { + if m != nil && m.AllowLiteral != nil { + return *m.AllowLiteral + } + return Default_GqlQuery_AllowLiteral +} + +func (m *GqlQuery) GetNameArg() []*GqlQueryArg { + if m != nil { + return m.NameArg + } + return nil +} + +func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { + if m != nil { + return m.NumberArg + } + return nil +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +type GqlQueryArg struct { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } +func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } +func (*GqlQueryArg) ProtoMessage() {} + +func (m *GqlQueryArg) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *GqlQueryArg) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GqlQueryArg) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +// A batch of results produced by a query. +type QueryResultBatch struct { + // The result type for every entity in entityResults. + EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"` + // The results for this batch. + EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` + // A cursor that points to the position after the last result in the batch. + // May be absent. + EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The state of the query after the current batch. + MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` + // The number of results skipped because of Query.offset. 
+ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +func (*QueryResultBatch) ProtoMessage() {} + +func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { + if m != nil && m.EntityResultType != nil { + return *m.EntityResultType + } + return EntityResult_FULL +} + +func (m *QueryResultBatch) GetEntityResult() []*EntityResult { + if m != nil { + return m.EntityResult + } + return nil +} + +func (m *QueryResultBatch) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return QueryResultBatch_NOT_FINISHED +} + +func (m *QueryResultBatch) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +type Mutation struct { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` + // Ignore a user specified read-only period. Optional. + Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} + +func (m *Mutation) GetUpsert() []*Entity { + if m != nil { + return m.Upsert + } + return nil +} + +func (m *Mutation) GetUpdate() []*Entity { + if m != nil { + return m.Update + } + return nil +} + +func (m *Mutation) GetInsert() []*Entity { + if m != nil { + return m.Insert + } + return nil +} + +func (m *Mutation) GetInsertAutoId() []*Entity { + if m != nil { + return m.InsertAutoId + } + return nil +} + +func (m *Mutation) GetDelete() []*Key { + if m != nil { + return m.Delete + } + return nil +} + +func (m *Mutation) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return false +} + +// The result of applying a mutation. 
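+//
+// A minimal sketch of reading back allocated keys after a commit, where
+// resp is a hypothetical *CommitResponse:
+//
+//	if mr := resp.GetMutationResult(); mr != nil {
+//		// Completed keys for insert_auto_id entities, in request order.
+//		keys := mr.GetInsertAutoIdKey()
+//		_ = keys
+//	}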
+type MutationResult struct { + // Number of index writes. + IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationResult) Reset() { *m = MutationResult{} } +func (m *MutationResult) String() string { return proto.CompactTextString(m) } +func (*MutationResult) ProtoMessage() {} + +func (m *MutationResult) GetIndexUpdates() int32 { + if m != nil && m.IndexUpdates != nil { + return *m.IndexUpdates + } + return 0 +} + +func (m *MutationResult) GetInsertAutoIdKey() []*Key { + if m != nil { + return m.InsertAutoIdKey + } + return nil +} + +// Options shared by read requests. +type ReadOptions struct { + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` + // The transaction to use. Optional. + Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReadOptions) Reset() { *m = ReadOptions{} } +func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +func (*ReadOptions) ProtoMessage() {} + +const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT + +func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { + if m != nil && m.ReadConsistency != nil { + return *m.ReadConsistency + } + return Default_ReadOptions_ReadConsistency +} + +func (m *ReadOptions) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Lookup. +type LookupRequest struct { + // Options for this lookup request. Optional. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Keys of entities to look up from the datastore. + Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupRequest) Reset() { *m = LookupRequest{} } +func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +func (*LookupRequest) ProtoMessage() {} + +func (m *LookupRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *LookupRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for Lookup. +type LookupResponse struct { + // Entities found as ResultType.FULL entities. + Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + // Entities not found as ResultType.KEY_ONLY entities. + Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` + // A list of keys that were not looked up due to resource constraints. 
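+	// Callers typically retry deferred keys in a follow-up LookupRequest.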
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupResponse) Reset() { *m = LookupResponse{} } +func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +func (*LookupResponse) ProtoMessage() {} + +func (m *LookupResponse) GetFound() []*EntityResult { + if m != nil { + return m.Found + } + return nil +} + +func (m *LookupResponse) GetMissing() []*EntityResult { + if m != nil { + return m.Missing + } + return nil +} + +func (m *LookupResponse) GetDeferred() []*Key { + if m != nil { + return m.Deferred + } + return nil +} + +// The request for RunQuery. +type RunQueryRequest struct { + // The options for this query. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` + // The query to run. + // Either this field or field gql_query must be set, but not both. + Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` + // The GQL query to run. + // Either this field or field query must be set, but not both. + GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} + +func (m *RunQueryRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *RunQueryRequest) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *RunQueryRequest) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { + if m != nil { + return m.GqlQuery + } + return nil +} + +// The response for RunQuery. +type RunQueryResponse struct { + // A batch of query results (always present). + Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} + +func (m *RunQueryResponse) GetBatch() *QueryResultBatch { + if m != nil { + return m.Batch + } + return nil +} + +// The request for BeginTransaction. +type BeginTransactionRequest struct { + // The transaction isolation level. 
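+	// Defaults to SNAPSHOT when unset (see
+	// Default_BeginTransactionRequest_IsolationLevel below).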
+ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT + +func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { + if m != nil && m.IsolationLevel != nil { + return *m.IsolationLevel + } + return Default_BeginTransactionRequest_IsolationLevel +} + +// The response for BeginTransaction. +type BeginTransactionResponse struct { + // The transaction identifier (always present). + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Rollback. +type RollbackRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. + Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for Rollback. +type RollbackResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} + +// The request for Commit. +type CommitRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + // The mutation to perform. Optional. + Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
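+	// Defaults to TRANSACTIONAL when unset (see Default_CommitRequest_Mode
+	// below).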
+ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} + +const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL + +func (m *CommitRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *CommitRequest) GetMutation() *Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *CommitRequest) GetMode() CommitRequest_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_CommitRequest_Mode +} + +// The response for Commit. +type CommitResponse struct { + // The result of performing the mutation (if any). + MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetMutationResult() *MutationResult { + if m != nil { + return m.MutationResult + } + return nil +} + +// The request for AllocateIds. +type AllocateIdsRequest struct { + // A list of keys with incomplete key paths to allocate IDs for. + // No key may be reserved/read-only. + Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for AllocateIds. +type AllocateIdsResponse struct { + // The keys specified in the request (in the same order), each with + // its key path completed with a newly allocated ID. 
+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +func init() { + proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) + proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) + proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) + proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) + proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) + proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) + proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) + proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) + proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto new file mode 100644 index 00000000..d752beaa --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto @@ -0,0 +1,606 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The datastore v1 service proto definitions + +syntax = "proto2"; + +package datastore; +option java_package = "com.google.api.services.datastore"; + + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. 
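+// For example, an empty partition ID is replaced by the context partition
+// ID, while a partition ID carrying only a namespace inherits the context
+// dataset ID.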
+//
+// Partition dimension:
+// A dimension may be unset.
+// A dimension's value must never be "".
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
+// If the value of any dimension matches regex "__.*__",
+// the partition is reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented contexts.
+//
+// Dataset ID:
+// A dataset ID's value must never be "".
+// A dataset ID's value must match
+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99})
+message PartitionId {
+  // The dataset ID.
+  optional string dataset_id = 3;
+  // The namespace.
+  optional string namespace = 4;
+}
+
+// A unique identifier for an entity.
+// If a key's partition ID or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+  // Entities are partitioned into subsets, currently identified by a dataset
+  // (usually implicitly specified by the project) and namespace ID.
+  // Queries are scoped to a single partition.
+  optional PartitionId partition_id = 1;
+
+  // A (kind, ID/name) pair used to construct a key path.
+  //
+  // At most one of name or ID may be set.
+  // If either is set, the element is complete.
+  // If neither is set, the element is incomplete.
+  message PathElement {
+    // The kind of the entity.
+    // A kind matching regex "__.*__" is reserved/read-only.
+    // A kind must not contain more than 500 characters.
+    // Cannot be "".
+    required string kind = 1;
+    // The ID of the entity.
+    // Never equal to zero. Values less than zero are discouraged and will not
+    // be supported in the future.
+    optional int64 id = 2;
+    // The name of the entity.
+    // A name matching regex "__.*__" is reserved/read-only.
+    // A name must not be more than 500 characters.
+    // Cannot be "".
+    optional string name = 3;
+  }
+
+  // The entity path.
+  // An entity path consists of one or more elements composed of a kind and a
+  // string or numerical identifier, which identify entities. The first
+  // element identifies a root entity, the second element identifies
+  // a child of the root entity, the third element a child of the
+  // second entity, and so forth. The entities identified by all prefixes of
+  // the path are called the element's ancestors.
+  // An entity path is always fully complete: ALL of the entity's ancestors
+  // are required to be in the path along with the entity identifier itself.
+  // The only exception is that in some documented cases, the identifier in the
+  // last path element (for the entity) itself may be omitted. A path can never
+  // be empty.
+  repeated PathElement path_element = 2;
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+//
+// At most one of the Value fields may be set.
+// If none are set the value is "null".
+//
+message Value {
+  // A boolean value.
+  optional bool boolean_value = 1;
+  // An integer value.
+  optional int64 integer_value = 2;
+  // A double value.
+  optional double double_value = 3;
+  // A timestamp value.
+  optional int64 timestamp_microseconds_value = 4;
+  // A key value.
+  optional Key key_value = 5;
+  // A blob key value.
+  optional string blob_key_value = 16;
+  // A UTF-8 encoded string value.
+  optional string string_value = 17;
+  // A blob value.
+  optional bytes blob_value = 18;
+  // An entity value.
+  // May have no key.
+  // May have a key with an incomplete key path.
+  // May have a reserved/read-only key.
+ optional Entity entity_value = 6; + // A list value. + // Cannot contain another list value. + // Cannot also have a meaning and indexing set. + repeated Value list_value = 7; + + // The meaning field is reserved and should not be used. + optional int32 meaning = 14; + + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. + optional bool indexed = 15 [default = true]; +} + +// An entity property. +message Property { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + required string name = 1; + + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + required Value value = 4; +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +message Entity { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + optional Key key = 1; + // The entity's properties. + // Each property's name must be unique for its entity. + repeated Property property = 2; +} + +// The result of fetching an entity from the datastore. +message EntityResult { + // Specifies what data the 'entity' field contains. + // A ResultType is either implied (for example, in LookupResponse.found it + // is always FULL) or specified by context (for example, in message + // QueryResultBatch, field 'entity_result_type' specifies a ResultType + // for all the values in field 'entity_result'). + enum ResultType { + FULL = 1; // The entire entity. + PROJECTION = 2; // A projected subset of properties. + // The entity may have no key. + // A property value may have meaning 18. + KEY_ONLY = 3; // Only the key. + } + + // The resulting entity. + required Entity entity = 1; +} + +// A query. +message Query { + // The projection to return. If not set the entire entity is returned. + repeated PropertyExpression projection = 2; + + // The kinds to query (if empty, returns entities from all kinds). + repeated KindExpression kind = 3; + + // The filter to apply (optional). + optional Filter filter = 4; + + // The order to apply to the query results (if empty, order is unspecified). 
+ repeated PropertyOrder order = 5; + + // The properties to group by (if empty, no grouping is applied to the + // result set). + repeated PropertyReference group_by = 6; + + // A starting point for the query results. Optional. Query cursors are + // returned in query result batches. + optional bytes /* serialized QueryCursor */ start_cursor = 7; + + // An ending point for the query results. Optional. Query cursors are + // returned in query result batches. + optional bytes /* serialized QueryCursor */ end_cursor = 8; + + // The number of results to skip. Applies before limit, but after all other + // constraints (optional, defaults to 0). + optional int32 offset = 10 [default=0]; + + // The maximum number of results to return. Applies after all other + // constraints. Optional. + optional int32 limit = 11; +} + +// A representation of a kind. +message KindExpression { + // The name of the kind. + required string name = 1; +} + +// A reference to a property relative to the kind expressions. +// exactly. +message PropertyReference { + // The name of the property. + required string name = 2; +} + +// A representation of a property in a projection. +message PropertyExpression { + enum AggregationFunction { + FIRST = 1; + } + // The property to project. + required PropertyReference property = 1; + // The aggregation function to apply to the property. Optional. + // Can only be used when grouping by at least one property. Must + // then be set on all properties in the projection that are not + // being grouped by. + optional AggregationFunction aggregation_function = 2; +} + +// The desired order for a specific property. +message PropertyOrder { + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + // The property to order by. + required PropertyReference property = 1; + // The direction to order by. + optional Direction direction = 2 [default=ASCENDING]; +} + +// A holder for any type of filter. Exactly one field should be specified. +message Filter { + // A composite filter. + optional CompositeFilter composite_filter = 1; + // A filter on a property. + optional PropertyFilter property_filter = 2; +} + +// A filter that merges the multiple other filters using the given operation. +message CompositeFilter { + enum Operator { + AND = 1; + } + + // The operator for combining multiple filters. + required Operator operator = 1; + // The list of filters to combine. + // Must contain at least one filter. + repeated Filter filter = 2; +} + +// A filter on a specific property. +message PropertyFilter { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + + HAS_ANCESTOR = 11; + } + + // The property to filter by. + required PropertyReference property = 1; + // The operator to filter by. + required Operator operator = 2; + // The value to compare the property to. + required Value value = 3; +} + +// A GQL query. +message GqlQuery { + required string query_string = 1; + // When false, the query string must not contain a literal. + optional bool allow_literal = 2 [default = false]; + // A named argument must set field GqlQueryArg.name. + // No two named arguments may have the same name. + // For each non-reserved named binding site in the query string, + // there must be a named argument with that name, + // but not necessarily the inverse. + repeated GqlQueryArg name_arg = 3; + // Numbered binding site @1 references the first numbered argument, + // effectively using 1-based indexing, rather than the usual 0. 
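+  // For example, binding site @2 references the second numbered argument.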
+ // A numbered argument must NOT set field GqlQueryArg.name. + // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. + repeated GqlQueryArg number_arg = 4; +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +message GqlQueryArg { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + optional string name = 1; + optional Value value = 2; + optional bytes cursor = 3; +} + +// A batch of results produced by a query. +message QueryResultBatch { + // The possible values for the 'more_results' field. + enum MoreResultsType { + NOT_FINISHED = 1; // There are additional batches to fetch from this query. + MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more + // results after the limit. + NO_MORE_RESULTS = 3; // The query has been exhausted. + } + + // The result type for every entity in entityResults. + required EntityResult.ResultType entity_result_type = 1; + // The results for this batch. + repeated EntityResult entity_result = 2; + + // A cursor that points to the position after the last result in the batch. + // May be absent. + optional bytes /* serialized QueryCursor */ end_cursor = 4; + + // The state of the query after the current batch. + required MoreResultsType more_results = 5; + + // The number of results skipped because of Query.offset. + optional int32 skipped_results = 6; +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +message Mutation { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity upsert = 1; + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity update = 2; + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity insert = 3; + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + repeated Entity insert_auto_id = 4; + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + repeated Key delete = 5; + // Ignore a user specified read-only period. Optional. + optional bool force = 6; +} + +// The result of applying a mutation. +message MutationResult { + // Number of index writes. + required int32 index_updates = 1; + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + repeated Key insert_auto_id_key = 2; +} + +// Options shared by read requests. +message ReadOptions { + enum ReadConsistency { + DEFAULT = 0; + STRONG = 1; + EVENTUAL = 2; + } + + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + optional ReadConsistency read_consistency = 1 [default=DEFAULT]; + + // The transaction to use. Optional. 
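+  // When a transaction is set, read_consistency must not be set.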
+ optional bytes /* serialized Transaction */ transaction = 2; +} + +// The request for Lookup. +message LookupRequest { + + // Options for this lookup request. Optional. + optional ReadOptions read_options = 1; + // Keys of entities to look up from the datastore. + repeated Key key = 3; +} + +// The response for Lookup. +message LookupResponse { + + // The order of results in these fields is undefined and has no relation to + // the order of the keys in the input. + + // Entities found as ResultType.FULL entities. + repeated EntityResult found = 1; + + // Entities not found as ResultType.KEY_ONLY entities. + repeated EntityResult missing = 2; + + // A list of keys that were not looked up due to resource constraints. + repeated Key deferred = 3; +} + + +// The request for RunQuery. +message RunQueryRequest { + + // The options for this query. + optional ReadOptions read_options = 1; + + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + optional PartitionId partition_id = 2; + + // The query to run. + // Either this field or field gql_query must be set, but not both. + optional Query query = 3; + // The GQL query to run. + // Either this field or field query must be set, but not both. + optional GqlQuery gql_query = 7; +} + +// The response for RunQuery. +message RunQueryResponse { + + // A batch of query results (always present). + optional QueryResultBatch batch = 1; + +} + +// The request for BeginTransaction. +message BeginTransactionRequest { + + enum IsolationLevel { + SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions + // conflict if their mutations conflict. For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). + SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent + // transactions conflict if they cannot be serialized. + // For example Read(A),Write(B) does conflict with + // Read(B),Write(A) but Read(A) may not conflict with + // Write(A). + } + + // The transaction isolation level. + optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; +} + +// The response for BeginTransaction. +message BeginTransactionResponse { + + // The transaction identifier (always present). + optional bytes /* serialized Transaction */ transaction = 1; +} + +// The request for Rollback. +message RollbackRequest { + + // The transaction identifier, returned by a call to + // beginTransaction. + required bytes /* serialized Transaction */ transaction = 1; +} + +// The response for Rollback. +message RollbackResponse { +// Empty +} + +// The request for Commit. +message CommitRequest { + + enum Mode { + TRANSACTIONAL = 1; + NON_TRANSACTIONAL = 2; + } + + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + optional bytes /* serialized Transaction */ transaction = 1; + // The mutation to perform. Optional. + optional Mutation mutation = 2; + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. + optional Mode mode = 5 [default=TRANSACTIONAL]; +} + +// The response for Commit. +message CommitResponse { + + // The result of performing the mutation (if any). 
+  optional MutationResult mutation_result = 1;
+}
+
+// The request for AllocateIds.
+message AllocateIdsRequest {
+
+  // A list of keys with incomplete key paths to allocate IDs for.
+  // No key may be reserved/read-only.
+  repeated Key key = 1;
+}
+
+// The response for AllocateIds.
+message AllocateIdsResponse {
+
+  // The keys specified in the request (in the same order), each with
+  // its key path completed with a newly allocated ID.
+  repeated Key key = 1;
+}
+
+// Each rpc normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// (Note that this applies to all entities, including entities in values.)
+service DatastoreService {
+  // Look up some entities by key.
+  rpc Lookup(LookupRequest) returns (LookupResponse) {
+  };
+  // Query for entities.
+  rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+  };
+  // Begin a new transaction.
+  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+  };
+  // Commit a transaction, optionally creating, deleting or modifying some
+  // entities.
+  rpc Commit(CommitRequest) returns (CommitResponse) {
+  };
+  // Roll back a transaction.
+  rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+  };
+  // Allocate IDs for incomplete keys (useful for referencing an entity before
+  // it is inserted).
+  rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+  };
+}
diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go
new file mode 100644
index 00000000..33f9bf4f
--- /dev/null
+++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go
@@ -0,0 +1,25 @@
+// Package opts holds the DialOpt struct, configurable by
+// cloud.ClientOptions to set up transports for cloud packages.
+//
+// This is a separate package to prevent cycles between the core
+// cloud packages.
+package opts
+
+import (
+	"net/http"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2"
+	"google.golang.org/grpc"
+)
+
+type DialOpt struct {
+	Endpoint  string
+	Scopes    []string
+	UserAgent string
+
+	TokenSource oauth2.TokenSource
+
+	HTTPClient   *http.Client
+	GRPCClient   *grpc.ClientConn
+	GRPCDialOpts []grpc.DialOption
+}
diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
new file mode 100644
index 00000000..0ad9f863
--- /dev/null
+++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
@@ -0,0 +1,69 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
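+// It relies on the GCLOUD_TESTS_GOLANG_PROJECT_ID and
+// GCLOUD_TESTS_GOLANG_KEY environment variables; see ProjID and TokenSource
+// below.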
+package testutil + +import ( + "io/ioutil" + "log" + "net/http" + "os" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/google" + "google.golang.org/cloud" +) + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +) + +func ProjID() string { + projID := os.Getenv(envProjID) + if projID == "" { + log.Fatal(envProjID + " must be set. See CONTRIBUTING.md for details.") + } + return projID +} + +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envPrivateKey) + if key == "" { + log.Fatal(envPrivateKey + " must be set. See CONTRIBUTING.md for details.") + } + jsonKey, err := ioutil.ReadFile(key) + if err != nil { + log.Fatalf("Cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + log.Fatalf("google.JWTConfigFromJSON: %v", err) + } + return conf.TokenSource(ctx) +} + +// TODO(djd): Delete this function when it's no longer used. +func Context(scopes ...string) context.Context { + ctx := oauth2.NoContext + ts := TokenSource(ctx, scopes...) + return cloud.NewContext(ProjID(), oauth2.NewClient(ctx, ts)) +} + +// TODO(djd): Delete this function when it's no longer used. +func NoAuthContext() context.Context { + return cloud.NewContext(ProjID(), &http.Client{Transport: http.DefaultTransport}) +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go new file mode 100644 index 00000000..ddae71cc --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go @@ -0,0 +1,29 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.5 + +package transport + +import "net/http" + +// makeReqCancel returns a closure that cancels the given http.Request +// when called. +func makeReqCancel(req *http.Request) func(http.RoundTripper) { + c := make(chan struct{}) + req.Cancel = c + return func(http.RoundTripper) { + close(c) + } +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go new file mode 100644 index 00000000..c11a4dde --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go @@ -0,0 +1,31 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.5
+
+package transport
+
+import "net/http"
+
+// makeReqCancel returns a closure that cancels the given http.Request
+// when called.
+func makeReqCancel(req *http.Request) func(http.RoundTripper) {
+	// Go 1.4 and prior do not have a reliable way of cancelling a request.
+	// Transport.CancelRequest will only work if the request is already in-flight.
+	return func(r http.RoundTripper) {
+		if t, ok := r.(*http.Transport); ok {
+			t.CancelRequest(req)
+		}
+	}
+}
diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go
new file mode 100644
index 00000000..9d2db300
--- /dev/null
+++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go
@@ -0,0 +1,135 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/google"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/cloud/internal/opts"
+	"google.golang.org/cloud"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+	StatusCode int
+	Body       []byte
+	err        error
+}
+
+func (e *ErrHTTP) Error() string {
+	if e.err == nil {
+		return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+	}
+	return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+	var o opts.DialOpt
+	for _, opt := range opt {
+		opt.Resolve(&o)
+	}
+	if o.GRPCClient != nil {
+		return nil, "", errors.New("unsupported GRPC base transport specified")
+	}
+	// TODO(djd): Wrap all http.Clients with appropriate internal version to add
+	// UserAgent header and prepend correct endpoint.
+	if o.HTTPClient != nil {
+		return o.HTTPClient, o.Endpoint, nil
+	}
+	if o.TokenSource == nil {
+		var err error
+		o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil { + return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err) + } + } + return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil +} + +// NewProtoClient returns a ProtoClient for communicating with a Google cloud service, +// configured with the given ClientOptions. +func NewProtoClient(ctx context.Context, opt ...cloud.ClientOption) (*ProtoClient, error) { + var o opts.DialOpt + for _, opt := range opt { + opt.Resolve(&o) + } + if o.GRPCClient != nil { + return nil, errors.New("unsupported GRPC base transport specified") + } + var client *http.Client + switch { + case o.HTTPClient != nil: + if o.TokenSource != nil { + return nil, errors.New("at most one of WithTokenSource or WithBaseHTTP may be provided") + } + client = o.HTTPClient + case o.TokenSource != nil: + client = oauth2.NewClient(ctx, o.TokenSource) + default: + var err error + client, err = google.DefaultClient(ctx, o.Scopes...) + if err != nil { + return nil, err + } + } + + return &ProtoClient{ + client: client, + endpoint: o.Endpoint, + userAgent: o.UserAgent, + }, nil +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) { + var o opts.DialOpt + for _, opt := range opt { + opt.Resolve(&o) + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP base transport specified") + } + if o.GRPCClient != nil { + return o.GRPCClient, nil + } + if o.TokenSource == nil { + var err error + o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...) + if err != nil { + return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) + } + } + grpcOpts := []grpc.DialOption{ + grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.Dial(o.Endpoint, grpcOpts...) +} diff --git a/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go new file mode 100644 index 00000000..0c2e66ad --- /dev/null +++ b/server/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go @@ -0,0 +1,80 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/net/context" +) + +type ProtoClient struct { + client *http.Client + endpoint string + userAgent string +} + +func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error { + payload, err := proto.Marshal(req) + if err != nil { + return err + } + + httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload)) + if err != nil { + return err + } + httpReq.Header.Set("Content-Type", "application/x-protobuf") + if ua := c.userAgent; ua != "" { + httpReq.Header.Set("User-Agent", ua) + } + + errc := make(chan error, 1) + cancel := makeReqCancel(httpReq) + + go func() { + r, err := c.client.Do(httpReq) + if err != nil { + errc <- err + return + } + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err = &ErrHTTP{ + StatusCode: r.StatusCode, + Body: body, + err: err, + } + } + if err != nil { + errc <- err + return + } + errc <- proto.Unmarshal(body, resp) + }() + + select { + case <-ctx.Done(): + cancel(c.client.Transport) // Cancel the HTTP request. + return ctx.Err() + case err := <-errc: + return err + } +} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/auth_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/auth_test.go deleted file mode 100644 index c25e47c8..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/auth_test.go +++ /dev/null @@ -1,1180 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "crypto/tls" - "flag" - "fmt" - "io/ioutil" - "net" - "net/url" - "os" - "runtime" - "sync" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" // Test both with a normal database and with an authenticated shard. - . 
"gopkg.in/check.v1" -) - -func (s *S) TestAuthLoginDatabase(c *C) { - - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - admindb := session.DB("admin") - - err = admindb.Login("root", "wrong") - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - } -} - -func (s *S) TestAuthLoginSession(c *C) { - // Test both with a normal database and with an authenticated shard. - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - cred := mgo.Credential{ - Username: "root", - Password: "wrong", - } - err = session.Login(&cred) - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") - - cred.Password = "rapadura" - - err = session.Login(&cred) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - } -} - -func (s *S) TestAuthLoginLogout(c *C) { - // Test both with a normal database and with an authenticated shard. - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - // Must have dropped auth from the session too. - session = session.Copy() - defer session.Close() - - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - } -} - -func (s *S) TestAuthLoginLogoutAll(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session.LogoutAll() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - // Must have dropped auth from the session too. 
- session = session.Copy() - defer session.Close() - - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") -} - -func (s *S) TestAuthUpsertUserErrors(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - err = mydb.UpsertUser(&mgo.User{}) - c.Assert(err, ErrorMatches, "user has no Username") - - err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"}) - c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") - - err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) - c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases") -} - -func (s *S) TestAuthUpsertUser(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - ruser := &mgo.User{ - Username: "myruser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleRead}, - } - rwuser := &mgo.User{ - Username: "myrwuser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleReadWrite}, - } - - err = mydb.UpsertUser(ruser) - c.Assert(err, IsNil) - err = mydb.UpsertUser(rwuser) - c.Assert(err, IsNil) - - err = mydb.Login("myruser", "mypass") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = mydb.Login("myrwuser", "mypass") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - myotherdb := session.DB("myotherdb") - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - // Test UserSource. - rwuserother := &mgo.User{ - Username: "myrwuser", - UserSource: "mydb", - Roles: []mgo.Role{mgo.RoleRead}, - } - - err = myotherdb.UpsertUser(rwuserother) - if s.versionAtLeast(2, 6) { - c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`) - return - } - c.Assert(err, IsNil) - - admindb.Logout() - - // Test indirection via UserSource: we can't write to it, because - // the roles for myrwuser are different there. - othercoll := myotherdb.C("myothercoll") - err = othercoll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // Reading works, though. - err = othercoll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - - // Can't login directly into the database using UserSource, though. 
- err = myotherdb.Login("myrwuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") -} - -func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - ruser := &mgo.User{ - Username: "myruser", - Password: "mypass", - OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}}, - } - - err = admindb.UpsertUser(ruser) - c.Assert(err, IsNil) - defer admindb.RemoveUser("myruser") - - admindb.Logout() - err = admindb.Login("myruser", "mypass") - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = coll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthUpsertUserUpdates(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - // Insert a user that can read. - user := &mgo.User{ - Username: "myruser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleRead}, - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Now update the user password. - user = &mgo.User{ - Username: "myruser", - Password: "mynewpass", - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Login with the new user. - usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb") - c.Assert(err, IsNil) - defer usession.Close() - - // Can read, but not write. - err = usession.DB("mydb").C("mycoll").Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // Update the user role. - user = &mgo.User{ - Username: "myruser", - Roles: []mgo.Role{mgo.RoleReadWrite}, - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Dial again to ensure the password hasn't changed. - usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb") - c.Assert(err, IsNil) - defer usession.Close() - - // Now it can write. 
- err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthAddUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myruser", "mypass", true) - c.Assert(err, IsNil) - err = mydb.AddUser("mywuser", "mypass", false) - c.Assert(err, IsNil) - - err = mydb.Login("myruser", "mypass") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = mydb.Login("mywuser", "mypass") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthAddUserReplaces(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "myoldpass", false) - c.Assert(err, IsNil) - err = mydb.AddUser("myuser", "mynewpass", true) - c.Assert(err, IsNil) - - admindb.Logout() - - err = mydb.Login("myuser", "myoldpass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") - err = mydb.Login("myuser", "mynewpass") - c.Assert(err, IsNil) - - // ReadOnly flag was changed too. - err = mydb.C("mycoll").Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") -} - -func (s *S) TestAuthRemoveUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "mypass", true) - c.Assert(err, IsNil) - err = mydb.RemoveUser("myuser") - c.Assert(err, IsNil) - err = mydb.RemoveUser("myuser") - c.Assert(err, Equals, mgo.ErrNotFound) - - err = mydb.Login("myuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") -} - -func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - oldStats := mgo.GetStats() - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) -} - -func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - oldStats := mgo.GetStats() - - admindb.Logout() - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) -} - -func (s *S) TestAuthLoginSwitchUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - err = admindb.Login("reader", "rapadura") - c.Assert(err, IsNil) - - // Can't write. 
-	err = coll.Insert(M{"n": 1})
-	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
-
-	// But can read.
-	result := struct{ N int }{}
-	err = coll.Find(nil).One(&result)
-	c.Assert(err, IsNil)
-	c.Assert(result.N, Equals, 1)
-}
-
-func (s *S) TestAuthLoginChangePassword(c *C) {
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	mydb := session.DB("mydb")
-	err = mydb.AddUser("myuser", "myoldpass", false)
-	c.Assert(err, IsNil)
-
-	err = mydb.Login("myuser", "myoldpass")
-	c.Assert(err, IsNil)
-
-	err = mydb.AddUser("myuser", "mynewpass", true)
-	c.Assert(err, IsNil)
-
-	err = mydb.Login("myuser", "mynewpass")
-	c.Assert(err, IsNil)
-
-	admindb.Logout()
-
-	// The second login must be in effect, which means read-only.
-	err = mydb.C("mycoll").Insert(M{"n": 1})
-	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
-}
-
-func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) {
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	session.Refresh()
-
-	coll := session.DB("mydb").C("mycoll")
-	err = coll.Insert(M{"n": 1})
-	c.Assert(err, IsNil)
-}
-
-func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) {
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	session = session.Copy()
-	defer session.Close()
-
-	coll := session.DB("mydb").C("mycoll")
-	err = coll.Insert(M{"n": 1})
-	c.Assert(err, IsNil)
-}
-
-func (s *S) TestAuthLoginCachingWithSessionClone(c *C) {
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	session = session.Clone()
-	defer session.Close()
-
-	coll := session.DB("mydb").C("mycoll")
-	err = coll.Insert(M{"n": 1})
-	c.Assert(err, IsNil)
-}
-
-func (s *S) TestAuthLoginCachingWithNewSession(c *C) {
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	session = session.New()
-	defer session.Close()
-
-	coll := session.DB("mydb").C("mycoll")
-	err = coll.Insert(M{"n": 1})
-	c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized for .*")
-}
-
-func (s *S) TestAuthLoginCachingAcrossPool(c *C) {
-	// Logins are cached even when the connection goes back
-	// into the pool.
-
-	session, err := mgo.Dial("localhost:40002")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	admindb := session.DB("admin")
-	err = admindb.Login("root", "rapadura")
-	c.Assert(err, IsNil)
-
-	// Add another user to test the logout case at the same time.
-	mydb := session.DB("mydb")
-	err = mydb.AddUser("myuser", "mypass", false)
-	c.Assert(err, IsNil)
-
-	err = mydb.Login("myuser", "mypass")
-	c.Assert(err, IsNil)
-
-	// Logout root explicitly, to test both cases.
-	admindb.Logout()
-
-	// Give socket back to pool.
-	session.Refresh()
-
-	// Brand new session, should use socket from the pool.
- other := session.New() - defer other.Close() - - oldStats := mgo.GetStats() - - err = other.DB("admin").Login("root", "rapadura") - c.Assert(err, IsNil) - err = other.DB("mydb").Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Both logins were cached, so no ops. - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) - - // And they actually worked. - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - other.DB("admin").Logout() - - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) { - // Now verify that logouts are properly flushed if they - // are not revalidated after leaving the pool. - - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - // Add another user to test the logout case at the same time. - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "mypass", true) - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Just some data to query later. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - // Give socket back to pool. - session.Refresh() - - // Brand new session, should use socket from the pool. - other := session.New() - defer other.Close() - - oldStats := mgo.GetStats() - - err = other.DB("mydb").Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Login was cached, so no ops. - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) - - // Can't write, since root has been implicitly logged out - // when the collection went into the pool, and not revalidated. - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // But can read due to the revalidated myuser login. - result := struct{ N int }{} - err = other.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) -} - -func (s *S) TestAuthEventual(c *C) { - // Eventual sessions don't keep sockets around, so they are - // an interesting test case. 
- session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - var wg sync.WaitGroup - wg.Add(20) - - for i := 0; i != 10; i++ { - go func() { - defer wg.Done() - var result struct{ N int } - err := session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) - }() - } - - for i := 0; i != 10; i++ { - go func() { - defer wg.Done() - err := session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - }() - } - - wg.Wait() -} - -func (s *S) TestAuthURL(c *C) { - session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") - c.Assert(err, IsNil) - defer session.Close() - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthURLWrongCredentials(c *C) { - session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/") - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") - c.Assert(session, IsNil) -} - -func (s *S) TestAuthURLWithNewSession(c *C) { - // When authentication is in the URL, the new session will - // actually carry it on as well, even if logged out explicitly. - session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") - c.Assert(err, IsNil) - defer session.Close() - - session.DB("admin").Logout() - - // Do it twice to ensure it passes the needed data on. - session = session.New() - defer session.Close() - session = session.New() - defer session.Close() - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthURLWithDatabase(c *C) { - session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - mydb := session.DB("mydb") - err = mydb.AddUser("myruser", "mypass", true) - c.Assert(err, IsNil) - - // Test once with database, and once with source. - for i := 0; i < 2; i++ { - var url string - if i == 0 { - url = "mongodb://myruser:mypass@localhost:40002/mydb" - } else { - url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb" - } - usession, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer usession.Close() - - ucoll := usession.DB("mydb").C("mycoll") - err = ucoll.FindId(0).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - err = ucoll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - } -} - -func (s *S) TestDefaultDatabase(c *C) { - tests := []struct{ url, db string }{ - {"mongodb://root:rapadura@localhost:40002", "test"}, - {"mongodb://root:rapadura@localhost:40002/admin", "admin"}, - {"mongodb://localhost:40001", "test"}, - {"mongodb://localhost:40001/", "test"}, - {"mongodb://localhost:40001/mydb", "mydb"}, - } - - for _, test := range tests { - session, err := mgo.Dial(test.url) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("test: %#v", test) - c.Assert(session.DB("").Name, Equals, test.db) - - scopy := session.Copy() - c.Check(scopy.DB("").Name, Equals, test.db) - scopy.Close() - } -} - -func (s *S) TestAuthDirect(c *C) { - // Direct connections must work to the master and slaves. 
- for _, port := range []string{"40031", "40032", "40033"} { - url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port) - session, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - var result struct{} - err = session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - } -} - -func (s *S) TestAuthDirectWithLogin(c *C) { - // Direct connections must work to the master and slaves. - for _, port := range []string{"40031", "40032", "40033"} { - url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port) - session, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - session.SetSyncTimeout(3 * time.Second) - - err = session.DB("admin").Login("root", "rapadura") - c.Assert(err, IsNil) - - var result struct{} - err = session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - } -} - -func (s *S) TestAuthScramSha1Cred(c *C) { - if !s.versionAtLeast(2, 7, 7) { - c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") - } - cred := &mgo.Credential{ - Username: "root", - Password: "rapadura", - Mechanism: "SCRAM-SHA-1", - Source: "admin", - } - host := "localhost:40002" - c.Logf("Connecting to %s...", host) - session, err := mgo.Dial(host) - c.Assert(err, IsNil) - defer session.Close() - - mycoll := session.DB("admin").C("mycoll") - - c.Logf("Connected! Testing the need for authentication...") - err = mycoll.Find(nil).One(nil) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - c.Logf("Connected! Testing the need for authentication...") - err = mycoll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthScramSha1URL(c *C) { - if !s.versionAtLeast(2, 7, 7) { - c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") - } - host := "localhost:40002" - c.Logf("Connecting to %s...", host) - session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host)) - c.Assert(err, IsNil) - defer session.Close() - - mycoll := session.DB("admin").C("mycoll") - - c.Logf("Connected! Testing the need for authentication...") - err = mycoll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthX509Cred(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - binfo, err := session.BuildInfo() - c.Assert(err, IsNil) - if binfo.OpenSSLVersion == "" { - c.Skip("server does not support SSL") - } - - clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") - c.Assert(err, IsNil) - - clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) - c.Assert(err, IsNil) - - tlsConfig := &tls.Config{ - // Isolating tests to client certs, don't care about server validation. 
- InsecureSkipVerify: true, - Certificates: []tls.Certificate{clientCert}, - } - - var host = "localhost:40003" - c.Logf("Connecting to %s...", host) - session, err = mgo.DialWithInfo(&mgo.DialInfo{ - Addrs: []string{host}, - DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { - return tls.Dial("tcp", addr.String(), tlsConfig) - }, - }) - c.Assert(err, IsNil) - defer session.Close() - - err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) - c.Assert(err, IsNil) - - // This needs to be kept in sync with client.pem - x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" - - externalDB := session.DB("$external") - var x509User mgo.User = mgo.User{ - Username: x509Subject, - OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, - } - err = externalDB.UpsertUser(&x509User) - c.Assert(err, IsNil) - - session.LogoutAll() - - c.Logf("Connected! Ensuring authentication is required...") - names, err := session.DatabaseNames() - c.Assert(err, ErrorMatches, "not authorized .*") - - cred := &mgo.Credential{ - Username: x509Subject, - Mechanism: "MONGODB-X509", - Source: "$external", - } - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) -} - -var ( - plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") - plainUser = "einstein" - plainPass = "password" -) - -func (s *S) TestAuthPlainCred(c *C) { - if *plainFlag == "" { - c.Skip("no -plain") - } - cred := &mgo.Credential{ - Username: plainUser, - Password: plainPass, - Source: "$external", - Mechanism: "PLAIN", - } - c.Logf("Connecting to %s...", *plainFlag) - session, err := mgo.Dial(*plainFlag) - c.Assert(err, IsNil) - defer session.Close() - - records := session.DB("records").C("records") - - c.Logf("Connected! Testing the need for authentication...") - err = records.Find(nil).One(nil) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - c.Logf("Connected! Testing the need for authentication...") - err = records.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthPlainURL(c *C) { - if *plainFlag == "" { - c.Skip("no -plain") - } - c.Logf("Connecting to %s...", *plainFlag) - session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag)) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Connected! Testing the need for authentication...") - err = session.DB("records").C("records").Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -var ( - kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") - kerberosHost = "ldaptest.10gen.cc" - kerberosUser = "drivers@LDAPTEST.10GEN.CC" - - winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" -) - -// Kerberos has its own suite because it talks to a remote server -// that is prepared to authenticate against a kerberos deployment. 
-type KerberosSuite struct{} - -var _ = Suite(&KerberosSuite{}) - -func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) { - mgo.SetDebug(true) - mgo.SetStats(true) -} - -func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) { - mgo.SetDebug(false) - mgo.SetStats(false) -} - -func (kerberosSuite *KerberosSuite) SetUpTest(c *C) { - mgo.SetLogger((*cLogger)(c)) - mgo.ResetStats() -} - -func (kerberosSuite *KerberosSuite) TearDownTest(c *C) { - mgo.SetLogger(nil) -} - -func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - } - windowsAppendPasswordToCredential(cred) - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Connected! Testing the need for authentication...") - n, err := session.DB("kerberos").C("test").Find(M{}).Count() - c.Assert(err, ErrorMatches, ".*authorized.*") - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - n, err = session.DB("kerberos").C("test").Find(M{}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) -} - -func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - c.Logf("Connecting to %s...", kerberosHost) - connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" - if runtime.GOOS == "windows" { - connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" - } - session, err := mgo.Dial(connectUri) - c.Assert(err, IsNil) - defer session.Close() - n, err := session.DB("kerberos").C("test").Find(M{}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) -} - -func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - - wrongServiceName := "wrong" - rightServiceName := "mongodb" - - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - Service: wrongServiceName, - } - windowsAppendPasswordToCredential(cred) - - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Authenticating with incorrect service name...") - err = session.Login(cred) - c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") - - cred.Service = rightServiceName - c.Logf("Authenticating with correct service name...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - n, err := session.DB("kerberos").C("test").Find(M{}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) -} - -func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - - wrongServiceHost := "eggs.bacon.tk" - rightServiceHost := kerberosHost - - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - ServiceHost: wrongServiceHost, - } - windowsAppendPasswordToCredential(cred) - - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Authenticating with incorrect service host...") - err = session.Login(cred) - c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") - - cred.ServiceHost = rightServiceHost - c.Logf("Authenticating with correct service host...") - err = 
session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - n, err := session.DB("kerberos").C("test").Find(M{}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) -} - -// No kinit on SSPI-style Kerberos, so we need to provide a password. In order -// to avoid inlining password, require it to be set as an environment variable, -// for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password` -func getWindowsKerberosPassword() string { - pw := os.Getenv(winKerberosPasswordEnv) - if pw == "" { - panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv)) - } - return pw -} - -func windowsAppendPasswordToCredential(cred *mgo.Credential) { - if runtime.GOOS == "windows" { - cred.Password = getWindowsKerberosPassword() - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go index 41816b87..f1f9ab74 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go @@ -189,15 +189,25 @@ func IsObjectIdHex(s string) bool { // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter uint32 = 0 +var objectIdCounter uint32 = readRandomUint32() + +// readRandomUint32 returns a random objectIdCounter. +func readRandomUint32() uint32 { + var b [4]byte + _, err := io.ReadFull(rand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("cannot read random object id: %v", err)) + } + return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) +} + // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() -// readMachineId generates machine id and puts it into the machineId global -// variable. If this function fails to get the hostname, it will cause -// a runtime error. +// readMachineId generates and returns a machine id. +// If this function fails to get the hostname it will cause a runtime error. func readMachineId() []byte { var sum [3]byte id := sum[:] @@ -421,7 +431,8 @@ func handleErr(err *error) { } // Marshal serializes the in value, which may be a map or a struct value. -// In the case of struct values, only exported fields will be serialized. +// In the case of struct values, only exported fields will be serialized, +// and the order of serialized fields will match that of the struct itself. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go deleted file mode 100644 index 9a5c347d..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go +++ /dev/null @@ -1,1582 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. 
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson_test - -import ( - "encoding/binary" - "encoding/json" - "errors" - "net/url" - "reflect" - "testing" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" -) - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct{} - -var _ = Suite(&S{}) - -// Wrap up the document elements contained in data, prepending the int32 -// length of the data, and appending the '\x00' value closing the document. -func wrapInDoc(data string) string { - result := make([]byte, len(data)+5) - binary.LittleEndian.PutUint32(result, uint32(len(result))) - copy(result[4:], []byte(data)) - return string(result) -} - -func makeZeroDoc(value interface{}) (zero interface{}) { - v := reflect.ValueOf(value) - t := v.Type() - switch t.Kind() { - case reflect.Map: - mv := reflect.MakeMap(t) - zero = mv.Interface() - case reflect.Ptr: - pv := reflect.New(v.Type().Elem()) - zero = pv.Interface() - case reflect.Slice, reflect.Int: - zero = reflect.New(t).Interface() - default: - panic("unsupported doc type") - } - return zero -} - -func testUnmarshal(c *C, data string, obj interface{}) { - zero := makeZeroDoc(obj) - err := bson.Unmarshal([]byte(data), zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, obj) -} - -type testItemType struct { - obj interface{} - data string -} - -// -------------------------------------------------------------------------- -// Samples from bsonspec.org: - -var sampleItems = []testItemType{ - {bson.M{"hello": "world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalSampleItems(c *C) { - for i, item := range sampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalSampleItems(c *C) { - for i, item := range sampleItems { - value := bson.M{} - err := bson.Unmarshal([]byte(item.data), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Every type, ordered by the type flag. These are not wrapped with the -// length and last \x00 from the document. wrapInDoc() computes them. 
-// Note that all of them should be supported as two-way conversions. - -var allItems = []testItemType{ - {bson.M{}, - ""}, - {bson.M{"_": float64(5.05)}, - "\x01_\x00333333\x14@"}, - {bson.M{"_": "yo"}, - "\x02_\x00\x03\x00\x00\x00yo\x00"}, - {bson.M{"_": bson.M{"a": true}}, - "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, - {bson.M{"_": []interface{}{true, false}}, - "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - {bson.M{"_": []byte("yo")}, - "\x05_\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, - "\x05_\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. - "\x06_\x00"}, - {bson.M{"_": bson.ObjectId("0123456789ab")}, - "\x07_\x000123456789ab"}, - {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}}, - "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"}, - {bson.M{"_": false}, - "\x08_\x00\x00"}, - {bson.M{"_": true}, - "\x08_\x00\x01"}, - {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. - "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": nil}, - "\x0A_\x00"}, - {bson.M{"_": bson.RegEx{"ab", "cd"}}, - "\x0B_\x00ab\x00cd\x00"}, - {bson.M{"_": bson.JavaScript{"code", nil}}, - "\x0D_\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"_": bson.Symbol("sym")}, - "\x0E_\x00\x04\x00\x00\x00sym\x00"}, - {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - {bson.M{"_": 258}, - "\x10_\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MongoTimestamp(258)}, - "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258)}, - "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258 << 32)}, - "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MaxKey}, - "\x7F_\x00"}, - {bson.M{"_": bson.MinKey}, - "\xFF_\x00"}, -} - -func (s *S) TestMarshalAllItems(c *C) { - for i, item := range allItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalAllItems(c *C) { - for i, item := range allItems { - value := bson.M{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawAllItems(c *C) { - for i, item := range allItems { - if len(item.data) == 0 { - continue - } - value := item.obj.(bson.M)["_"] - if value == nil { - continue - } - pv := reflect.New(reflect.ValueOf(value).Type()) - raw := bson.Raw{item.data[0], []byte(item.data[3:])} - c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) - err := raw.Unmarshal(pv.Interface()) - c.Assert(err, IsNil) - c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawIncompatible(c *C) { - raw := bson.Raw{0x08, []byte{0x01}} // true - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") -} - -func (s *S) TestUnmarshalZeroesStruct(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - type T struct{ A, B int } - v := T{A: 1} - err = bson.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(v.A, Equals, 0) - c.Assert(v.B, Equals, 2) -} - -func (s *S) TestUnmarshalZeroesMap(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, 
IsNil) - m := bson.M{"a": 1} - err = bson.Unmarshal(data, &m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"b": 2}) -} - -func (s *S) TestUnmarshalNonNilInterface(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - m := bson.M{"a": 1} - var i interface{} - i = m - err = bson.Unmarshal(data, &i) - c.Assert(err, IsNil) - c.Assert(i, DeepEquals, bson.M{"b": 2}) - c.Assert(m, DeepEquals, bson.M{"a": 1}) -} - -// -------------------------------------------------------------------------- -// Some one way marshaling operations which would unmarshal differently. - -var oneWayMarshalItems = []testItemType{ - // These are being passed as pointers, and will unmarshal as values. - {bson.M{"": &bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, - "\x05\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"": &bson.RegEx{"ab", "cd"}}, - "\x0B\x00ab\x00cd\x00"}, - {bson.M{"": &bson.JavaScript{"code", nil}}, - "\x0D\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - - // There's no float32 type in BSON. Will encode as a float64. - {bson.M{"": float32(5.05)}, - "\x01\x00\x00\x00\x00@33\x14@"}, - - // The array will be unmarshaled as a slice instead. - {bson.M{"": [2]bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // The typed slice will be unmarshaled as []interface{}. - {bson.M{"": []bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // Will unmarshal as a []byte. - {bson.M{"": bson.Binary{0x00, []byte("yo")}}, - "\x05\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"": bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // No way to preserve the type information here. We might encode as a zero - // value, but this would mean that pointer values in structs wouldn't be - // able to correctly distinguish between unset and set to the zero value. - {bson.M{"": (*byte)(nil)}, - "\x0A\x00"}, - - // No int types smaller than int32 in BSON. Could encode this as a char, - // but it would still be ambiguous, take more, and be awkward in Go when - // loaded without typing information. - {bson.M{"": byte(8)}, - "\x10\x00\x08\x00\x00\x00"}, - - // There are no unsigned types in BSON. Will unmarshal as int32 or int64. - {bson.M{"": uint32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - {bson.M{"": uint64(258)}, - "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"": uint64(258 << 32)}, - "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - - // This will unmarshal as int. - {bson.M{"": int32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - - // That's a special case. The unsigned value is too large for an int32, - // so an int64 is used instead. - {bson.M{"": uint32(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, - {bson.M{"": uint(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, -} - -func (s *S) TestOneWayMarshalItems(c *C) { - for i, item := range oneWayMarshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Two-way tests for user-defined structures using the samples -// from bsonspec.org. 
- -type specSample1 struct { - Hello string -} - -type specSample2 struct { - BSON []interface{} "BSON" -} - -var structSampleItems = []testItemType{ - {&specSample1{"world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalStructSampleItems(c *C) { - for i, item := range structSampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructSampleItems(c *C) { - for _, item := range structSampleItems { - testUnmarshal(c, item.data, item.obj) - } -} - -func (s *S) Test64bitInt(c *C) { - var i int64 = (1 << 31) - if int(i) > 0 { - data, err := bson.Marshal(bson.M{"i": int(i)}) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00")) - - var result struct{ I int } - err = bson.Unmarshal(data, &result) - c.Assert(err, IsNil) - c.Assert(int64(result.I), Equals, i) - } -} - -// -------------------------------------------------------------------------- -// Generic two-way struct marshaling tests. - -var bytevar = byte(8) -var byteptr = &bytevar - -var structItems = []testItemType{ - {&struct{ Ptr *byte }{nil}, - "\x0Aptr\x00"}, - {&struct{ Ptr *byte }{&bytevar}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Ptr **byte }{&byteptr}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{8}, - "\x10byte\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{0}, - "\x10byte\x00\x00\x00\x00\x00"}, - {&struct { - V byte "Tag" - }{8}, - "\x10Tag\x00\x08\x00\x00\x00"}, - {&struct { - V *struct { - Byte byte - } - }{&struct{ Byte byte }{8}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ priv byte }{}, ""}, - - // The order of the dumped fields should be the same in the struct. - {&struct{ A, C, B, D, F, E *byte }{}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, - - {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, - "\x10v\x00" + "\x00\x00\x00\x00"}, - - // Byte arrays. - {&struct{ V [2]byte }{[2]byte{'y', 'o'}}, - "\x05v\x00\x02\x00\x00\x00\x00yo"}, -} - -func (s *S) TestMarshalStructItems(c *C) { - for i, item := range structItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructItems(c *C) { - for _, item := range structItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalRawStructItems(c *C) { - for i, item := range structItems { - raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} - zero := makeZeroDoc(item.obj) - err := raw.Unmarshal(zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawNil(c *C) { - // Regression test: shouldn't try to nil out the pointer itself, - // as it's not settable. 
- raw := bson.Raw{0x0A, []byte{}} - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, IsNil) -} - -// -------------------------------------------------------------------------- -// One-way marshaling tests. - -type dOnIface struct { - D interface{} -} - -type ignoreField struct { - Before string - Ignore string `bson:"-"` - After string -} - -var marshalItems = []testItemType{ - // Ordered document dump. Will unmarshal as a dictionary by default. - {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, - - {&ignoreField{"before", "ignore", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Marshalling a Raw document does nothing. - {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, - "anything"}, - {bson.Raw{Data: []byte(wrapInDoc("anything"))}, - "anything"}, -} - -func (s *S) TestMarshalOneWayItems(c *C) { - for _, item := range marshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data)) - } -} - -// -------------------------------------------------------------------------- -// One-way unmarshaling tests. - -var unmarshalItems = []testItemType{ - // Field is private. Should not attempt to unmarshal it. - {&struct{ priv byte }{}, - "\x10priv\x00\x08\x00\x00\x00"}, - - // Wrong casing. Field names are lowercased. - {&struct{ Byte byte }{}, - "\x10Byte\x00\x08\x00\x00\x00"}, - - // Ignore non-existing field. - {&struct{ Byte byte }{9}, - "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"}, - - // Do not unmarshal on ignored field. - {&ignoreField{"before", "", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00" + - "\x02-\x00\a\x00\x00\x00ignore\x00" + - "\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Ignore unsuitable types silently. - {map[string]string{"str": "s"}, - "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"}, - {map[string][]int{"array": []int{5, 9}}, - "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")}, - - // Wrong type. Shouldn't init pointer. - {&struct{ Str *byte }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - {&struct{ Str *struct{ Str string } }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - - // Ordered document. - {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - // Raw document. - {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, - "\x10byte\x00\x08\x00\x00\x00"}, - - // RawD document. 
- {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, - - // Decode old binary. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // Decode old binary without length. According to the spec, this shouldn't happen. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x03\x00\x00\x00\x02old"}, -} - -func (s *S) TestUnmarshalOneWayItems(c *C) { - for _, item := range unmarshalItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalNilInStruct(c *C) { - // Nil is the default value, so we need to ensure it's indeed being set. - b := byte(1) - v := &struct{ Ptr *byte }{&b} - err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil}) -} - -// -------------------------------------------------------------------------- -// Marshalling error cases. - -type structWithDupKeys struct { - Name byte - Other byte "name" // Tag should precede. -} - -var marshalErrorItems = []testItemType{ - {bson.M{"": uint64(1 << 63)}, - "BSON has no uint64 type, and value is too large to fit correctly in an int64"}, - {bson.M{"": bson.ObjectId("tooshort")}, - "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"}, - {int64(123), - "Can't marshal int64 as a BSON document"}, - {bson.M{"": 1i}, - "Can't marshal complex128 in a BSON document"}, - {&structWithDupKeys{}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0x0A, []byte{}}, - "Attempted to unmarshal Raw kind 10 as a document"}, - {&inlineCantPtr{&struct{ A, B int }{1, 2}}, - "Option ,inline needs a struct value or map field"}, - {&inlineDupName{1, struct{ A, B int }{2, 3}}, - "Duplicated key 'a' in struct bson_test.inlineDupName"}, - {&inlineDupMap{}, - "Multiple ,inline maps in struct bson_test.inlineDupMap"}, - {&inlineBadKeyMap{}, - "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"}, - {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}}, - `Can't have key "a" in inlined map; conflicts with struct field`}, -} - -func (s *S) TestMarshalErrorItems(c *C) { - for _, item := range marshalErrorItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, ErrorMatches, item.data) - c.Assert(data, IsNil) - } -} - -// -------------------------------------------------------------------------- -// Unmarshalling error cases. - -type unmarshalErrorType struct { - obj interface{} - data string - error string -} - -var unmarshalErrorItems = []unmarshalErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - "\x10name\x00\x08\x00\x00\x00", - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - // Non-string map key. - {map[int]interface{}{}, - "\x10name\x00\x08\x00\x00\x00", - "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"}, - - {nil, - "\xEEname\x00", - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal can't deal with struct values. 
Use a pointer."}, - - {123, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal needs a map or a pointer to a struct."}, -} - -func (s *S) TestUnmarshalErrorItems(c *C) { - for _, item := range unmarshalErrorItems { - data := []byte(wrapInDoc(item.data)) - var value interface{} - switch reflect.ValueOf(item.obj).Kind() { - case reflect.Map, reflect.Ptr: - value = makeZeroDoc(item.obj) - case reflect.Invalid: - value = bson.M{} - default: - value = item.obj - } - err := bson.Unmarshal(data, value) - c.Assert(err, ErrorMatches, item.error) - } -} - -type unmarshalRawErrorType struct { - obj interface{} - raw bson.Raw - error string -} - -var unmarshalRawErrorItems = []unmarshalRawErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - {&struct{}{}, - bson.Raw{0xEE, []byte{}}, - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal can't deal with struct values. Use a pointer."}, - - {123, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal needs a map or a valid pointer."}, -} - -func (s *S) TestUnmarshalRawErrorItems(c *C) { - for i, item := range unmarshalRawErrorItems { - err := item.raw.Unmarshal(item.obj) - c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item)) - } -} - -var corruptedData = []string{ - "\x04\x00\x00\x00\x00", // Shorter than minimum - "\x06\x00\x00\x00\x00", // Not enough data - "\x05\x00\x00", // Broken length - "\x05\x00\x00\x00\xff", // Corrupted termination - "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string - - // Array end past end of string (s[2]=0x07 is correct) - wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"), - - // Array end within string, but past acceptable. - wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // Document end within string, but past acceptable. - wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // String with corrupted end. - wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), -} - -func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { - for _, data := range corruptedData { - err := bson.Unmarshal([]byte(data), bson.M{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - - err = bson.Unmarshal([]byte(data), &struct{}{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - } -} - -// -------------------------------------------------------------------------- -// Setter test cases. - -var setterResult = map[string]error{} - -type setterType struct { - received interface{} -} - -func (o *setterType) SetBSON(raw bson.Raw) error { - err := raw.Unmarshal(&o.received) - if err != nil { - panic("The panic:" + err.Error()) - } - if s, ok := o.received.(string); ok { - if result, ok := setterResult[s]; ok { - return result - } - } - return nil -} - -type ptrSetterDoc struct { - Field *setterType "_" -} - -type valSetterDoc struct { - Field setterType "_" -} - -func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { - for _, item := range allItems { - for i := 0; i != 2; i++ { - var field *setterType - if i == 0 { - obj := &ptrSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = obj.Field - } else { - obj := &valSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = &obj.Field - } - if item.data == "" { - // Nothing to unmarshal. Should be untouched. 
- if i == 0 { - c.Assert(field, IsNil) - } else { - c.Assert(field.received, IsNil) - } - } else { - expected := item.obj.(bson.M)["_"] - c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) - c.Assert(field.received, DeepEquals, expected) - } - } - } -} - -func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { - obj := &setterType{} - err := bson.Unmarshal([]byte(sampleItems[0].data), obj) - c.Assert(err, IsNil) - c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) -} - -func (s *S) TestUnmarshalSetterOmits(c *C) { - setterResult["2"] = &bson.TypeError{} - setterResult["4"] = &bson.TypeError{} - defer func() { - delete(setterResult, "2") - delete(setterResult, "4") - }() - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00" + - "\x02jkl\x00\x02\x00\x00\x004\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], NotNil) - c.Assert(m["jkl"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") - c.Assert(m["ghi"].received, Equals, "3") -} - -func (s *S) TestUnmarshalSetterErrors(c *C) { - boom := errors.New("BOOM") - setterResult["2"] = boom - defer delete(setterResult, "2") - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, Equals, boom) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") -} - -func (s *S) TestDMap(c *C) { - d := bson.D{{"a", 1}, {"b", 2}} - c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) -} - -func (s *S) TestUnmarshalSetterSetZero(c *C) { - setterResult["foo"] = bson.SetZero - defer delete(setterResult, "field") - - data, err := bson.Marshal(bson.M{"field": "foo"}) - c.Assert(err, IsNil) - - m := map[string]*setterType{} - err = bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - - value, ok := m["field"] - c.Assert(ok, Equals, true) - c.Assert(value, IsNil) -} - -// -------------------------------------------------------------------------- -// Getter test cases. 
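// For reference while reading the getter tests below: a type opts into
// custom marshalling by implementing bson.Getter, and into custom
// unmarshalling via bson.Setter. A minimal sketch of the contract (the
// type "upper" is illustrative only, not part of this file):
//
//	type upper string
//
//	func (u upper) GetBSON() (interface{}, error) { return strings.ToUpper(string(u)), nil }
//	func (u *upper) SetBSON(raw bson.Raw) error   { return raw.Unmarshal((*string)(u)) }
//
// Marshal consults GetBSON before encoding a value, and Unmarshal hands
// the raw element to SetBSON instead of decoding it itself.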
- -type typeWithGetter struct { - result interface{} - err error -} - -func (t *typeWithGetter) GetBSON() (interface{}, error) { - if t == nil { - return "", nil - } - return t.result, t.err -} - -type docWithGetterField struct { - Field *typeWithGetter "_" -} - -func (s *S) TestMarshalAllItemsWithGetter(c *C) { - for i, item := range allItems { - if item.data == "" { - continue - } - obj := &docWithGetterField{} - obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item #%d", i)) - } -} - -func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { - obj := &typeWithGetter{result: sampleItems[0].obj} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, sampleItems[0].data) -} - -func (s *S) TestGetterErrors(c *C) { - e := errors.New("oops") - - obj1 := &docWithGetterField{} - obj1.Field = &typeWithGetter{sampleItems[0].obj, e} - data, err := bson.Marshal(obj1) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) - - obj2 := &typeWithGetter{sampleItems[0].obj, e} - data, err = bson.Marshal(obj2) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) -} - -type intGetter int64 - -func (t intGetter) GetBSON() (interface{}, error) { - return int64(t), nil -} - -type typeWithIntGetter struct { - V intGetter ",minsize" -} - -func (s *S) TestMarshalShortWithGetter(c *C) { - obj := typeWithIntGetter{42} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m["v"], Equals, 42) -} - -func (s *S) TestMarshalWithGetterNil(c *C) { - obj := docWithGetterField{} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"_": ""}) -} - -// -------------------------------------------------------------------------- -// Cross-type conversion tests. 
- -type crossTypeItem struct { - obj1 interface{} - obj2 interface{} -} - -type condStr struct { - V string ",omitempty" -} -type condStrNS struct { - V string `a:"A" bson:",omitempty" b:"B"` -} -type condBool struct { - V bool ",omitempty" -} -type condInt struct { - V int ",omitempty" -} -type condUInt struct { - V uint ",omitempty" -} -type condFloat struct { - V float64 ",omitempty" -} -type condIface struct { - V interface{} ",omitempty" -} -type condPtr struct { - V *bool ",omitempty" -} -type condSlice struct { - V []string ",omitempty" -} -type condMap struct { - V map[string]int ",omitempty" -} -type namedCondStr struct { - V string "myv,omitempty" -} -type condTime struct { - V time.Time ",omitempty" -} -type condStruct struct { - V struct{ A []int } ",omitempty" -} - -type shortInt struct { - V int64 ",minsize" -} -type shortUint struct { - V uint64 ",minsize" -} -type shortIface struct { - V interface{} ",minsize" -} -type shortPtr struct { - V *int64 ",minsize" -} -type shortNonEmptyInt struct { - V int64 ",minsize,omitempty" -} - -type inlineInt struct { - V struct{ A, B int } ",inline" -} -type inlineCantPtr struct { - V *struct{ A, B int } ",inline" -} -type inlineDupName struct { - A int - V struct{ A, B int } ",inline" -} -type inlineMap struct { - A int - M map[string]interface{} ",inline" -} -type inlineMapInt struct { - A int - M map[string]int ",inline" -} -type inlineMapMyM struct { - A int - M MyM ",inline" -} -type inlineDupMap struct { - M1 map[string]interface{} ",inline" - M2 map[string]interface{} ",inline" -} -type inlineBadKeyMap struct { - M map[int]int ",inline" -} - -type getterSetterD bson.D - -func (s getterSetterD) GetBSON() (interface{}, error) { - if len(s) == 0 { - return bson.D{}, nil - } - return bson.D(s[:len(s)-1]), nil -} - -func (s *getterSetterD) SetBSON(raw bson.Raw) error { - var doc bson.D - err := raw.Unmarshal(&doc) - doc = append(doc, bson.DocElem{"suffix", true}) - *s = getterSetterD(doc) - return err -} - -type getterSetterInt int - -func (i getterSetterInt) GetBSON() (interface{}, error) { - return bson.D{{"a", int(i)}}, nil -} - -func (i *getterSetterInt) SetBSON(raw bson.Raw) error { - var doc struct{ A int } - err := raw.Unmarshal(&doc) - *i = getterSetterInt(doc.A) - return err -} - -type ( - MyString string - MyBytes []byte - MyBool bool - MyD []bson.DocElem - MyRawD []bson.RawDocElem - MyM map[string]interface{} -) - -var ( - truevar = true - falsevar = false - - int64var = int64(42) - int64ptr = &int64var - intvar = int(42) - intptr = &intvar - - gsintvar = getterSetterInt(42) -) - -func parseURL(s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - panic(err) - } - return u -} - -// That's a pretty fun test. It will dump the first item, generate a zero -// value equivalent to the second one, load the dumped data onto it, and then -// verify that the resulting value is deep-equal to the untouched second value. -// Then, it will do the same in the *opposite* direction! 
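// A concrete instance of that round trip, using the first pair in the
// table below (illustrative only):
//
//	data, _ := bson.Marshal(&struct{ I int }{42}) // encoded as an int32 element
//	var out struct{ I int8 }
//	_ = bson.Unmarshal(data, &out)                // out.I == 42
//
// The same pair must then round trip with the two types swapped.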
-var twoWayCrossItems = []crossTypeItem{ - // int<=>int - {&struct{ I int }{42}, &struct{ I int8 }{42}}, - {&struct{ I int }{42}, &struct{ I int32 }{42}}, - {&struct{ I int }{42}, &struct{ I int64 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, - - // uint<=>uint - {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, - - // float32<=>float64 - {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, - - // int<=>uint - {&struct{ I uint }{42}, &struct{ I int }{42}}, - {&struct{ I uint }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, - - // int <=> float - {&struct{ I int }{42}, &struct{ I float64 }{42}}, - - // int <=> bool - {&struct{ I int }{1}, &struct{ I bool }{true}}, - {&struct{ I int }{0}, &struct{ I bool }{false}}, - - // uint <=> float64 - {&struct{ I uint }{42}, &struct{ I float64 }{42}}, - - // uint <=> bool - {&struct{ I uint }{1}, &struct{ I bool }{true}}, - {&struct{ I uint }{0}, &struct{ I bool }{false}}, - - // float64 <=> bool - {&struct{ I float64 }{1}, &struct{ I bool }{true}}, - {&struct{ I float64 }{0}, &struct{ I bool }{false}}, - - // string <=> string and string <=> []byte - {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, - {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, - {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, - - // map <=> struct - {&struct { - A struct { - B, C int - } - }{struct{ B, C int }{1, 2}}, - map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, - - {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, - {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, - {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, - {&struct{ A uint }{42}, map[string]int{"a": 42}}, - {&struct{ A uint }{42}, map[string]float64{"a": 42}}, - {&struct{ A uint }{1}, map[string]bool{"a": true}}, - {&struct{ A int }{42}, map[string]uint{"a": 42}}, - {&struct{ A int }{42}, map[string]float64{"a": 42}}, - {&struct{ A int }{1}, map[string]bool{"a": true}}, - {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, - {&struct{ A float64 }{42}, map[string]int{"a": 42}}, - {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, - {&struct{ A float64 }{1}, map[string]bool{"a": true}}, - {&struct{ A bool }{true}, map[string]int{"a": 1}}, - {&struct{ A bool }{true}, map[string]uint{"a": 1}}, - {&struct{ A bool }{true}, map[string]float64{"a": 1}}, - {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, - - // 
url.URL <=> string - {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - - // Slices - {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - - // Conditionals - {&condBool{true}, map[string]bool{"v": true}}, - {&condBool{}, map[string]bool{}}, - {&condInt{1}, map[string]int{"v": 1}}, - {&condInt{}, map[string]int{}}, - {&condUInt{1}, map[string]uint{"v": 1}}, - {&condUInt{}, map[string]uint{}}, - {&condFloat{}, map[string]int{}}, - {&condStr{"yo"}, map[string]string{"v": "yo"}}, - {&condStr{}, map[string]string{}}, - {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, - {&condStrNS{}, map[string]string{}}, - {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, - {&condSlice{}, map[string][]string{}}, - {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, - {&condMap{}, map[string][]string{}}, - {&condIface{"yo"}, map[string]string{"v": "yo"}}, - {&condIface{""}, map[string]string{"v": ""}}, - {&condIface{}, map[string]string{}}, - {&condPtr{&truevar}, map[string]bool{"v": true}}, - {&condPtr{&falsevar}, map[string]bool{"v": false}}, - {&condPtr{}, map[string]string{}}, - - {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, - {&condTime{}, map[string]string{}}, - - {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, - {&condStruct{struct{ A []int }{}}, bson.M{}}, - - {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, - {&namedCondStr{}, map[string]string{}}, - - {&shortInt{1}, map[string]interface{}{"v": 1}}, - {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, - - {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, - {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortNonEmptyInt{}, map[string]interface{}{}}, - - {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, - {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, - {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, - {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, - - // []byte <=> MyBytes - {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, - {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, - {&struct{ B MyBytes }{}, map[string]bool{}}, - {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, - - // bool <=> MyBool - {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, - {&struct{ B MyBool }{}, map[string]bool{"b": false}}, - {&struct{ B MyBool }{}, map[string]string{}}, - {&struct{ B bool }{}, map[string]MyBool{"b": false}}, - - // arrays - {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, - - // zero time - {&struct{ V time.Time }{}, 
map[string]interface{}{"v": time.Time{}}}, - - // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds - {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, - map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, - - // bson.D <=> []DocElem - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, - {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, - - // bson.RawD <=> []RawDocElem - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - - // bson.M <=> map - {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, - - // bson.M <=> map[MyString] - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, - - // json.Number <=> int64, float64 - {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, - {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, - {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, - - // bson.D <=> non-struct getter/setter - {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, - {&bson.D{{"a", 42}}, &gsintvar}, -} - -// Same thing, but only one way (obj1 => obj2). -var oneWayCrossItems = []crossTypeItem{ - // map <=> struct - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}}, - - // inline map elides badly typed values - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}}, - - // Can't decode int into struct. - {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}}, - - // Would get decoded into a int32 too in the opposite direction. - {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, - - // Ensure omitempty on struct with private fields works properly. - {&struct { - V struct{ v time.Time } ",omitempty" - }{}, map[string]interface{}{}}, -} - -func testCrossPair(c *C, dump interface{}, load interface{}) { - c.Logf("Dump: %#v", dump) - c.Logf("Load: %#v", load) - zero := makeZeroDoc(load) - data, err := bson.Marshal(dump) - c.Assert(err, IsNil) - c.Logf("Dumped: %#v", string(data)) - err = bson.Unmarshal(data, zero) - c.Assert(err, IsNil) - c.Logf("Loaded: %#v", zero) - c.Assert(zero, DeepEquals, load) -} - -func (s *S) TestTwoWayCrossPairs(c *C) { - for _, item := range twoWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - testCrossPair(c, item.obj2, item.obj1) - } -} - -func (s *S) TestOneWayCrossPairs(c *C) { - for _, item := range oneWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - } -} - -// -------------------------------------------------------------------------- -// ObjectId hex representation test. 
- -func (s *S) TestObjectIdHex(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`) - c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9") -} - -func (s *S) TestIsObjectIdHex(c *C) { - test := []struct { - id string - valid bool - }{ - {"4d88e15b60f486e428412dc9", true}, - {"4d88e15b60f486e428412dc", false}, - {"4d88e15b60f486e428412dc9e", false}, - {"4d88e15b60f486e428412dcx", false}, - } - for _, t := range test { - c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid) - } -} - -// -------------------------------------------------------------------------- -// ObjectId parts extraction tests. - -type objectIdParts struct { - id bson.ObjectId - timestamp int64 - machine []byte - pid uint16 - counter int32 -} - -var objectIds = []objectIdParts{ - objectIdParts{ - bson.ObjectIdHex("4d88e15b60f486e428412dc9"), - 1300816219, - []byte{0x60, 0xf4, 0x86}, - 0xe428, - 4271561, - }, - objectIdParts{ - bson.ObjectIdHex("000000000000000000000000"), - 0, - []byte{0x00, 0x00, 0x00}, - 0x0000, - 0, - }, - objectIdParts{ - bson.ObjectIdHex("00000000aabbccddee000001"), - 0, - []byte{0xaa, 0xbb, 0xcc}, - 0xddee, - 1, - }, -} - -func (s *S) TestObjectIdPartsExtraction(c *C) { - for i, v := range objectIds { - t := time.Unix(v.timestamp, 0) - c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) - c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) - c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) - c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) - } -} - -func (s *S) TestNow(c *C) { - before := time.Now() - time.Sleep(1e6) - now := bson.Now() - time.Sleep(1e6) - after := time.Now() - c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) -} - -// -------------------------------------------------------------------------- -// ObjectId generation tests. - -func (s *S) TestNewObjectId(c *C) { - // Generate 10 ids - ids := make([]bson.ObjectId, 10) - for i := 0; i < 10; i++ { - ids[i] = bson.NewObjectId() - } - for i := 1; i < 10; i++ { - prevId := ids[i-1] - id := ids[i] - // Test for uniqueness among all other 9 generated ids - for j, tid := range ids { - if j != i { - c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) - } - } - // Check that timestamp was incremented and is within 30 seconds of the previous one - secs := id.Time().Sub(prevId.Time()).Seconds() - c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) - // Check that machine ids are the same - c.Assert(id.Machine(), DeepEquals, prevId.Machine()) - // Check that pids are the same - c.Assert(id.Pid(), Equals, prevId.Pid()) - // Test for proper increment - delta := int(id.Counter() - prevId.Counter()) - c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) - } -} - -func (s *S) TestNewObjectIdWithTime(c *C) { - t := time.Unix(12345678, 0) - id := bson.NewObjectIdWithTime(t) - c.Assert(id.Time(), Equals, t) - c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) - c.Assert(int(id.Pid()), Equals, 0) - c.Assert(int(id.Counter()), Equals, 0) -} - -// -------------------------------------------------------------------------- -// ObjectId JSON marshalling. 
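// The table below exercises ObjectId's json.Marshaler and json.Unmarshaler
// implementations. The gist, mirroring the first table entry (illustrative
// only):
//
//	b, _ := json.Marshal(jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")})
//	// string(b) == `{"Id":"4d88e15b60f486e428412dc9"}`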
- -type jsonType struct { - Id bson.ObjectId -} - -var jsonIdTests = []struct { - value jsonType - json string - marshal bool - unmarshal bool - error string -}{{ - value: jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, - json: `{"Id":"4d88e15b60f486e428412dc9"}`, - marshal: true, - unmarshal: true, -}, { - value: jsonType{}, - json: `{"Id":""}`, - marshal: true, - unmarshal: true, -}, { - value: jsonType{}, - json: `{"Id":null}`, - marshal: false, - unmarshal: true, -}, { - json: `{"Id":"4d88e15b60f486e428412dc9A"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, - marshal: false, - unmarshal: true, -}, { - json: `{"Id":"4d88e15b60f486e428412dcZ"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, - marshal: false, - unmarshal: true, -}} - -func (s *S) TestObjectIdJSONMarshaling(c *C) { - for _, test := range jsonIdTests { - if test.marshal { - data, err := json.Marshal(&test.value) - if test.error == "" { - c.Assert(err, IsNil) - c.Assert(string(data), Equals, test.json) - } else { - c.Assert(err, ErrorMatches, test.error) - } - } - - if test.unmarshal { - var value jsonType - err := json.Unmarshal([]byte(test.json), &value) - if test.error == "" { - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, test.value) - } else { - c.Assert(err, ErrorMatches, test.error) - } - } - } -} - -// -------------------------------------------------------------------------- -// Some simple benchmarks. - -type BenchT struct { - A, B, C, D, E, F string -} - -type BenchRawT struct { - A string - B int - C bson.M - D []float64 -} - -func (s *S) BenchmarkUnmarhsalStruct(c *C) { - v := BenchT{A: "A", D: "D", E: "E"} - data, err := bson.Marshal(&v) - if err != nil { - panic(err) - } - c.ResetTimer() - for i := 0; i < c.N; i++ { - err = bson.Unmarshal(data, &v) - } - if err != nil { - panic(err) - } -} - -func (s *S) BenchmarkUnmarhsalMap(c *C) { - m := bson.M{"a": "a", "d": "d", "e": "e"} - data, err := bson.Marshal(&m) - if err != nil { - panic(err) - } - c.ResetTimer() - for i := 0; i < c.N; i++ { - err = bson.Unmarshal(data, &m) - } - if err != nil { - panic(err) - } -} - -func (s *S) BenchmarkUnmarshalRaw(c *C) { - var err error - m := BenchRawT{ - A: "test_string", - B: 123, - C: bson.M{ - "subdoc_int": 12312, - "subdoc_doc": bson.M{"1": 1}, - }, - D: []float64{0.0, 1.3333, -99.9997, 3.1415}, - } - data, err := bson.Marshal(&m) - if err != nil { - panic(err) - } - raw := bson.Raw{} - c.ResetTimer() - for i := 0; i < c.N; i++ { - err = bson.Unmarshal(data, &raw) - } - if err != nil { - panic(err) - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go index 782e9338..0ee8d22d 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -325,6 +325,10 @@ func (d *decoder) readArrayDocTo(out reflect.Value) { func (d *decoder) readSliceDoc(t reflect.Type) interface{} { tmp := make([]reflect.Value, 0, 8) elemType := t.Elem() + if elemType == typeRawDocElem { + d.dropElem(0x04) + return reflect.Zero(t).Interface() + } end := int(d.readInt32()) end += d.i - 4 @@ -437,7 +441,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { start := d.i - if kind == '\x03' { + if kind == 0x03 { // Delegate unmarshaling of documents. outt := out.Type() outk := out.Kind() @@ -474,6 +478,11 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { panic("Can't happen. Handled above.") case 0x04: // Array outt := out.Type() + if setterStyle(outt) != setterNone { + // Skip the value so its data is handed to the setter below. + d.dropElem(kind) + break + } for outt.Kind() == reflect.Ptr { outt = outt.Elem() } @@ -718,6 +727,12 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { out.Set(reflect.ValueOf(u).Elem()) return true } + if outt == typeBinary { + if b, ok := in.([]byte); ok { + out.Set(reflect.ValueOf(Binary{Data: b})) + return true + } + } } return false @@ -771,10 +786,14 @@ func (d *decoder) readCStr() string { } func (d *decoder) readBool() bool { - if d.readByte() == 1 { + b := d.readByte() + if b == 0 { + return false + } + if b == 1 { return true } - return false + panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) } func (d *decoder) readFloat64() float64 { @@ -811,9 +830,12 @@ func (d *decoder) readByte() byte { } func (d *decoder) readBytes(length int32) []byte { + if length < 0 { + corrupted() + } start := d.i d.i += int(length) - if d.i > len(d.in) { + if d.i < start || d.i > len(d.in) { corrupted() } return d.in[start : start+int(length)] diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go index 81a13add..36eb29ce 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. 
+//
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@@ -101,7 +101,10 @@ func (e *encoder) addDoc(v reflect.Value) {
 	if v.Type() == typeRaw {
 		raw := v.Interface().(Raw)
 		if raw.Kind != 0x03 && raw.Kind != 0x00 {
-			panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+		}
+		if len(raw.Data) == 0 {
+			panic("Attempted to marshal empty Raw document")
 		}
 		e.addBytes(raw.Data...)
 		return
@@ -212,7 +215,7 @@ func (e *encoder) addSlice(v reflect.Value) {
 		return
 	}
 	l := v.Len()
-	et := v.Type().Elem()
+	et := v.Type().Elem()
 	if et == typeDocElem {
 		for i := 0; i < l; i++ {
 			elem := v.Index(i).Interface().(DocElem)
@@ -365,7 +368,17 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
 		et := v.Type().Elem()
 		if et.Kind() == reflect.Uint8 {
 			e.addElemName('\x05', name)
-			e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
+			if v.CanAddr() {
+				e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
+			} else {
+				n := v.Len()
+				e.addInt32(int32(n))
+				e.addBytes('\x00')
+				for i := 0; i < n; i++ {
+					el := v.Index(i)
+					e.addBytes(byte(el.Uint()))
+				}
+			}
 		} else {
 			e.addElemName('\x04', name)
 			e.addDoc(v)
@@ -379,6 +392,9 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
 		if kind == 0x00 {
 			kind = 0x03
 		}
+		if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+			panic("Attempted to marshal empty Raw document")
+		}
 		e.addElemName(kind, name)
 		e.addBytes(s.Data...)
 
@@ -415,7 +431,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
 	case time.Time:
 		// MongoDB handles timestamps as milliseconds.
 		e.addElemName('\x09', name)
-		e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6))
+		e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
 
 	case url.URL:
 		e.addElemName('\x02', name)
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/specdata/update.sh b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/specdata/update.sh
new file mode 100644
index 00000000..1efd3d3b
--- /dev/null
+++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson/specdata/update.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -d specifications ]; then
+	git clone -b bson git@github.com:jyemin/specifications
+fi
+
+TESTFILE="../specdata_test.go"
+
+cat <<END > $TESTFILE
+package bson_test
+
+var specTests = []string{
+END
+
+for file in specifications/source/bson/tests/*.yml; do
+	(
+		echo '`'
+		cat $file
+		echo -n '`,'
+	) >> $TESTFILE
+done
+
+echo '}' >> $TESTFILE
+
+gofmt -w $TESTFILE
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go
index 23f45085..42e82d75 100644
--- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go
+++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go
@@ -1,10 +1,14 @@
 package mgo
 
+import (
+	"bytes"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson"
+)
+
 // Bulk represents an operation that can be prepared with several
 // orthogonal changes before being delivered to the server.
 //
-// WARNING: This API is still experimental.
-// // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api @@ -12,19 +16,39 @@ package mgo type Bulk struct { c *Collection ordered bool - inserts []interface{} + actions []bulkAction +} + +type bulkOp int + +const ( + bulkInsert bulkOp = iota + 1 + bulkUpdate + bulkUpdateAll + bulkRemove +) + +type bulkAction struct { + op bulkOp + docs []interface{} } +type bulkUpdateOp []interface{} +type bulkDeleteOp []interface{} + // BulkError holds an error returned from running a Bulk operation. // // TODO: This is private for the moment, until we understand exactly how // to report these multi-errors in a useful and convenient way. type bulkError struct { - err error + errs []error } // BulkResult holds the results for a bulk operation. type BulkResult struct { + Matched int + Modified int // Available only for MongoDB 2.6+ + // Be conservative while we understand exactly how to report these // results in a useful and convenient way, and also how to emulate // them with prior servers. @@ -32,13 +56,35 @@ type BulkResult struct { } func (e *bulkError) Error() string { - return e.err.Error() + if len(e.errs) == 0 { + return "invalid bulkError instance: no errors" + } + if len(e.errs) == 1 { + return e.errs[0].Error() + } + msgs := make([]string, 0, len(e.errs)) + seen := make(map[string]bool) + for _, err := range e.errs { + msg := err.Error() + if !seen[msg] { + seen[msg] = true + msgs = append(msgs, msg) + } + } + if len(msgs) == 1 { + return msgs[0] + } + var buf bytes.Buffer + buf.WriteString("multiple errors in bulk operation:\n") + for _, msg := range msgs { + buf.WriteString(" - ") + buf.WriteString(msg) + buf.WriteByte('\n') + } + return buf.String() } // Bulk returns a value to prepare the execution of a bulk operation. -// -// WARNING: This API is still experimental. -// func (c *Collection) Bulk() *Bulk { return &Bulk{c: c, ordered: true} } @@ -52,20 +98,197 @@ func (b *Bulk) Unordered() { b.ordered = false } +func (b *Bulk) action(op bulkOp) *bulkAction { + if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { + return &b.actions[len(b.actions)-1] + } + if !b.ordered { + for i := range b.actions { + if b.actions[i].op == op { + return &b.actions[i] + } + } + } + b.actions = append(b.actions, bulkAction{op: op}) + return &b.actions[len(b.actions)-1] +} + // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { - b.inserts = append(b.inserts, docs...) + action := b.action(bulkInsert) + action.docs = append(action.docs, docs...) +} + +// Remove queues up the provided selectors for removing matching documents. +// Each selector will remove only a single matching document. +func (b *Bulk) Remove(selectors ...interface{}) { + action := b.action(bulkRemove) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 1, + Limit: 1, + }) + } +} + +// RemoveAll queues up the provided selectors for removing all matching documents. +// Each selector will remove all matching documents. +func (b *Bulk) RemoveAll(selectors ...interface{}) { + action := b.action(bulkRemove) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 0, + Limit: 0, + }) + } +} + +// Update queues up the provided pairs of updating instructions. 
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches at most one document for updating.
+func (b *Bulk) Update(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.Update requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+		})
+	}
+}
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update them.
+// Each pair updates all documents matching the selector.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.UpdateAll requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      2,
+			Multi:      true,
+		})
+	}
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches at most one document for updating; if no
+// document matches the selector, a new one is inserted.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.Upsert requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      1,
+			Upsert:     true,
+		})
+	}
+}
 
 // Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception to that, Insert
+// operations running on MongoDB versions prior to 2.6 will report the last
+// error only due to a limitation in the wire protocol.
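// A short usage sketch for the queuing methods above (the database and
// collection names are illustrative only):
//
//	bulk := session.DB("mydb").C("mycoll").Bulk()
//	bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
//	bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}})
//	bulk.RemoveAll(bson.M{"n": bson.M{"$gt": 1}})
//	result, err := bulk.Run() // result.Matched/result.Modified as per BulkResult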
func (b *Bulk) Run() (*BulkResult, error) { - op := &insertOp{b.c.FullName, b.inserts, 0} + var result BulkResult + var berr bulkError + var failed bool + for i := range b.actions { + action := &b.actions[i] + var ok bool + switch action.op { + case bulkInsert: + ok = b.runInsert(action, &result, &berr) + case bulkUpdate: + ok = b.runUpdate(action, &result, &berr) + case bulkRemove: + ok = b.runRemove(action, &result, &berr) + default: + panic("unknown bulk operation") + } + if !ok { + failed = true + if b.ordered { + break + } + } + } + if failed { + return nil, &berr + } + return &result, nil +} + +func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool { + op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } - _, err := b.c.writeQuery(op) - if err != nil { - return nil, &bulkError{err} + lerr, err := b.c.writeOp(op, b.ordered) + return b.checkSuccess(berr, lerr, err) +} + +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { + lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) + result.Matched += lerr.N + result.Modified += lerr.modified + return b.checkSuccess(berr, lerr, err) +} + +func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *bulkError) bool { + lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) + result.Matched += lerr.N + result.Modified += lerr.modified + return b.checkSuccess(berr, lerr, err) +} + +func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { + if lerr != nil && len(lerr.errors) > 0 { + berr.errs = append(berr.errs, lerr.errors...) + return false + } else if err != nil { + berr.errs = append(berr.errs, err) + return false } - return &BulkResult{}, nil + return true } diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk_test.go deleted file mode 100644 index d0607b78..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bulk_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2014 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package mgo_test - -import ( - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - . "gopkg.in/check.v1" -) - -func (s *S) TestBulkInsert(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Insert(M{"n": 1}) - bulk.Insert(M{"n": 2}, M{"n": 3}) - r, err := bulk.Run() - c.Assert(err, IsNil) - c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) - - type doc struct{ N int } - var res []doc - err = coll.Find(nil).Sort("n").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) -} - -func (s *S) TestBulkInsertError(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3}) - _, err = bulk.Run() - c.Assert(err, ErrorMatches, ".*duplicate key.*") - - type doc struct { - N int `_id` - } - var res []doc - err = coll.Find(nil).Sort("_id").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}}) -} - -func (s *S) TestBulkInsertErrorUnordered(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Unordered() - bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) - _, err = bulk.Run() - c.Assert(err, ErrorMatches, ".*duplicate key.*") - - type doc struct { - N int `_id` - } - var res []doc - err = coll.Find(nil).Sort("_id").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go index eeeaadba..4ae424fa 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go @@ -30,6 +30,8 @@ import ( "errors" "fmt" "net" + "strconv" + "strings" "sync" "time" @@ -209,17 +211,18 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI if result.IsMaster { debugf("SYNC %s is a master.", addr) - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) - stats.conn(+1, true) + if !server.info.Master { + // Made an incorrect assumption above, so fix stats. + stats.conn(-1, false) + stats.conn(+1, true) + } } else if result.Secondary { debugf("SYNC %s is a slave.", addr) } else if cluster.direct { logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) } else { logf("SYNC %s is neither a master nor a slave.", addr) - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) + // Let stats track it as whatever was known before. return nil, nil, errors.New(addr + " is not a master nor slave") } @@ -407,14 +410,59 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer } func resolveAddr(addr string) (*net.TCPAddr, error) { - // This hack allows having a timeout on resolution. - conn, err := net.DialTimeout("udp", addr, 10*time.Second) - if err != nil { + // Simple cases that do not need actual resolution. Works with IPv4 and v6. 
+ if host, port, err := net.SplitHostPort(addr); err == nil { + if port, _ := strconv.Atoi(port); port > 0 { + zone := "" + if i := strings.LastIndex(host, "%"); i >= 0 { + zone = host[i+1:] + host = host[:i] + } + ip := net.ParseIP(host) + if ip != nil { + return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil + } + } + } + + // Attempt to resolve IPv4 and v6 concurrently. + addrChan := make(chan *net.TCPAddr, 2) + for _, network := range []string{"udp4", "udp6"} { + network := network + go func() { + // The unfortunate UDP dialing hack allows having a timeout on address resolution. + conn, err := net.DialTimeout(network, addr, 10*time.Second) + if err != nil { + addrChan <- nil + } else { + addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) + conn.Close() + } + }() + } + + // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. + tcpaddr := <-addrChan + if tcpaddr == nil || len(tcpaddr.IP) != 4 { + var timeout <-chan time.Time + if tcpaddr != nil { + // Don't wait too long if an IPv6 address is known. + timeout = time.After(50 * time.Millisecond) + } + select { + case <-timeout: + case tcpaddr2 := <-addrChan: + if tcpaddr == nil || tcpaddr2 != nil { + // It's an IPv4 address or the only known address. Use it. + tcpaddr = tcpaddr2 + } + } + } + + if tcpaddr == nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) } - tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) - conn.Close() if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } @@ -511,8 +559,8 @@ func (cluster *mongoCluster) syncServersIteration(direct bool) { } cluster.Lock() - ml := cluster.masters.Len() - logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml) + mastersLen := cluster.masters.Len() + logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) // Update dynamic seeds, but only if we have any good servers. Otherwise, // leave them alone for better chances of a successful sync in the future. @@ -530,17 +578,17 @@ func (cluster *mongoCluster) syncServersIteration(direct bool) { // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. 
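// The server-selection condition introduced in the loop below can be read
// as a small predicate (a paraphrase for clarity, not code from this change):
//
//	canUseKnownServer := func(mode Mode, slaveOk bool, mastersLen, slavesLen int) bool {
//		// In Secondary mode a master must never be picked even though
//		// slaveOk is true; otherwise any master will do, and any known
//		// slave satisfies a slaveOk request.
//		return !(slaveOk && mode == Secondary) && mastersLen > 0 ||
//			slaveOk && slavesLen > 0
//	}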
-func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { +func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false for { cluster.RLock() for { - ml := cluster.masters.Len() - sl := cluster.servers.Len() - debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml) - if ml > 0 || slaveOk && sl > 0 { + mastersLen := cluster.masters.Len() + slavesLen := cluster.servers.Len() - mastersLen + debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) + if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { break } if started.IsZero() { @@ -560,9 +608,9 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati var server *mongoServer if slaveOk { - server = cluster.servers.BestFit(serverTags) + server = cluster.servers.BestFit(mode, serverTags) } else { - server = cluster.masters.BestFit(nil) + server = cluster.masters.BestFit(mode, nil) } cluster.RUnlock() diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster_test.go deleted file mode 100644 index 96ccc941..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/cluster_test.go +++ /dev/null @@ -1,1659 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "fmt" - "io" - "net" - "strings" - "sync" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" -) - -func (s *S) TestNewSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Do a dummy operation to wait for connection. 
- coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Tweak safety and query settings to ensure other has copied those. - session.SetSafe(nil) - session.SetBatch(-1) - other := session.New() - defer other.Close() - session.SetSafe(&mgo.Safe{}) - - // Clone was copied while session was unsafe, so no errors. - otherColl := other.DB("mydb").C("mycoll") - err = otherColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Original session was made safe again. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, NotNil) - - // With New(), each session has its own socket now. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 2) - - // Ensure query parameters were cloned. - err = otherColl.Insert(M{"_id": 2}) - c.Assert(err, IsNil) - - // Ping the database to ensure the nonce has been received already. - c.Assert(other.Ping(), IsNil) - - mgo.ResetStats() - - iter := otherColl.Find(M{}).Iter() - c.Assert(err, IsNil) - - m := M{} - ok := iter.Next(m) - c.Assert(ok, Equals, true) - err = iter.Close() - c.Assert(err, IsNil) - - // If Batch(-1) is in effect, a single document must have been received. - stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 1) -} - -func (s *S) TestCloneSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Do a dummy operation to wait for connection. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Tweak safety and query settings to ensure clone is copying those. - session.SetSafe(nil) - session.SetBatch(-1) - clone := session.Clone() - defer clone.Close() - session.SetSafe(&mgo.Safe{}) - - // Clone was copied while session was unsafe, so no errors. - cloneColl := clone.DB("mydb").C("mycoll") - err = cloneColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Original session was made safe again. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, NotNil) - - // With Clone(), same socket is shared between sessions now. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 2) - - // Refreshing one of them should let the original socket go, - // while preserving the safety settings. - clone.Refresh() - err = cloneColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Must have used another connection now. - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 2) - c.Assert(stats.SocketRefs, Equals, 2) - - // Ensure query parameters were cloned. - err = cloneColl.Insert(M{"_id": 2}) - c.Assert(err, IsNil) - - // Ping the database to ensure the nonce has been received already. - c.Assert(clone.Ping(), IsNil) - - mgo.ResetStats() - - iter := cloneColl.Find(M{}).Iter() - c.Assert(err, IsNil) - - m := M{} - ok := iter.Next(m) - c.Assert(ok, Equals, true) - err = iter.Close() - c.Assert(err, IsNil) - - // If Batch(-1) is in effect, a single document must have been received. 
- stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 1) -} - -func (s *S) TestSetModeStrong(c *C) { - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - session.SetMode(mgo.Strong, false) - - c.Assert(session.Mode(), Equals, mgo.Strong) - - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 1) - - session.SetMode(mgo.Strong, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeMonotonic(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - c.Assert(session.Mode(), Equals, mgo.Monotonic) - - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - result = M{} - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 2) - - session.SetMode(mgo.Monotonic, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeMonotonicAfterStrong(c *C) { - // Test that a strong session shifting to a monotonic - // one preserves the socket untouched. - - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - // Insert something to force a connection to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - session.SetMode(mgo.Monotonic, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Master socket should still be reserved. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // Confirm it's the master even though it's Monotonic by now. - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) -} - -func (s *S) TestSetModeStrongAfterMonotonic(c *C) { - // Test that shifting from Monotonic to Strong while - // using a slave socket will keep the socket reserved - // until the master socket is necessary, so that no - // switch over occurs unless it's actually necessary. - - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. 
- session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - // Ensure we're talking to a slave, and reserve the socket. - result := M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - // Switch to a Strong session. - session.SetMode(mgo.Strong, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Slave socket should still be reserved. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // But any operation will switch it to the master. - result = M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) -} - -func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - c.Assert(session.Mode(), Equals, mgo.Monotonic) - - coll1 := session.DB("mydb").C("mycoll1") - coll2 := session.DB("mydb").C("mycoll2") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll1.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - // Release master so we can grab a slave again. - session.Refresh() - - // Wait until synchronization is done. - for { - n, err := coll1.Count() - c.Assert(err, IsNil) - if n == len(ns) { - break - } - } - - iter := coll1.Find(nil).Batch(2).Iter() - i := 0 - m := M{} - for iter.Next(&m) { - i++ - if i > 3 { - err := coll2.Insert(M{"n": 47 + i}) - c.Assert(err, IsNil) - } - } - c.Assert(i, Equals, len(ns)) -} - -func (s *S) TestSetModeEventual(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Eventual, false) - - c.Assert(session.Mode(), Equals, mgo.Eventual) - - result := M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - result = M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeEventualAfterStrong(c *C) { - // Test that a strong session shifting to an eventual - // one preserves the socket untouched. - - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - // Insert something to force a connection to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - session.SetMode(mgo.Eventual, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Master socket should still be reserved. 
- stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // Confirm it's the master even though it's Eventual by now. - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - session.SetMode(mgo.Eventual, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestPrimaryShutdownStrong(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With strong consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - // Increase the timeout since this may take quite a while. - session.SetSyncTimeout(3 * time.Minute) - - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), host) - - // Insert some data to confirm it's indeed a master. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) - c.Assert(err, IsNil) -} - -func (s *S) TestPrimaryHiccup(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Establish a few extra sessions to create spare sockets to - // the master. This increases a bit the chances of getting an - // incorrect cached socket. - var sessions []*mgo.Session - for i := 0; i < 20; i++ { - sessions = append(sessions, session.Copy()) - err = sessions[len(sessions)-1].Run("serverStatus", result) - c.Assert(err, IsNil) - } - for i := range sessions { - sessions[i].Close() - } - - // Kill the master, but bring it back immediatelly. - host := result.Host - s.Stop(host) - s.StartAll() - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With strong consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - // Increase the timeout since this may take quite a while. - session.SetSyncTimeout(3 * time.Minute) - - // Insert some data to confirm it's indeed a master. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) - c.Assert(err, IsNil) -} - -func (s *S) TestPrimaryShutdownMonotonic(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - // Insert something to force a switch to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait a bit for this to be synchronized to slaves. 
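// The fixed sleep below gives the secondaries time to replicate the
// insert. A hedged alternative sketch: demand majority acknowledgement
// on the write itself, so no sleep is needed (assumes a replica set
// with a reachable majority; the timeout value is illustrative):
//
//	session.SetSafe(&mgo.Safe{WMode: "majority", WTimeout: 10000})
//	err := coll.Insert(M{"a": 1}) // returns once a majority holds the write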
- time.Sleep(3 * time.Second) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With monotonic consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), host) -} - -func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // Create new monotonic session with an explicit address to ensure - // a slave is synchronized before the master, otherwise a connection - // with the master may be used below for lack of other options. - var addr string - switch { - case strings.HasSuffix(ssresult.Host, ":40021"): - addr = "localhost:40022" - case strings.HasSuffix(ssresult.Host, ":40022"): - addr = "localhost:40021" - case strings.HasSuffix(ssresult.Host, ":40023"): - addr = "localhost:40021" - default: - c.Fatal("Unknown host: ", ssresult.Host) - } - - session, err = mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - // Check the address of the socket associated with the monotonic session. - c.Log("Running serverStatus and isMaster with monotonic session") - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - slave := ssresult.Host - c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave)) - - c.Assert(master, Not(Equals), slave) - - // Kill the master. - s.Stop(master) - - // Session must still be good, since we were talking to a slave. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - - c.Assert(ssresult.Host, Equals, slave, - Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host)) - - // If we try to insert something, it'll have to hold until the new - // master is available to move the connection, and work correctly. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Must now be talking to the new master. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // ... which is not the old one, since it's still dead. 
- c.Assert(ssresult.Host, Not(Equals), master) -} - -func (s *S) TestPrimaryShutdownEventual(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - master := result.Host - - session.SetMode(mgo.Eventual, true) - - // Should connect to the master when needed. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait a bit for this to be synchronized to slaves. - time.Sleep(3 * time.Second) - - // Kill the master. - s.Stop(master) - - // Should still work, with the new master now. - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), master) -} - -func (s *S) TestPreserveSocketCountOnSync(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { - stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") - time.Sleep(5e8) - } - - c.Assert(stats.SocketsAlive, Equals, 3) - - // Kill the master (with rs1, 'a' is always the master). - s.Stop("localhost:40011") - - // Wait for the logic to run for a bit and bring it back. - startedAll := make(chan bool) - go func() { - time.Sleep(5e9) - s.StartAll() - startedAll <- true - }() - - // Do not allow the test to return before the goroutine above is done. - defer func() { - <-startedAll - }() - - // Do an action to kick the resync logic in, and also to - // wait until the cluster recognizes the server is back. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - - for i := 0; i != 20; i++ { - stats = mgo.GetStats() - if stats.SocketsAlive == 3 { - break - } - c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive) - time.Sleep(5e8) - } - - // Ensure the number of sockets is preserved after syncing. - stats = mgo.GetStats() - c.Assert(stats.SocketsAlive, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 1) -} - -// Connect to the master of a deployment with a single server, -// run an insert, and then ensure the insert worked and that a -// single connection was established. -func (s *S) TestTopologySyncWithSingleMaster(c *C) { - // Use hostname here rather than IP, to make things trickier. - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - - // One connection used for discovery. Master socket recycled for - // insert. Socket is reserved after insert. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 0) - c.Assert(stats.SocketsInUse, Equals, 1) - - // Refresh session and socket must be released. - session.Refresh() - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestTopologySyncWithSlaveSeed(c *C) { - // That's supposed to be a slave. Must run discovery - // and find out master to insert successfully. 
- session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - - // One connection to each during discovery. Master - // socket recycled for insert. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - - // Only one socket reference alive, in the master socket owned - // by the above session. - c.Assert(stats.SocketsInUse, Equals, 1) - - // Refresh it, and it must be gone. - session.Refresh() - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSyncTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - s.Stop("localhost:40001") - - timeout := 3 * time.Second - session.SetSyncTimeout(timeout) - started := time.Now() - - // Do something. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestDialWithTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - timeout := 2 * time.Second - started := time.Now() - - // 40009 isn't used by the test servers. - session, err := mgo.DialWithTimeout("localhost:40009", timeout) - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(session, IsNil) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestSocketTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - s.Freeze("localhost:40001") - - timeout := 3 * time.Second - session.SetSocketTimeout(timeout) - started := time.Now() - - // Do something. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, ErrorMatches, ".*: i/o timeout") - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestSocketTimeoutOnDial(c *C) { - if *fast { - c.Skip("-fast") - } - - timeout := 1 * time.Second - - defer mgo.HackSyncSocketTimeout(timeout)() - - s.Freeze("localhost:40001") - - started := time.Now() - - session, err := mgo.DialWithTimeout("localhost:40001", timeout) - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(session, IsNil) - - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true) -} - -func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - timeout := 2 * time.Second - session.SetSocketTimeout(timeout) - - // Do something that relies on the timeout and works. - c.Assert(session.Ping(), IsNil) - - // Freeze and wait for the timeout to go by. - s.Freeze("localhost:40001") - time.Sleep(timeout + 500*time.Millisecond) - s.Thaw("localhost:40001") - - // Do something again. 
The timeout above should not have killed - // the socket as there was nothing to be done. - c.Assert(session.Ping(), IsNil) -} - -func (s *S) TestDialWithReplicaSetName(c *C) { - seedLists := [][]string{ - // rs1 primary and rs2 primary - []string{"localhost:40011", "localhost:40021"}, - // rs1 primary and rs2 secondary - []string{"localhost:40011", "localhost:40022"}, - // rs1 secondary and rs2 primary - []string{"localhost:40012", "localhost:40021"}, - // rs1 secondary and rs2 secondary - []string{"localhost:40012", "localhost:40022"}, - } - - rs2Members := []string{":40021", ":40022", ":40023"} - - verifySyncedServers := func(session *mgo.Session, numServers int) { - // wait for the server(s) to be synced - for len(session.LiveServers()) != numServers { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // ensure none of the rs2 set members are communicated with - for _, addr := range session.LiveServers() { - for _, rs2Member := range rs2Members { - c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false) - } - } - } - - // only communication with rs1 members is expected - for _, seedList := range seedLists { - info := mgo.DialInfo{ - Addrs: seedList, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - - session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - verifySyncedServers(session, 3) - session.Close() - - info.Direct = true - session, err = mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - verifySyncedServers(session, 1) - session.Close() - - connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ",")) - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - verifySyncedServers(session, 3) - session.Close() - - connectionUrl += "&connect=direct" - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - verifySyncedServers(session, 1) - session.Close() - } - -} - -func (s *S) TestDirect(c *C) { - session, err := mgo.Dial("localhost:40012?connect=direct") - c.Assert(err, IsNil) - defer session.Close() - - // We know that server is a slave. - session.SetMode(mgo.Monotonic, true) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) - - stats := mgo.GetStats() - c.Assert(stats.SocketsAlive, Equals, 1) - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 1) - - // We've got no master, so it'll timeout. - session.SetSyncTimeout(5e8 * time.Nanosecond) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") - - // Writing to the local database is okay. - coll = session.DB("local").C("mycoll") - defer coll.RemoveAll(nil) - id := bson.NewObjectId() - err = coll.Insert(M{"_id": id}) - c.Assert(err, IsNil) - - // Data was stored in the right server. - n, err := coll.Find(M{"_id": id}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) - - // Server hasn't changed. 
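// The replica-set dialing options exercised above come in two
// equivalent forms; a minimal sketch, with addresses and set name
// taken from the test topology:
//
//	info := mgo.DialInfo{
//		Addrs:          []string{"localhost:40011", "localhost:40012"},
//		Timeout:        5 * time.Second,
//		ReplicaSetName: "rs1",
//	}
//	session, err := mgo.DialWithInfo(&info)
//
// or, as a URL:
//
//	session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/?replicaSet=rs1")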
- result.Host = "" - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) -} - -func (s *S) TestDirectToUnknownStateMember(c *C) { - session, err := mgo.Dial("localhost:40041?connect=direct") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) - - // We've got no master, so it'll timeout. - session.SetSyncTimeout(5e8 * time.Nanosecond) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") - - // Slave is still reachable. - result.Host = "" - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) -} - -func (s *S) TestFailFast(c *C) { - info := mgo.DialInfo{ - Addrs: []string{"localhost:99999"}, - Timeout: 5 * time.Second, - FailFast: true, - } - - started := time.Now() - - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) -} - -type OpCounters struct { - Insert int - Query int - Update int - Delete int - GetMore int - Command int -} - -func getOpCounters(server string) (c *OpCounters, err error) { - session, err := mgo.Dial(server + "?connect=direct") - if err != nil { - return nil, err - } - defer session.Close() - session.SetMode(mgo.Monotonic, true) - result := struct{ OpCounters }{} - err = session.Run("serverStatus", &result) - return &result.OpCounters, err -} - -func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - // Do a SlaveOk query through MongoS - - mongos, err := mgo.Dial("localhost:40202") - c.Assert(err, IsNil) - defer mongos.Close() - - mongos.SetMode(mgo.Monotonic, true) - - coll := mongos.DB("mydb").C("mycoll") - result := &struct{}{} - for i := 0; i != 5; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - // Collect op counters for everyone again. 
- opc21b, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22b, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23b, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - masterPort := master[strings.Index(master, ":")+1:] - - var masterDelta, slaveDelta int - switch masterPort { - case "40021": - masterDelta = opc21b.Query - opc21a.Query - slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) - case "40022": - masterDelta = opc22b.Query - opc22a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query) - case "40023": - masterDelta = opc23b.Query - opc23a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query) - default: - c.Fatal("Uh?") - } - - c.Check(masterDelta, Equals, 0) // Just the counting itself. - c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. -} - -func (s *S) TestRemovalOfClusterMember(c *C) { - if *fast { - c.Skip("-fast") - } - - master, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer master.Close() - - // Wait for cluster to fully sync up. - for i := 0; i < 10; i++ { - if len(master.LiveServers()) == 3 { - break - } - time.Sleep(5e8) - } - if len(master.LiveServers()) != 3 { - c.Fatalf("Test started with bad cluster state: %v", master.LiveServers()) - } - - result := &struct { - IsMaster bool - Me string - }{} - slave := master.Copy() - slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently. - err = slave.Run("isMaster", result) - c.Assert(err, IsNil) - c.Assert(result.IsMaster, Equals, false) - slaveAddr := result.Me - - defer func() { - master.Refresh() - master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil) - master.Close() - slave.Close() - }() - - c.Logf("========== Removing slave: %s ==========", slaveAddr) - - master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) - err = master.Ping() - c.Assert(err, Equals, io.EOF) - - master.Refresh() - - // Give the cluster a moment to catch up by doing a roundtrip to the master. - err = master.Ping() - c.Assert(err, IsNil) - - time.Sleep(3e9) - - // This must fail since the slave has been taken off the cluster. - err = slave.Ping() - c.Assert(err, NotNil) - - for i := 0; i < 15; i++ { - if len(master.LiveServers()) == 2 { - break - } - time.Sleep(time.Second) - } - live := master.LiveServers() - if len(live) != 2 { - c.Errorf("Removed server still considered live: %#s", live) - } - - c.Log("========== Test succeeded. ==========") -} - -func (s *S) TestPoolLimitSimple(c *C) { - for test := 0; test < 2; test++ { - var session *mgo.Session - var err error - if test == 0 { - session, err = mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - session.SetPoolLimit(1) - } else { - session, err = mgo.Dial("localhost:40001?maxPoolSize=1") - c.Assert(err, IsNil) - } - defer session.Close() - - // Put one socket in use. - c.Assert(session.Ping(), IsNil) - - done := make(chan time.Duration) - - // Now block trying to get another one due to the pool limit. - go func() { - copy := session.Copy() - defer copy.Close() - started := time.Now() - c.Check(copy.Ping(), IsNil) - done <- time.Now().Sub(started) - }() - - time.Sleep(300 * time.Millisecond) - - // Put the one socket back in the pool, freeing it for the copy. 
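// Refresh hands the session's reserved socket back to the pool, which
// is what unblocks the copied session above. The cap itself can be set
// either of the two ways this test exercises; a minimal sketch:
//
//	session.SetPoolLimit(1) // per-server socket cap
//	// or: mgo.Dial("localhost:40001?maxPoolSize=1")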
- session.Refresh() - delay := <-done - c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) - } -} - -func (s *S) TestPoolLimitMany(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { - stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") - time.Sleep(500 * time.Millisecond) - } - c.Assert(stats.SocketsAlive, Equals, 3) - - const poolLimit = 64 - session.SetPoolLimit(poolLimit) - - // Consume the whole limit for the master. - var master []*mgo.Session - for i := 0; i < poolLimit; i++ { - s := session.Copy() - defer s.Close() - c.Assert(s.Ping(), IsNil) - master = append(master, s) - } - - before := time.Now() - go func() { - time.Sleep(3e9) - master[0].Refresh() - }() - - // Then, a single ping must block, since it would need another - // connection to the master, over the limit. Once the goroutine - // above releases its socket, it should move on. - session.Ping() - delay := time.Now().Sub(before) - c.Assert(delay > 3e9, Equals, true) - c.Assert(delay < 6e9, Equals, true) -} - -func (s *S) TestSetModeEventualIterBug(c *C) { - session1, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session1.Close() - - session1.SetMode(mgo.Eventual, false) - - coll1 := session1.DB("mydb").C("mycoll") - - const N = 100 - for i := 0; i < N; i++ { - err = coll1.Insert(M{"_id": i}) - c.Assert(err, IsNil) - } - - c.Logf("Waiting until secondary syncs") - for { - n, err := coll1.Count() - c.Assert(err, IsNil) - if n == N { - c.Logf("Found all") - break - } - } - - session2, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session2.Close() - - session2.SetMode(mgo.Eventual, false) - - coll2 := session2.DB("mydb").C("mycoll") - - i := 0 - iter := coll2.Find(nil).Batch(10).Iter() - var result struct{} - for iter.Next(&result) { - i++ - } - c.Assert(iter.Close(), Equals, nil) - c.Assert(i, Equals, N) -} - -func (s *S) TestCustomDialOld(c *C) { - dials := make(chan bool, 16) - dial := func(addr net.Addr) (net.Conn, error) { - tcpaddr, ok := addr.(*net.TCPAddr) - if !ok { - return nil, fmt.Errorf("unexpected address type: %T", addr) - } - dials <- true - return net.DialTCP("tcp", nil, tcpaddr) - } - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012"}, - Dial: dial, - } - - // Use hostname here rather than IP, to make things trickier. - session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - defer session.Close() - - const N = 3 - for i := 0; i < N; i++ { - select { - case <-dials: - case <-time.After(5 * time.Second): - c.Fatalf("expected %d dials, got %d", N, i) - } - } - select { - case <-dials: - c.Fatalf("got more dials than expected") - case <-time.After(100 * time.Millisecond): - } -} - -func (s *S) TestCustomDialNew(c *C) { - dials := make(chan bool, 16) - dial := func(addr *mgo.ServerAddr) (net.Conn, error) { - dials <- true - if addr.TCPAddr().Port == 40012 { - c.Check(addr.String(), Equals, "localhost:40012") - } - return net.DialTCP("tcp", nil, addr.TCPAddr()) - } - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012"}, - DialServer: dial, - } - - // Use hostname here rather than IP, to make things trickier. 
- session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - defer session.Close() - - const N = 3 - for i := 0; i < N; i++ { - select { - case <-dials: - case <-time.After(5 * time.Second): - c.Fatalf("expected %d dials, got %d", N, i) - } - } - select { - case <-dials: - c.Fatalf("got more dials than expected") - case <-time.After(100 * time.Millisecond): - } -} - -func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { - if *fast { - c.Skip("-fast") - } - - // Dial the shard. - session, err := mgo.Dial("localhost:40203") - c.Assert(err, IsNil) - defer session.Close() - - // Login and insert something to make it more realistic. - session.DB("admin").Login("root", "rapadura") - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(bson.M{"n": 1}) - c.Assert(err, IsNil) - - // Dial the replica set to figure the master out. - rs, err := mgo.Dial("root:rapadura@localhost:40031") - c.Assert(err, IsNil) - defer rs.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = rs.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = rs.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // This won't work because the master just died. - err = coll.Insert(bson.M{"n": 2}) - c.Assert(err, NotNil) - - // Refresh session and wait for re-election. - session.Refresh() - for i := 0; i < 60; i++ { - err = coll.Insert(bson.M{"n": 3}) - if err == nil { - break - } - c.Logf("Waiting for replica set to elect a new master. Last error: %v", err) - time.Sleep(500 * time.Millisecond) - } - c.Assert(err, IsNil) - - count, err := coll.Count() - c.Assert(count > 1, Equals, true) -} - -func (s *S) TestNearestSecondary(c *C) { - defer mgo.HackPingDelay(3 * time.Second)() - - rs1a := "127.0.0.1:40011" - rs1b := "127.0.0.1:40012" - rs1c := "127.0.0.1:40013" - s.Freeze(rs1b) - - session, err := mgo.Dial(rs1a) - c.Assert(err, IsNil) - defer session.Close() - - // Wait for the sync up to run through the first couple of servers. - for len(session.LiveServers()) != 2 { - c.Log("Waiting for two servers to be alive...") - time.Sleep(100 * time.Millisecond) - } - - // Extra delay to ensure the third server gets penalized. - time.Sleep(500 * time.Millisecond) - - // Release third server. - s.Thaw(rs1b) - - // Wait for it to come up. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for all servers to be alive...") - time.Sleep(100 * time.Millisecond) - } - - session.SetMode(mgo.Monotonic, true) - var result struct{ Host string } - - // See which slave picks the line, several times to avoid chance. - for i := 0; i < 10; i++ { - session.Refresh() - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) - } - - if *fast { - // Don't hold back for several seconds. - return - } - - // Now hold the other server for long enough to penalize it. - s.Freeze(rs1c) - time.Sleep(5 * time.Second) - s.Thaw(rs1c) - - // Wait for the ping to be processed. - time.Sleep(500 * time.Millisecond) - - // Repeating the test should now pick the former server consistently. 
- for i := 0; i < 10; i++ { - session.Refresh() - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, hostPort(rs1b)) - } -} - -func (s *S) TestConnectCloseConcurrency(c *C) { - restore := mgo.HackPingDelay(500 * time.Millisecond) - defer restore() - var wg sync.WaitGroup - const n = 500 - wg.Add(n) - for i := 0; i < n; i++ { - go func() { - defer wg.Done() - session, err := mgo.Dial("localhost:40001") - if err != nil { - c.Fatal(err) - } - time.Sleep(1) - session.Close() - }() - } - wg.Wait() -} - -func (s *S) TestSelectServers(c *C) { - if !s.versionAtLeast(2, 2) { - c.Skip("read preferences introduced in 2.2") - } - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Eventual, true) - - var result struct{ Host string } - - session.Refresh() - session.SelectServers(bson.D{{"rs1", "b"}}) - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, "40012") - - session.Refresh() - session.SelectServers(bson.D{{"rs1", "c"}}) - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, "40013") -} - -func (s *S) TestSelectServersWithMongos(c *C) { - if !s.versionAtLeast(2, 2) { - c.Skip("read preferences introduced in 2.2") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - var slave1, slave2 string - switch hostPort(master) { - case "40021": - slave1, slave2 = "b", "c" - case "40022": - slave1, slave2 = "a", "c" - case "40023": - slave1, slave2 = "a", "b" - } - - // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - // Do a SlaveOk query through MongoS - mongos, err := mgo.Dial("localhost:40202") - c.Assert(err, IsNil) - defer mongos.Close() - - mongos.SetMode(mgo.Monotonic, true) - - mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave1}}) - coll := mongos.DB("mydb").C("mycoll") - result := &struct{}{} - for i := 0; i != 5; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave2}}) - coll = mongos.DB("mydb").C("mycoll") - for i := 0; i != 7; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - // Collect op counters for everyone again. 
-	opc21b, err := getOpCounters("localhost:40021")
-	c.Assert(err, IsNil)
-	opc22b, err := getOpCounters("localhost:40022")
-	c.Assert(err, IsNil)
-	opc23b, err := getOpCounters("localhost:40023")
-	c.Assert(err, IsNil)
-
-	switch hostPort(master) {
-	case "40021":
-		c.Check(opc21b.Query-opc21a.Query, Equals, 0)
-		c.Check(opc22b.Query-opc22a.Query, Equals, 5)
-		c.Check(opc23b.Query-opc23a.Query, Equals, 7)
-	case "40022":
-		c.Check(opc21b.Query-opc21a.Query, Equals, 5)
-		c.Check(opc22b.Query-opc22a.Query, Equals, 0)
-		c.Check(opc23b.Query-opc23a.Query, Equals, 7)
-	case "40023":
-		c.Check(opc21b.Query-opc21a.Query, Equals, 5)
-		c.Check(opc22b.Query-opc22a.Query, Equals, 7)
-		c.Check(opc23b.Query-opc23a.Query, Equals, 0)
-	default:
-		c.Fatal("Uh?")
-	}
-}
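// The deleted suite above drives tag-based server selection. A minimal
// sketch of the SelectServers call outside the test harness, assuming
// an open session and the rs1 member tags used by these tests:
session.Refresh()
session.SelectServers(bson.D{{"rs1", "b"}})
var status struct{ Host string }
err := session.Run("serverStatus", &status) // now served by the "b" member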
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/dbserver.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/dbserver.go
new file mode 100644
index 00000000..f19b8d04
--- /dev/null
+++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/dbserver.go
@@ -0,0 +1,196 @@
+package dbtest
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2"
+	"gopkg.in/tomb.v2"
+)
+
+// DBServer controls a MongoDB server process to be used within test suites.
+//
+// The test server is started when Session is called the first time and should
+// remain running for the duration of all tests, with the Wipe method being
+// called between tests (before each of them) to clear stored data. After all tests
+// are done, the Stop method should be called to stop the test server.
+//
+// Before the DBServer is used, the SetPath method must be called to define
+// the location for the database files to be stored.
+type DBServer struct {
+	session *mgo.Session
+	output  bytes.Buffer
+	server  *exec.Cmd
+	dbpath  string
+	host    string
+	tomb    tomb.Tomb
+}
+
+// SetPath defines the path to the directory where the database files will be
+// stored if it is started. The directory path itself is not created or removed
+// by the test helper.
+func (dbs *DBServer) SetPath(dbpath string) {
+	dbs.dbpath = dbpath
+}
+
+func (dbs *DBServer) start() {
+	if dbs.server != nil {
+		panic("DBServer already started")
+	}
+	if dbs.dbpath == "" {
+		panic("DBServer.SetPath must be called before using the server")
+	}
+	mgo.SetStats(true)
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		panic("unable to listen on a local address: " + err.Error())
+	}
+	addr := l.Addr().(*net.TCPAddr)
+	l.Close()
+	dbs.host = addr.String()
+
+	args := []string{
+		"--dbpath", dbs.dbpath,
+		"--bind_ip", "127.0.0.1",
+		"--port", strconv.Itoa(addr.Port),
+		"--nssize", "1",
+		"--noprealloc",
+		"--smallfiles",
+		"--nojournal",
+	}
+	dbs.tomb = tomb.Tomb{}
+	dbs.server = exec.Command("mongod", args...)
+	dbs.server.Stdout = &dbs.output
+	dbs.server.Stderr = &dbs.output
+	err = dbs.server.Start()
+	if err != nil {
+		panic(err)
+	}
+	dbs.tomb.Go(dbs.monitor)
+	dbs.Wipe()
+}
+
+func (dbs *DBServer) monitor() error {
+	dbs.server.Process.Wait()
+	if dbs.tomb.Alive() {
+		// Present some debugging information.
+		fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
+		fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
+		fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
+		cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
+		cmd.Stdout = os.Stderr
+		cmd.Stderr = os.Stderr
+		cmd.Run()
+		fmt.Fprintf(os.Stderr, "----------------------------------------\n")
+
+		panic("mongod process died unexpectedly")
+	}
+	return nil
+}
+
+// Stop stops the test server process, if it is running.
+//
+// It's okay to call Stop multiple times. After the test server is
+// stopped it cannot be restarted.
+//
+// All database sessions must be closed before or while the Stop method
+// is running. Otherwise Stop will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Stop() {
+	if dbs.session != nil {
+		dbs.checkSessions()
+		if dbs.session != nil {
+			dbs.session.Close()
+			dbs.session = nil
+		}
+	}
+	if dbs.server != nil {
+		dbs.tomb.Kill(nil)
+		dbs.server.Process.Kill()
+		select {
+		case <-dbs.tomb.Dead():
+		case <-time.After(5 * time.Second):
+			panic("timeout waiting for mongod process to die")
+		}
+		dbs.server = nil
+	}
+}
+
+// Session returns a new session to the server. The returned session
+// must be closed after the test is done with it.
+//
+// The first Session obtained from a DBServer will start it.
+func (dbs *DBServer) Session() *mgo.Session {
+	if dbs.server == nil {
+		dbs.start()
+	}
+	if dbs.session == nil {
+		mgo.ResetStats()
+		var err error
+		dbs.session, err = mgo.Dial(dbs.host + "/test")
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dbs.session.Copy()
+}
+
+// checkSessions ensures all mgo sessions opened were properly closed.
+// For slightly faster tests, it may be disabled by setting the
+// environment variable CHECK_SESSIONS to 0.
+func (dbs *DBServer) checkSessions() {
+	if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
+		return
+	}
+	dbs.session.Close()
+	dbs.session = nil
+	for i := 0; i < 100; i++ {
+		stats := mgo.GetStats()
+		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	panic("There are mgo sessions still alive.")
+}
+
+// Wipe drops all created databases and their data.
+//
+// The MongoDB server remains running if it was previously running,
+// or stopped if it was previously stopped.
+//
+// All database sessions must be closed before or while the Wipe method
+// is running. Otherwise Wipe will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Wipe() {
+	if dbs.server == nil || dbs.session == nil {
+		return
+	}
+	dbs.checkSessions()
+	sessionUnset := dbs.session == nil
+	session := dbs.Session()
+	defer session.Close()
+	if sessionUnset {
+		dbs.session.Close()
+		dbs.session = nil
+	}
+	names, err := session.DatabaseNames()
+	if err != nil {
+		panic(err)
+	}
+	for _, name := range names {
+		switch name {
+		case "admin", "local", "config":
+		default:
+			err = session.DB(name).DropDatabase()
+			if err != nil {
+				panic(err)
+			}
+		}
+	}
+}
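// A minimal sketch of how a suite might drive the dbtest.DBServer
// helper added above. The package name, temp-dir handling, and test
// body are illustrative assumptions, not part of the diff.
package mypkg_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest"
)

var server dbtest.DBServer

func TestMain(m *testing.M) {
	dir, err := ioutil.TempDir("", "dbtest")
	if err != nil {
		panic(err)
	}
	server.SetPath(dir) // must happen before the first Session call
	code := m.Run()
	server.Stop() // every session must be closed by now, or this panics
	os.RemoveAll(dir)
	os.Exit(code)
}

func TestInsert(t *testing.T) {
	defer server.Wipe()         // runs last: drop the databases this test created
	session := server.Session() // the first call boots mongod on a random port
	defer session.Close()       // runs first: sessions must close before Wipe

	if err := session.DB("test").C("items").Insert(map[string]int{"n": 1}); err != nil {
		t.Fatal(err)
	}
}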
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go
index 9316c555..859fd9b8 100644
--- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go
+++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go
@@ -20,7 +20,7 @@
 //
 // New sessions are typically created by calling session.Copy on the
 // initial session obtained at dial time. These new sessions will share
-// the same cluster information and connection cache, and may be easily
+// the same cluster information and connection pool, and may be easily
 // handed into other methods and functions for organizing logic.
 // Every session created must have its Close method called at the end
 // of its life time, so its resources may be put back in the pool or
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/export_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/export_test.go
deleted file mode 100644
index 690f84d3..00000000
--- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/export_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package mgo
-
-import (
-	"time"
-)
-
-func HackPingDelay(newDelay time.Duration) (restore func()) {
-	globalMutex.Lock()
-	defer globalMutex.Unlock()
-
-	oldDelay := pingDelay
-	restore = func() {
-		globalMutex.Lock()
-		pingDelay = oldDelay
-		globalMutex.Unlock()
-	}
-	pingDelay = newDelay
-	return
-}
-
-func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
-	globalMutex.Lock()
-	defer globalMutex.Unlock()
-
-	oldTimeout := syncSocketTimeout
-	restore = func() {
-		globalMutex.Lock()
-		syncSocketTimeout = oldTimeout
-		globalMutex.Unlock()
-	}
-	syncSocketTimeout = newTimeout
-	return
-}
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go
index 8788a74c..503725bd 100644
--- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go
+++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go
@@ -692,7 +692,7 @@ func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
 // Read reads into b the next available data from the file and
 // returns the number of bytes written and an error in case
 // something wrong happened. At the end of the file, n will
-// be zero and err will be set to os.EOF.
+// be zero and err will be set to io.EOF.
 //
 // The parameters and behavior of this function turn the file
 // into an io.Reader.
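// The comment fix above matches GridFile's io.Reader contract: at end
// of file Read returns n == 0 and io.EOF. A minimal read-loop sketch,
// assuming an open session and an existing "myfile.txt":
file, err := session.DB("mydb").GridFS("fs").Open("myfile.txt")
if err == nil {
	defer file.Close()
	buf := make([]byte, 4096)
	for {
		n, err := file.Read(buf)
		if n > 0 {
			// consume buf[:n]
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			break // handle the error in real code
		}
	}
}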
diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs_test.go
deleted file mode 100644
index a1e013f1..00000000
--- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs_test.go
+++ /dev/null
@@ -1,708 +0,0 @@
-// mgo - MongoDB driver for Go
-//
-// Copyright (c) 2010-2012 - Gustavo Niemeyer
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this
-//    list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright notice,
-//    this list of conditions and the following disclaimer in the documentation
-//    and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package mgo_test
-
-import (
-	"io"
-	"os"
-	"time"
-
-	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2"
-	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson"
-	. "gopkg.in/check.v1"
-)
-
-func (s *S) TestGridFSCreate(c *C) {
-	session, err := mgo.Dial("localhost:40011")
-	c.Assert(err, IsNil)
-	defer session.Close()
-
-	db := session.DB("mydb")
-
-	before := bson.Now()
-
-	gfs := db.GridFS("fs")
-	file, err := gfs.Create("")
-	c.Assert(err, IsNil)
-
-	n, err := file.Write([]byte("some data"))
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 9)
-
-	err = file.Close()
-	c.Assert(err, IsNil)
-
-	after := bson.Now()
-
-	// Check the file information.
-	result := M{}
-	err = db.C("fs.files").Find(nil).One(result)
-	c.Assert(err, IsNil)
-
-	fileId, ok := result["_id"].(bson.ObjectId)
-	c.Assert(ok, Equals, true)
-	c.Assert(fileId.Valid(), Equals, true)
-	result["_id"] = ""
-
-	ud, ok := result["uploadDate"].(time.Time)
-	c.Assert(ok, Equals, true)
-	c.Assert(ud.After(before) && ud.Before(after), Equals, true)
-	result["uploadDate"] = ""
-
-	expected := M{
-		"_id":        "",
-		"length":     9,
-		"chunkSize":  255 * 1024,
-		"uploadDate": "",
-		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
-	}
-	c.Assert(result, DeepEquals, expected)
-
-	// Check the chunk.
-	result = M{}
-	err = db.C("fs.chunks").Find(nil).One(result)
-	c.Assert(err, IsNil)
-
-	chunkId, ok := result["_id"].(bson.ObjectId)
-	c.Assert(ok, Equals, true)
-	c.Assert(chunkId.Valid(), Equals, true)
-	result["_id"] = ""
-
-	expected = M{
-		"_id":      "",
-		"files_id": fileId,
-		"n":        0,
-		"data":     []byte("some data"),
-	}
-	c.Assert(result, DeepEquals, expected)
-
-	// Check that an index was created.
- indexes, err := db.C("fs.chunks").Indexes() - c.Assert(err, IsNil) - c.Assert(len(indexes), Equals, 2) - c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"}) -} - -func (s *S) TestGridFSFileDetails(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile1.txt") - c.Assert(err, IsNil) - - n, err := file.Write([]byte("some")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - - c.Assert(file.Size(), Equals, int64(4)) - - n, err = file.Write([]byte(" data")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 5) - - c.Assert(file.Size(), Equals, int64(9)) - - id, _ := file.Id().(bson.ObjectId) - c.Assert(id.Valid(), Equals, true) - c.Assert(file.Name(), Equals, "myfile1.txt") - c.Assert(file.ContentType(), Equals, "") - - var info interface{} - err = file.GetMeta(&info) - c.Assert(err, IsNil) - c.Assert(info, IsNil) - - file.SetId("myid") - file.SetName("myfile2.txt") - file.SetContentType("text/plain") - file.SetMeta(M{"any": "thing"}) - - c.Assert(file.Id(), Equals, "myid") - c.Assert(file.Name(), Equals, "myfile2.txt") - c.Assert(file.ContentType(), Equals, "text/plain") - - err = file.GetMeta(&info) - c.Assert(err, IsNil) - c.Assert(info, DeepEquals, bson.M{"any": "thing"}) - - err = file.Close() - c.Assert(err, IsNil) - - c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34") - - ud := file.UploadDate() - now := time.Now() - c.Assert(ud.Before(now), Equals, true) - c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true) - - result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - result["uploadDate"] = "" - - expected := M{ - "_id": "myid", - "length": 9, - "chunkSize": 255 * 1024, - "uploadDate": "", - "md5": "1e50210a0202497fb79bc38b6ade6c34", - "filename": "myfile2.txt", - "contentType": "text/plain", - "metadata": M{"any": "thing"}, - } - c.Assert(result, DeepEquals, expected) -} - -func (s *S) TestGridFSSetUploadDate(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - - t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local) - file.SetUploadDate(t) - - err = file.Close() - c.Assert(err, IsNil) - - // Check the file information. - result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - ud := result["uploadDate"].(time.Time) - if !ud.Equal(t) { - c.Fatalf("want upload date %s, got %s", t, ud) - } -} - -func (s *S) TestGridFSCreateWithChunking(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("") - c.Assert(err, IsNil) - - file.SetChunkSize(5) - - // Smaller than the chunk size. - n, err := file.Write([]byte("abc")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - // Boundary in the middle. - n, err = file.Write([]byte("defg")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - - // Boundary at the end. - n, err = file.Write([]byte("hij")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - // Larger than the chunk size, with 3 chunks. - n, err = file.Write([]byte("klmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 12) - - err = file.Close() - c.Assert(err, IsNil) - - // Check the file information. 
- result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - fileId, _ := result["_id"].(bson.ObjectId) - c.Assert(fileId.Valid(), Equals, true) - result["_id"] = "" - result["uploadDate"] = "" - - expected := M{ - "_id": "", - "length": 22, - "chunkSize": 5, - "uploadDate": "", - "md5": "44a66044834cbe55040089cabfc102d5", - } - c.Assert(result, DeepEquals, expected) - - // Check the chunks. - iter := db.C("fs.chunks").Find(nil).Sort("n").Iter() - dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"} - for i := 0; ; i++ { - result = M{} - if !iter.Next(result) { - if i != 5 { - c.Fatalf("Expected 5 chunks, got %d", i) - } - break - } - c.Assert(iter.Close(), IsNil) - - result["_id"] = "" - - expected = M{ - "_id": "", - "files_id": fileId, - "n": i, - "data": []byte(dataChunks[i]), - } - c.Assert(result, DeepEquals, expected) - } -} - -func (s *S) TestGridFSAbort(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - - file.SetChunkSize(5) - - n, err := file.Write([]byte("some data")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 9) - - var count int - for i := 0; i < 10; i++ { - count, err = db.C("fs.chunks").Count() - if count > 0 || err != nil { - break - } - } - c.Assert(err, IsNil) - c.Assert(count, Equals, 1) - - file.Abort() - - err = file.Close() - c.Assert(err, ErrorMatches, "write aborted") - - count, err = db.C("fs.chunks").Count() - c.Assert(err, IsNil) - c.Assert(count, Equals, 0) -} - -func (s *S) TestGridFSCloseConflict(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true}) - - // For a closing-time conflict - err = db.C("fs.files").Insert(M{"filename": "foo.txt"}) - c.Assert(err, IsNil) - - gfs := db.GridFS("fs") - file, err := gfs.Create("foo.txt") - c.Assert(err, IsNil) - - _, err = file.Write([]byte("some data")) - c.Assert(err, IsNil) - - err = file.Close() - c.Assert(mgo.IsDup(err), Equals, true) - - count, err := db.C("fs.chunks").Count() - c.Assert(err, IsNil) - c.Assert(count, Equals, 0) -} - -func (s *S) TestGridFSOpenNotFound(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.OpenId("non-existent") - c.Assert(err == mgo.ErrNotFound, Equals, true) - c.Assert(file, IsNil) - - file, err = gfs.Open("non-existent") - c.Assert(err == mgo.ErrNotFound, Equals, true) - c.Assert(file, IsNil) -} - -func (s *S) TestGridFSReadAll(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - b := make([]byte, 30) - n, err = file.Read(b) - c.Assert(n, Equals, 22) - c.Assert(err, IsNil) - - n, err = file.Read(b) - c.Assert(n, Equals, 0) - c.Assert(err == io.EOF, Equals, true) - - err = file.Close() - c.Assert(err, IsNil) -} - -func (s *S) TestGridFSReadChunking(c *C) { - session, err := 
mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("") - c.Assert(err, IsNil) - - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - b := make([]byte, 30) - - // Smaller than the chunk size. - n, err = file.Read(b[:3]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(b[:3], DeepEquals, []byte("abc")) - - // Boundary in the middle. - n, err = file.Read(b[:4]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - c.Assert(b[:4], DeepEquals, []byte("defg")) - - // Boundary at the end. - n, err = file.Read(b[:3]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(b[:3], DeepEquals, []byte("hij")) - - // Larger than the chunk size, with 3 chunks. - n, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(n, Equals, 12) - c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv")) - - n, err = file.Read(b) - c.Assert(n, Equals, 0) - c.Assert(err == io.EOF, Equals, true) - - err = file.Close() - c.Assert(err, IsNil) -} - -func (s *S) TestGridFSOpen(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - file, err = gfs.Open("myfile.txt") - c.Assert(err, IsNil) - defer file.Close() - - var b [1]byte - - _, err = file.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "2") -} - -func (s *S) TestGridFSSeek(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - b := make([]byte, 5) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - o, err := file.Seek(3, os.SEEK_SET) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(3)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("defgh")) - - o, err = file.Seek(5, os.SEEK_CUR) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(13)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("nopqr")) - - o, err = file.Seek(0, os.SEEK_END) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(22)) - n, err = file.Read(b) - c.Assert(err, Equals, io.EOF) - c.Assert(n, Equals, 0) - - o, err = file.Seek(-10, os.SEEK_END) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(12)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("mnopq")) - - o, err = file.Seek(8, os.SEEK_SET) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(8)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("ijklm")) - - // Trivial seek forward within same chunk. Already - // got the data, shouldn't touch the database. 
- sent := mgo.GetStats().SentOps - o, err = file.Seek(1, os.SEEK_CUR) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(14)) - c.Assert(mgo.GetStats().SentOps, Equals, sent) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("opqrs")) - - // Try seeking past end of file. - file.Seek(3, os.SEEK_SET) - o, err = file.Seek(23, os.SEEK_SET) - c.Assert(err, ErrorMatches, "seek past end of file") - c.Assert(o, Equals, int64(3)) -} - -func (s *S) TestGridFSRemoveId(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - id := file.Id() - file.Close() - - err = gfs.RemoveId(id) - c.Assert(err, IsNil) - - file, err = gfs.Open("myfile.txt") - c.Assert(err, IsNil) - defer file.Close() - - var b [1]byte - - _, err = file.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "1") - - n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 0) -} - -func (s *S) TestGridFSRemove(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - err = gfs.Remove("myfile.txt") - c.Assert(err, IsNil) - - _, err = gfs.Open("myfile.txt") - c.Assert(err == mgo.ErrNotFound, Equals, true) - - n, err := db.C("fs.chunks").Find(nil).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 0) -} - -func (s *S) TestGridFSOpenNext(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile1.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile2.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - var f *mgo.GridFile - var b [1]byte - - iter := gfs.Find(nil).Sort("-filename").Iter() - - ok := gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile2.txt") - - _, err = f.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "2") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile1.txt") - - _, err = f.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "1") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(f, IsNil) - - // Do it again with a more restrictive query to make sure - // it's actually taken into account. 
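[Editor's sketch] The gridfs_test.go file removed above was the main illustration of the public GridFS API. For reference, a minimal sketch of the create/read round-trip those tests exercised; the address, database name and file name are placeholders, and panics stand in for real error handling:

    package main

    import (
        "io/ioutil"

        "gopkg.in/mgo.v2"
    )

    func main() {
        session, err := mgo.Dial("localhost:27017") // placeholder address
        if err != nil {
            panic(err)
        }
        defer session.Close()

        gfs := session.DB("mydb").GridFS("fs")

        // Write a file; the removed tests used a 5-byte chunk size to force chunking.
        file, err := gfs.Create("myfile.txt")
        if err != nil {
            panic(err)
        }
        file.SetChunkSize(5)
        if _, err := file.Write([]byte("abcdefghijklmnopqrstuv")); err != nil {
            panic(err)
        }
        if err := file.Close(); err != nil {
            panic(err)
        }

        // Read it back; GridFile implements io.Reader (and io.Seeker).
        file, err = gfs.Open("myfile.txt")
        if err != nil {
            panic(err)
        }
        defer file.Close()
        data, err := ioutil.ReadAll(file)
        if err != nil {
            panic(err)
        }
        _ = data // 22 bytes, reassembled from five 5-byte chunks plus a 2-byte tail
    }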
- iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter() - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile1.txt") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(f, IsNil) -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.c b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.c similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.c rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.c diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.go similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.go rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.go diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.c b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.c rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.go rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.h b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.h rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.c b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.c rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.h b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h similarity index 100% rename from server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.h rename to server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram_test.go deleted file mode 100644 index 029622fd..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package scram_test - -import ( - "crypto/sha1" - "testing" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram" - . 
"gopkg.in/check.v1" - "strings" -) - -var _ = Suite(&S{}) - -func Test(t *testing.T) { TestingT(t) } - -type S struct{} - -var tests = [][]string{{ - "U: user pencil", - "N: fyko+d2lbbFgONRv9qkxdawL", - "C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", - "S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", - "C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", - "S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=", -}, { - "U: root fe8c89e308ec08763df36333cbf5d3a2", - "N: OTcxNDk5NjM2MzE5", - "C: n,,n=root,r=OTcxNDk5NjM2MzE5", - "S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000", - "C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=", - "S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=", -}} - -func (s *S) TestExamples(c *C) { - for _, steps := range tests { - if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") { - c.Fatalf("Invalid test: %#v", steps) - } - auth := strings.Fields(steps[0][3:]) - client := scram.NewClient(sha1.New, auth[0], auth[1]) - first, done := true, false - c.Logf("-----") - c.Logf("%s", steps[0]) - for _, step := range steps[1:] { - c.Logf("%s", step) - switch step[:3] { - case "N: ": - client.SetNonce([]byte(step[3:])) - case "C: ": - if first { - first = false - done = client.Step(nil) - } - c.Assert(done, Equals, false) - c.Assert(client.Err(), IsNil) - c.Assert(string(client.Out()), Equals, step[3:]) - case "S: ": - first = false - done = client.Step([]byte(step[3:])) - default: - panic("invalid test line: " + step) - } - } - c.Assert(done, Equals, true) - c.Assert(client.Err(), IsNil) - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/queue_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/queue_test.go deleted file mode 100644 index bd0ab550..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/queue_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - . 
"gopkg.in/check.v1" -) - -type QS struct{} - -var _ = Suite(&QS{}) - -func (s *QS) TestSequentialGrowth(c *C) { - q := queue{} - n := 2048 - for i := 0; i != n; i++ { - q.Push(i) - } - for i := 0; i != n; i++ { - c.Assert(q.Pop(), Equals, i) - } -} - -var queueTestLists = [][]int{ - // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - - // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} - {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11}, - - // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} - {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11}, - - // {0, 1, 2, 3, 4, 5, 6, 7, 8} - {0, 1, 2, 3, 4, 5, 6, 7, 8, - -1, -1, -1, -1, -1, -1, -1, -1, -1, - 0, 1, 2, 3, 4, 5, 6, 7, 8}, -} - -func (s *QS) TestQueueTestLists(c *C) { - test := []int{} - testi := 0 - reset := func() { - test = test[0:0] - testi = 0 - } - push := func(i int) { - test = append(test, i) - } - pop := func() (i int) { - if testi == len(test) { - return -1 - } - i = test[testi] - testi++ - return - } - - for _, list := range queueTestLists { - reset() - q := queue{} - for _, n := range list { - if n == -1 { - c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list)) - } else { - q.Push(n) - push(n) - } - } - - for n := pop(); n != -1; n = pop() { - c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list)) - } - - c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list)) - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go index 1712c27a..c3f107cb 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go @@ -3,7 +3,7 @@ package mgo import ( - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/sasl" + "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl" ) func saslNew(cred Credential, host string) (saslStepper, error) { diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/server.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/server.go index 368c2d65..2781d13b 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/server.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/server.go @@ -84,9 +84,8 @@ func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) * sync: sync, dial: dial, info: &defaultServerInfo, + pingValue: time.Hour, // Push it back before an actual ping. } - // Once so the server gets a ping value, then loop in background. - server.pinger(false) go server.pinger(true) return server } @@ -274,7 +273,7 @@ NextTagSet: return false } -var pingDelay = 5 * time.Second +var pingDelay = 15 * time.Second func (server *mongoServer) pinger(loop bool) { var delay time.Duration @@ -297,7 +296,7 @@ func (server *mongoServer) pinger(loop bool) { time.Sleep(delay) } op := op - socket, _, err := server.AcquireSocket(0, 3*delay) + socket, _, err := server.AcquireSocket(0, delay) if err == nil { start := time.Now() _, _ = socket.SimpleQuery(&op) @@ -400,7 +399,7 @@ func (servers *mongoServers) Empty() bool { // BestFit returns the best guess of what would be the most interesting // server to perform operations on at this point in time. 
-func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer { +func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer { var best *mongoServer for _, next := range servers.slice { if best == nil { @@ -417,9 +416,9 @@ func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer { switch { case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags): // Must have requested tags. - case next.info.Master != best.info.Master: - // Prefer slaves. - swap = best.info.Master + case next.info.Master != best.info.Master && mode != Nearest: + // Prefer slaves, unless the mode is PrimaryPreferred. + swap = (mode == PrimaryPreferred) != best.info.Master case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond: // Prefer nearest server. swap = next.pingValue < best.pingValue diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session.go index 53845f41..f4218625 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session.go @@ -44,33 +44,53 @@ import ( "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" ) -type mode int +type Mode int const ( - Eventual mode = 0 - Monotonic mode = 1 - Strong mode = 2 + // Relevant documentation on read preference modes: + // + // http://docs.mongodb.org/manual/reference/read-preference/ + // + Primary Mode = 2 // Default mode. All operations read from the current replica set primary. + PrimaryPreferred Mode = 3 // Read from the primary if available. Read from the secondary otherwise. + Secondary Mode = 4 // Read from one of the nearest secondary members of the replica set. + SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise. + Nearest Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary. + + // Read preference modes are specific to mgo: + Eventual Mode = 0 // Same as Nearest, but may change servers between reads. + Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write. + Strong Mode = 2 // Same as Primary. ) +// mgo.v3: Drop Strong mode, suffix all modes with "Mode". + // When changing the Session type, check if newSession and copySession // need to be updated too. +// Session represents a communication session with the database. +// +// All Session methods are concurrency-safe and may be called from multiple +// goroutines. In all session modes but Eventual, using the session from +// multiple goroutines will cause them to share the same underlying socket. +// See the documentation on Session.SetMode for more details. 
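[Editor's sketch] With the mode type exported as Mode and the standard read-preference constants added above, callers can pick a replica-set read preference directly. A minimal sketch, assuming a `session` dialed against a replica set as in the snippet earlier; none of this is taken verbatim from the patch:

    // Read from the nearest member, primary or secondary alike.
    session.SetMode(mgo.Nearest, true)

    // Prefer the primary, but fall back to a secondary when it is unavailable.
    session.SetMode(mgo.PrimaryPreferred, false)

    // The pre-existing mgo-specific modes keep working; note that Strong and
    // Primary share the value 2, so they behave identically.
    session.SetMode(mgo.Monotonic, true)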
type Session struct { - m sync.RWMutex - cluster_ *mongoCluster - slaveSocket *mongoSocket - masterSocket *mongoSocket - slaveOk bool - consistency mode - queryConfig query - safeOp *queryOp - syncTimeout time.Duration - sockTimeout time.Duration - defaultdb string - sourcedb string - dialCred *Credential - creds []Credential - poolLimit int + m sync.RWMutex + cluster_ *mongoCluster + slaveSocket *mongoSocket + masterSocket *mongoSocket + slaveOk bool + consistency Mode + queryConfig query + safeOp *queryOp + syncTimeout time.Duration + sockTimeout time.Duration + defaultdb string + sourcedb string + dialCred *Credential + creds []Credential + poolLimit int + bypassValidation bool } type Database struct { @@ -97,7 +117,7 @@ type query struct { } type getLastError struct { - CmdName int "getLastError" + CmdName int "getLastError,omitempty" W interface{} "w,omitempty" WTimeout int "wtimeout,omitempty" FSync bool "fsync,omitempty" @@ -218,7 +238,20 @@ func Dial(url string) (*Session, error) { // // See SetSyncTimeout for customizing the timeout for the session. func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { - uinfo, err := parseURL(url) + info, err := ParseURL(url) + if err != nil { + return nil, err + } + info.Timeout = timeout + return DialWithInfo(info) +} + +// ParseURL parses a MongoDB URL as accepted by the Dial function and returns +// a value suitable for providing into DialWithInfo. +// +// See Dial for more details on the format of url. +func ParseURL(url string) (*DialInfo, error) { + uinfo, err := extractURL(url) if err != nil { return nil, err } @@ -259,7 +292,6 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { info := DialInfo{ Addrs: uinfo.addrs, Direct: direct, - Timeout: timeout, Database: uinfo.db, Username: uinfo.user, Password: uinfo.pass, @@ -269,7 +301,7 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { PoolLimit: poolLimit, ReplicaSetName: setName, } - return DialWithInfo(&info) + return &info, nil } // DialInfo holds options for establishing a session with a MongoDB cluster. @@ -286,7 +318,7 @@ type DialInfo struct { // Timeout is the amount of time to wait for a server to respond when // first connecting and on follow up operations in the session. If // timeout is zero, the call may block forever waiting for a connection - // to be established. + // to be established. Timeout does not affect logic in DialServer. Timeout time.Duration // FailFast will cause connection and query attempts to fail faster when @@ -342,6 +374,8 @@ type DialInfo struct { Dial func(addr net.Addr) (net.Conn, error) } +// mgo.v3: Drop DialInfo.Dial. + // ServerAddr represents the address for establishing a connection to an // individual MongoDB server. 
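[Editor's sketch] DialWithTimeout is now a thin wrapper over the newly exported ParseURL plus DialWithInfo, which lets callers adjust the DialInfo between the two steps. A sketch of that pattern; the URL and timeout values are placeholders, and a `time` import is assumed:

    info, err := mgo.ParseURL("mongodb://user:secret@localhost:27017/mydb")
    if err != nil {
        panic(err)
    }
    info.Timeout = 10 * time.Second // per the doc fix above, not applied inside DialServer
    info.FailFast = true

    session, err := mgo.DialWithInfo(info)
    if err != nil {
        panic(err)
    }
    defer session.Close()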
type ServerAddr struct { @@ -428,7 +462,7 @@ type urlInfo struct { options map[string]string } -func parseURL(s string) (*urlInfo, error) { +func extractURL(s string) (*urlInfo, error) { if strings.HasPrefix(s, "mongodb://") { s = s[10:] } @@ -469,7 +503,7 @@ func parseURL(s string) (*urlInfo, error) { return info, nil } -func newSession(consistency mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { +func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { cluster.Acquire() session = &Session{ cluster_: cluster, @@ -594,10 +628,14 @@ func (db *Database) GridFS(prefix string) *GridFS { // http://www.mongodb.org/display/DOCS/List+of+Database+CommandSkips // func (db *Database) Run(cmd interface{}, result interface{}) error { - if name, ok := cmd.(string); ok { - cmd = bson.D{{name, 1}} + socket, err := db.Session.acquireSocket(true) + if err != nil { + return err } - return db.C("$cmd").Find(cmd).One(result) + defer socket.Release() + + // This is an optimized form of db.C("$cmd").Find(cmd).One(result). + return db.run(socket, cmd, result) } // Credential holds details to authenticate with a MongoDB server. @@ -851,7 +889,7 @@ func (db *Database) UpsertUser(user *User) error { func isNoCmd(err error) bool { e, ok := err.(*QueryError) - return ok && strings.HasPrefix(e.Message, "no such cmd:") + return ok && (e.Code == 59 || e.Code == 13390 || strings.HasPrefix(e.Message, "no such cmd:")) } func isNotFound(err error) bool { @@ -943,15 +981,18 @@ func (db *Database) RemoveUser(user string) error { type indexSpec struct { Name, NS string Key bson.D - Unique bool ",omitempty" - DropDups bool "dropDups,omitempty" - Background bool ",omitempty" - Sparse bool ",omitempty" - Bits, Min, Max int ",omitempty" - ExpireAfter int "expireAfterSeconds,omitempty" - Weights bson.D ",omitempty" - DefaultLanguage string "default_language,omitempty" - LanguageOverride string "language_override,omitempty" + Unique bool ",omitempty" + DropDups bool "dropDups,omitempty" + Background bool ",omitempty" + Sparse bool ",omitempty" + Bits int ",omitempty" + Min, Max float64 ",omitempty" + BucketSize float64 "bucketSize,omitempty" + ExpireAfter int "expireAfterSeconds,omitempty" + Weights bson.D ",omitempty" + DefaultLanguage string "default_language,omitempty" + LanguageOverride string "language_override,omitempty" + TextIndexVersion int "textIndexVersion,omitempty" } type Index struct { @@ -965,12 +1006,21 @@ type Index struct { // documents with indexed time.Time older than the provided delta. ExpireAfter time.Duration - // Name holds the stored index name. On creation this field is ignored and the index name - // is automatically computed by EnsureIndex based on the index key + // Name holds the stored index name. On creation if this field is unset it is + // computed by EnsureIndex based on the index key. Name string // Properties for spatial indexes. - Bits, Min, Max int + // + // Min and Max were improperly typed as int when they should have been + // floats. To preserve backwards compatibility they are still typed as + // int and the following two fields enable reading and writing the same + // fields as float numbers. In mgo.v3, these fields will be dropped and + // Min/Max will become floats. + Min, Max int + Minf, Maxf float64 + BucketSize float64 + Bits int // Properties for text indexes. DefaultLanguage string @@ -983,6 +1033,9 @@ type Index struct { Weights map[string]int } +// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats. 
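[Editor's sketch] The Index additions above (float Minf/Maxf alongside the legacy int Min/Max, BucketSize, and a Name that EnsureIndex now copies into the spec) pair with the new DropIndexName introduced just below. A combined sketch, assuming `coll := session.DB("mydb").C("places")` and that the "$2d:" key syntax is used for a geo index; all names are placeholders:

    err := coll.EnsureIndex(mgo.Index{
        Key:  []string{"$2d:location"},
        Name: "locationIdx", // honored on creation now that EnsureIndex passes Name through
        Minf: -180.5,        // float bounds go in Minf/Maxf; int Min/Max remain for compatibility
        Maxf: 180.5,
        Bits: 26,
    })
    if err == nil {
        // Drop by index name rather than by key, via the new DropIndexName.
        err = coll.DropIndexName("locationIdx")
    }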
+// mgo.v3: Drop DropDups as it's unsupported past 2.8.
+
 type indexKeyInfo struct {
 	name    string
 	key     bson.D
@@ -1164,14 +1217,24 @@ func (c *Collection) EnsureIndex(index Index) error {
 		Background:       index.Background,
 		Sparse:           index.Sparse,
 		Bits:             index.Bits,
-		Min:              index.Min,
-		Max:              index.Max,
+		Min:              index.Minf,
+		Max:              index.Maxf,
+		BucketSize:       index.BucketSize,
 		ExpireAfter:      int(index.ExpireAfter / time.Second),
 		Weights:          keyInfo.weights,
 		DefaultLanguage:  index.DefaultLanguage,
 		LanguageOverride: index.LanguageOverride,
 	}
+	if spec.Min == 0 && spec.Max == 0 {
+		spec.Min = float64(index.Min)
+		spec.Max = float64(index.Max)
+	}
+
+	if index.Name != "" {
+		spec.Name = index.Name
+	}
+
 NextField:
 	for name, weight := range index.Weights {
 		for i, elem := range spec.Weights {
@@ -1183,31 +1246,33 @@ NextField:
 		panic("weight provided for field that is not part of index key: " + name)
 	}
 
-	session = session.Clone()
-	defer session.Close()
-	session.SetMode(Strong, false)
-	session.EnsureSafe(&Safe{})
+	cloned := session.Clone()
+	defer cloned.Close()
+	cloned.SetMode(Strong, false)
+	cloned.EnsureSafe(&Safe{})
+	db := c.Database.With(cloned)
 
-	db := c.Database.With(session)
-	err = db.C("system.indexes").Insert(&spec)
+	// Try with a command first.
+	err = db.Run(bson.D{{"createIndexes", c.Name}, {"indexes", []indexSpec{spec}}}, nil)
+	if isNoCmd(err) {
+		// Command not yet supported. Insert into the indexes collection instead.
+		err = db.C("system.indexes").Insert(&spec)
+	}
 	if err == nil {
 		session.cluster().CacheIndex(cacheKey, true)
 	}
-	session.Close()
 	return err
 }
 
-// DropIndex removes the index with key from the collection.
+// DropIndex drops the index with the provided key from the c collection.
 //
-// The key value determines which fields compose the index. The index ordering
-// will be ascending by default. To obtain an index with a descending order,
-// the field name should be prefixed by a dash (e.g. []string{"-time"}).
+// See EnsureIndex for details on the accepted key variants.
 //
 // For example:
 //
-//     err := collection.DropIndex("lastname", "firstname")
+//     err1 := collection.DropIndex("firstField", "-secondField")
+//     err2 := collection.DropIndex("customIndexName")
 //
-// See the EnsureIndex method for more details on indexes.
 func (c *Collection) DropIndex(key ...string) error {
 	keyInfo, err := parseIndexKey(key)
 	if err != nil {
@@ -1237,6 +1302,58 @@ func (c *Collection) DropIndex(key ...string) error {
 	return nil
 }
 
+// DropIndexName removes the index with the provided index name.
+//
+// For example:
+//
+//     err := collection.DropIndexName("customIndexName")
+//
+func (c *Collection) DropIndexName(name string) error {
+	session := c.Database.Session
+
+	session = session.Clone()
+	defer session.Close()
+	session.SetMode(Strong, false)
+
+	c = c.With(session)
+
+	indexes, err := c.Indexes()
+	if err != nil {
+		return err
+	}
+
+	var index Index
+	for _, idx := range indexes {
+		if idx.Name == name {
+			index = idx
+			break
+		}
+	}
+
+	if index.Name != "" {
+		keyInfo, err := parseIndexKey(index.Key)
+		if err != nil {
+			return err
+		}
+
+		cacheKey := c.FullName + "\x00" + keyInfo.name
+		session.cluster().CacheIndex(cacheKey, false)
+	}
+
+	result := struct {
+		ErrMsg string
+		Ok     bool
+	}{}
+	err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result)
+	if err != nil {
+		return err
+	}
+	if !result.Ok {
+		return errors.New(result.ErrMsg)
+	}
+	return nil
+}
+
 // Indexes returns a list of all indexes for the collection.
 //
 // For example, this snippet would drop all available indexes:
@@ -1306,15 +1423,36 @@ func (c *Collection) Indexes() (indexes []Index, err error) {
 }
 
 func indexFromSpec(spec indexSpec) Index {
-	return Index{
-		Name:        spec.Name,
-		Key:         simpleIndexKey(spec.Key),
-		Unique:      spec.Unique,
-		DropDups:    spec.DropDups,
-		Background:  spec.Background,
-		Sparse:      spec.Sparse,
-		ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second,
+	index := Index{
+		Name:             spec.Name,
+		Key:              simpleIndexKey(spec.Key),
+		Unique:           spec.Unique,
+		DropDups:         spec.DropDups,
+		Background:       spec.Background,
+		Sparse:           spec.Sparse,
+		Minf:             spec.Min,
+		Maxf:             spec.Max,
+		Bits:             spec.Bits,
+		BucketSize:       spec.BucketSize,
+		DefaultLanguage:  spec.DefaultLanguage,
+		LanguageOverride: spec.LanguageOverride,
+		ExpireAfter:      time.Duration(spec.ExpireAfter) * time.Second,
+	}
+	if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max {
+		index.Min = int(spec.Min)
+		index.Max = int(spec.Max)
+	}
+	if spec.TextIndexVersion > 0 {
+		index.Key = make([]string, len(spec.Weights))
+		index.Weights = make(map[string]int)
+		for i, elem := range spec.Weights {
+			index.Key[i] = "$text:" + elem.Name
+			if w, ok := elem.Value.(int); ok {
+				index.Weights[elem.Name] = w
+			}
+		}
 	}
+	return index
 }
 
 type indexSlice []Index
@@ -1466,7 +1604,7 @@ func (s *Session) Refresh() {
 // Shifting between Monotonic and Strong modes will keep a previously
 // reserved connection for the session unless refresh is true or the
 // connection is unsuitable (to a secondary server in a Strong session).
-func (s *Session) SetMode(consistency mode, refresh bool) {
+func (s *Session) SetMode(consistency Mode, refresh bool) {
 	s.m.Lock()
 	debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket)
 	s.consistency = consistency
@@ -1482,7 +1620,7 @@ func (s *Session) SetMode(consistency mode, refresh bool) {
 }
 
 // Mode returns the current consistency mode for the session.
-func (s *Session) Mode() mode {
+func (s *Session) Mode() Mode {
 	s.m.RLock()
 	mode := s.consistency
 	s.m.RUnlock()
@@ -1541,6 +1679,24 @@ func (s *Session) SetPoolLimit(limit int) {
 	s.m.Unlock()
 }
 
+// SetBypassValidation sets whether the server should bypass the registered
+// validation expressions executed when documents are inserted or modified,
+// in the interest of preserving properties for documents in the collection
+// being modified. The default is to not bypass, and thus to perform the
+// validation expressions registered for modified collections.
+//
+// Document validation was introduced in MongoDB 3.2.
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.org/manual/release-notes/3.2/#bypass-validation
+//
+func (s *Session) SetBypassValidation(bypass bool) {
+	s.m.Lock()
+	s.bypassValidation = bypass
+	s.m.Unlock()
+}
+
 // SetBatch sets the default batch size used when fetching documents from the
 // database. It's possible to change this setting on a per-query basis as
 // well, using the Query.Batch method.
@@ -1582,8 +1738,8 @@ type Safe struct {
 	W        int    // Min # of servers to ack before success
 	WMode    string // Write mode for MongoDB 2.0+ (e.g. 
"majority") WTimeout int // Milliseconds to wait for W before timing out - FSync bool // Should servers sync to disk before returning success - J bool // Wait for next group commit if journaling; no effect otherwise + FSync bool // Sync via the journal if present, or via data files sync otherwise + J bool // Sync via the journal if present } // Safe returns the current safety mode for the session. @@ -1627,10 +1783,18 @@ func (s *Session) Safe() (safe *Safe) { // the links below for more details (note that MongoDB internally reuses the // "w" field name for WMode). // -// If safe.FSync is true and journaling is disabled, the servers will be -// forced to sync all files to disk immediately before returning. If the -// same option is true but journaling is enabled, the server will instead -// await for the next group commit before returning. +// If safe.J is true, servers will block until write operations have been +// committed to the journal. Cannot be used in combination with FSync. Prior +// to MongoDB 2.6 this option was ignored if the server was running without +// journaling. Starting with MongoDB 2.6 write operations will fail with an +// exception if this option is used when the server is running without +// journaling. +// +// If safe.FSync is true and the server is running without journaling, blocks +// until the server has synced all data files to disk. If the server is running +// with journaling, this acts the same as the J option, blocking until write +// operations have been committed to the journal. Cannot be used in +// combination with J. // // Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync // to force the server to wait for a group commit in case journaling is @@ -1777,7 +1941,7 @@ func (s *Session) Run(cmd interface{}, result interface{}) error { // used for reading operations to those with both tag "disk" set to // "ssd" and tag "rack" set to 1: // -// session.SelectSlaves(bson.D{{"disk", "ssd"}, {"rack", 1}}) +// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}}) // // Multiple sets of tags may be provided, in which case the used server // must match all tags within any one set. @@ -2138,6 +2302,8 @@ func (p *Pipe) Batch(n int) *Pipe { return p } +// mgo.v3: Use a single user-visible error type. + type LastError struct { Err string Code, N, Waited int @@ -2145,6 +2311,9 @@ type LastError struct { WTimeout bool UpdatedExisting bool `bson:"updatedExisting"` UpsertedId interface{} `bson:"upserted"` + + modified int + errors []error } func (err *LastError) Error() string { @@ -2181,6 +2350,13 @@ func IsDup(err error) bool { return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 + case *bulkError: + for _, ee := range e.errs { + if !IsDup(ee) { + return false + } + } + return true } return false } @@ -2190,7 +2366,7 @@ func IsDup(err error) bool { // happens while inserting the provided documents, the returned error will // be of type *LastError. 
func (c *Collection) Insert(docs ...interface{}) error { - _, err := c.writeQuery(&insertOp{c.FullName, docs, 0}) + _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true) return err } @@ -2206,7 +2382,15 @@ func (c *Collection) Insert(docs ...interface{}) error { // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Update(selector interface{}, update interface{}) error { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 0}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil && !lerr.UpdatedExisting { return ErrNotFound } @@ -2242,7 +2426,17 @@ type ChangeInfo struct { // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 2}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 2, + Multi: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{Updated: lerr.N} } @@ -2263,7 +2457,17 @@ func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info * // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 1}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 1, + Upsert: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{} if lerr.UpdatedExisting { @@ -2295,7 +2499,10 @@ func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeI // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) Remove(selector interface{}) error { - lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 1}) + if selector == nil { + selector = bson.D{} + } + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1, 1}, true) if err == nil && lerr != nil && lerr.N == 0 { return ErrNotFound } @@ -2321,7 +2528,10 @@ func (c *Collection) RemoveId(id interface{}) error { // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 0}) + if selector == nil { + selector = bson.D{} + } + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0, 0}, true) if err == nil && lerr != nil { info = &ChangeInfo{Removed: lerr.N} } @@ -2608,6 +2818,44 @@ func (q *Query) SetMaxScan(n int) *Query { return q } +// SetMaxTime constrains the query to stop after running for the specified time. +// +// When the time limit is reached MongoDB automatically cancels the query. +// This can be used to efficiently prevent and identify unexpectedly slow queries. +// +// A few important notes about the mechanism enforcing this limit: +// +// - Requests can block behind locking operations on the server, and that blocking +// time is not accounted for. 
In other words, the timer starts ticking only after
+// the actual start of the query when it initially acquires the appropriate lock;
+//
+// - Operations are interrupted only at interrupt points where an operation can be
+// safely aborted – the total execution time may exceed the specified value;
+//
+// - The limit can be applied to both CRUD operations and commands, but not all
+// commands are interruptible;
+//
+// - While iterating over results, computing follow up batches is included in the
+// total time and the iteration continues until the allotted time is over, but
+// network roundtrips are not taken into account for the limit.
+//
+// - This limit does not override the inactive cursor timeout for idle cursors
+// (default is 10 min).
+//
+// This mechanism was introduced in MongoDB 2.6.
+//
+// Relevant documentation:
+//
+//     http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in
+//
+func (q *Query) SetMaxTime(d time.Duration) *Query {
+	q.m.Lock()
+	q.op.options.MaxTimeMS = int(d / time.Millisecond)
+	q.op.hasOptions = true
+	q.m.Unlock()
+	return q
+}
+
 // Snapshot will force the performed query to make use of an available
 // index on the _id field to prevent the same document from being returned
 // more than once in a single iteration. This might happen without this
@@ -2639,6 +2887,22 @@ func (q *Query) Snapshot() *Query {
 	return q
 }
 
+// Comment adds a comment to the query to identify it in the database profiler output.
+//
+// Relevant documentation:
+//
+//     http://docs.mongodb.org/manual/reference/operator/meta/comment
+//     http://docs.mongodb.org/manual/reference/command/profile
+//     http://docs.mongodb.org/manual/administration/analyzing-mongodb-performance/#database-profiling
+//
+func (q *Query) Comment(comment string) *Query {
+	q.m.Lock()
+	q.op.options.Comment = comment
+	q.op.hasOptions = true
+	q.m.Unlock()
+	return q
+}
+
 // LogReplay enables an option that optimizes queries that are typically
 // made on the MongoDB oplog for replaying it. This is an internal
 // implementation aspect and most likely uninteresting for other uses.
@@ -2712,7 +2976,7 @@ func (q *Query) One(result interface{}) (err error) {
 	}
 	defer socket.Release()
 
-	op.flags |= session.slaveOkFlag()
+	session.prepareQuery(&op)
 	op.limit = -1
 
 	data, err := socket.SimpleQuery(&op)
@@ -2734,6 +2998,48 @@ func (q *Query) One(result interface{}) (err error) {
 	return checkQueryError(op.collection, data)
}
 
+// run duplicates the behavior of collection.Find(query).One(&result)
+// as performed by Database.Run, specializing the logic for running
+// database commands on a given socket.
+ session.m.RUnlock() + op.query = cmd + op.collection = db.Name + ".$cmd" + + // Query.One: + session.prepareQuery(&op) + op.limit = -1 + + data, err := socket.SimpleQuery(&op) + if err != nil { + return err + } + if data == nil { + return ErrNotFound + } + if result != nil { + err = bson.Unmarshal(data, result) + if err == nil { + var res bson.M + bson.Unmarshal(data, &res) + debugf("Run command unmarshaled: %#v, result: %#v", op, res) + } else { + debugf("Run command unmarshaling failed: %#v", op, err) + return err + } + } + return checkQueryError(op.collection, data) +} + // The DBRef type implements support for the database reference MongoDB // convention as supported by multiple drivers. This convention enables // cross-referencing documents between collections and databases using @@ -2903,8 +3209,9 @@ func (q *Query) Iter() *Iter { iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= session.slaveOkFlag() socket, err := session.acquireSocket(true) if err != nil { @@ -2984,8 +3291,9 @@ func (q *Query) Tail(timeout time.Duration) *Iter { iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= flagTailable | flagAwaitData | session.slaveOkFlag() + op.flags |= flagTailable | flagAwaitData socket, err := session.acquireSocket(true) if err != nil { @@ -3004,10 +3312,11 @@ func (q *Query) Tail(timeout time.Duration) *Iter { return iter } -func (s *Session) slaveOkFlag() (flag queryOpFlags) { +func (s *Session) prepareQuery(op *queryOp) { s.m.RLock() + op.mode = s.consistency if s.slaveOk { - flag = flagSlaveOk + op.flags |= flagSlaveOk } s.m.RUnlock() return @@ -3379,9 +3688,7 @@ type distinctCmd struct { Query interface{} ",omitempty" } -// Distinct returns a list of distinct values for the given key within -// the result set. The list of distinct values will be unmarshalled -// in the "values" key of the provided result parameter. +// Distinct unmarshals into result the list of distinct values for the given key. // // For example: // @@ -3712,7 +4019,7 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err if doc.LastError.N == 0 { return nil, ErrNotFound } - if doc.Value.Kind != 0x0A { + if doc.Value.Kind != 0x0A && result != nil { err = doc.Value.Unmarshal(result) if err != nil { return nil, err @@ -3740,7 +4047,7 @@ type BuildInfo struct { VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise GitVersion string `bson:"gitVersion"` OpenSSLVersion string `bson:"OpenSSLVersion"` - SysInfo string `bson:"sysInfo"` + SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+. Bits int Debug bool MaxObjectSize int `bson:"maxBsonObjectSize"` @@ -3782,6 +4089,9 @@ func (s *Session) BuildInfo() (info BuildInfo, err error) { // That information may be moved to another field if people need it. info.GitVersion = info.GitVersion[:i] } + if info.SysInfo == "deprecated" { + info.SysInfo = "" + } return } @@ -3792,14 +4102,16 @@ func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { // Read-only lock to check for previously reserved socket. s.m.RLock() - if s.masterSocket != nil { - socket := s.masterSocket + // If there is a slave socket reserved and its use is acceptable, take it as long + // as there isn't a master socket which would be preferred by the read preference mode. 
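[Editor's sketch] Several Query helpers are added or re-documented in the hunks above: SetMaxTime, Comment, Distinct, and Apply now tolerating a nil result. A combined sketch with placeholder collection and field names, assuming the same session and imports as earlier:

    // Cap a query's server-side execution time and tag it for the profiler.
    var docs []bson.M
    err := coll.Find(bson.M{"archived": false}).
        SetMaxTime(500 * time.Millisecond). // server aborts the query after ~500ms (MongoDB 2.6+)
        Comment("archived-scan").           // visible in the profiler / system.profile output
        All(&docs)

    // Distinct unmarshals the distinct values for a key into a slice.
    var statuses []string
    err = coll.Find(bson.M{"archived": false}).Distinct("status", &statuses)

    // Apply now accepts a nil result when only the side effect matters.
    change := mgo.Change{Update: bson.M{"$inc": bson.M{"n": 1}}}
    _, err = coll.Find(bson.M{"_id": 42}).Apply(change, nil)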
+ if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + socket := s.slaveSocket socket.Acquire() s.m.RUnlock() return socket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - socket := s.slaveSocket + if s.masterSocket != nil { + socket := s.masterSocket socket.Acquire() s.m.RUnlock() return socket, nil @@ -3811,17 +4123,17 @@ func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { s.m.Lock() defer s.m.Unlock() + if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + s.slaveSocket.Acquire() + return s.slaveSocket, nil + } if s.masterSocket != nil { s.masterSocket.Acquire() return s.masterSocket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - s.slaveSocket.Acquire() - return s.slaveSocket, nil - } // Still not good. We need a new socket. - sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) + sock, err := s.cluster().AcquireSocket(s.consistency, slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) if err != nil { return nil, err } @@ -3916,14 +4228,44 @@ func (iter *Iter) replyFunc() replyFunc { } } -// writeQuery runs the given modifying operation, potentially followed up +type writeCmdResult struct { + Ok bool + N int + NModified int `bson:"nModified"` + Upserted []struct { + Index int + Id interface{} `_id` + } + ConcernError writeConcernError `bson:"writeConcernError"` + Errors []writeCmdError `bson:"writeErrors"` +} + +type writeConcernError struct { + Code int + ErrMsg string +} + +type writeCmdError struct { + Index int + Code int + ErrMsg string +} + +func (r *writeCmdResult) QueryErrors() []error { + var errs []error + for _, err := range r.Errors { + errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg}) + } + return errs +} + +// writeOp runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it // will also be returned as err. -func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { +func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) { s := c.Database.Session - dbname := c.Database.Name - socket, err := s.acquireSocket(dbname == "local") + socket, err := s.acquireSocket(c.Database.Name == "local") if err != nil { return nil, err } @@ -3931,46 +4273,197 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { s.m.RLock() safeOp := s.safeOp + bypassValidation := s.bypassValidation s.m.RUnlock() + if socket.ServerInfo().MaxWireVersion >= 2 { + // Servers with a more recent write protocol benefit from write commands. + if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { + var errors []error + // Maximum batch size is 1000. Must split out in separate operations for compatibility. + all := op.documents + for i := 0; i < len(all); i += 1000 { + l := i + 1000 + if l > len(all) { + l = len(all) + } + op.documents = all[i:l] + lerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) + if err != nil { + errors = append(errors, lerr.errors...) 
+ if op.flags&1 == 0 { + return &LastError{errors: errors}, err + } + } + } + if len(errors) == 0 { + return nil, nil + } + return &LastError{errors: errors}, errors[0] + } + return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) + } else if updateOps, ok := op.(bulkUpdateOp); ok { + var lerr LastError + for _, updateOp := range updateOps { + oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + if err != nil { + lerr.N += oplerr.N + lerr.modified += oplerr.modified + lerr.errors = append(lerr.errors, oplerr.errors...) + if ordered { + break + } + } + } + if len(lerr.errors) == 0 { + return nil, nil + } + return &lerr, lerr.errors[0] + } else if deleteOps, ok := op.(bulkDeleteOp); ok { + var lerr LastError + for _, deleteOp := range deleteOps { + oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered) + if err != nil { + lerr.N += oplerr.N + lerr.modified += oplerr.modified + lerr.errors = append(lerr.errors, oplerr.errors...) + if ordered { + break + } + } + } + if len(lerr.errors) == 0 { + return nil, nil + } + return &lerr, lerr.errors[0] + } + return c.writeOpQuery(socket, safeOp, op, ordered) +} + +func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { if safeOp == nil { return nil, socket.Query(op) - } else { - var mutex sync.Mutex - var replyData []byte - var replyErr error - mutex.Lock() - query := *safeOp // Copy the data. - query.collection = dbname + ".$cmd" - query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - replyData = docData - replyErr = err - mutex.Unlock() - } - err = socket.Query(op, &query) + } + + var mutex sync.Mutex + var replyData []byte + var replyErr error + mutex.Lock() + query := *safeOp // Copy the data. + query.collection = c.Database.Name + ".$cmd" + query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + replyData = docData + replyErr = err + mutex.Unlock() + } + err = socket.Query(op, &query) + if err != nil { + return nil, err + } + mutex.Lock() // Wait. + if replyErr != nil { + return nil, replyErr // XXX TESTME + } + if hasErrMsg(replyData) { + // Looks like getLastError itself failed. + err = checkQueryError(query.collection, replyData) if err != nil { return nil, err } - mutex.Lock() // Wait. - if replyErr != nil { - return nil, replyErr // XXX TESTME + } + result := &LastError{} + bson.Unmarshal(replyData, &result) + debugf("Result from writing query: %#v", result) + if result.Err != "" { + return result, result + } + return result, nil +} + +func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered, bypassValidation bool) (lerr *LastError, err error) { + var writeConcern interface{} + if safeOp == nil { + writeConcern = bson.D{{"w", 0}} + } else { + writeConcern = safeOp.query.(*getLastError) + } + + var cmd bson.D + switch op := op.(type) { + case *insertOp: + // http://docs.mongodb.org/manual/reference/command/insert + cmd = bson.D{ + {"insert", c.Name}, + {"documents", op.documents}, + {"writeConcern", writeConcern}, + {"ordered", op.flags&1 == 0}, } - if hasErrMsg(replyData) { - // Looks like getLastError itself failed. 
- err = checkQueryError(query.collection, replyData) - if err != nil { - return nil, err - } + case *updateOp: + // http://docs.mongodb.org/manual/reference/command/update + cmd = bson.D{ + {"update", c.Name}, + {"updates", []interface{}{op}}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, + } + case bulkUpdateOp: + // http://docs.mongodb.org/manual/reference/command/update + cmd = bson.D{ + {"update", c.Name}, + {"updates", op}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, + } + case *deleteOp: + // http://docs.mongodb.org/manual/reference/command/delete + cmd = bson.D{ + {"delete", c.Name}, + {"deletes", []interface{}{op}}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, } - result := &LastError{} - bson.Unmarshal(replyData, &result) - debugf("Result from writing query: %#v", result) - if result.Err != "" { - return result, result + case bulkDeleteOp: + // http://docs.mongodb.org/manual/reference/command/delete + cmd = bson.D{ + {"delete", c.Name}, + {"deletes", op}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, } - return result, nil } - panic("unreachable") + if bypassValidation { + cmd = append(cmd, bson.DocElem{"bypassDocumentValidation", true}) + } + + var result writeCmdResult + err = c.Database.run(socket, cmd, &result) + debugf("Write command result: %#v (err=%v)", result, err) + lerr = &LastError{ + UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, + N: result.N, + + modified: result.NModified, + errors: result.QueryErrors(), + } + if len(result.Upserted) > 0 { + lerr.UpsertedId = result.Upserted[0].Id + } + if len(result.Errors) > 0 { + e := result.Errors[0] + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr + } else if result.ConcernError.Code != 0 { + e := result.ConcernError + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr + } + + if err == nil && safeOp == nil { + return nil, nil + } + return lerr, err } func hasErrMsg(d []byte) bool { diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session_test.go deleted file mode 100644 index c9e71a13..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/session_test.go +++ /dev/null @@ -1,3563 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "flag" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" -) - -func (s *S) TestRunString(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestRunValue(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run(M{"ping": 1}, &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestPing(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Just ensure the nonce has been received. - result := struct{}{} - err = session.Run("ping", &result) - - mgo.ResetStats() - - err = session.Ping() - c.Assert(err, IsNil) - - // Pretty boring. - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 1) - c.Assert(stats.ReceivedOps, Equals, 1) -} - -func (s *S) TestURLSingle(c *C) { - session, err := mgo.Dial("mongodb://localhost:40001/") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestURLMany(c *C) { - session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestURLParsing(c *C) { - urls := []string{ - "localhost:40001?foo=1&bar=2", - "localhost:40001?foo=1;bar=2", - } - for _, url := range urls { - session, err := mgo.Dial(url) - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "unsupported connection URL option: (foo=1|bar=2)") - } -} - -func (s *S) TestInsertFindOne(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 3}) - c.Assert(err, IsNil) - - result := struct{ A, B int }{} - - err = coll.Find(M{"a": 1}).Sort("b").One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 1) - c.Assert(result.B, Equals, 2) -} - -func (s *S) TestInsertFindOneNil(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Find(nil).One(nil) - c.Assert(err, ErrorMatches, "unauthorized.*|not authorized.*") -} - -func (s *S) TestInsertFindOneMap(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer 
session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - result := make(M) - err = coll.Find(M{"a": 1}).One(result) - c.Assert(err, IsNil) - c.Assert(result["a"], Equals, 1) - c.Assert(result["b"], Equals, 2) -} - -func (s *S) TestInsertFindAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 3, "b": 4}) - c.Assert(err, IsNil) - - type R struct{ A, B int } - var result []R - - assertResult := func() { - c.Assert(len(result), Equals, 2) - c.Assert(result[0].A, Equals, 1) - c.Assert(result[0].B, Equals, 2) - c.Assert(result[1].A, Equals, 3) - c.Assert(result[1].B, Equals, 4) - } - - // nil slice - err = coll.Find(nil).Sort("a").All(&result) - c.Assert(err, IsNil) - assertResult() - - // Previously allocated slice - allocd := make([]R, 5) - result = allocd - err = coll.Find(nil).Sort("a").All(&result) - c.Assert(err, IsNil) - assertResult() - - // Ensure result is backed by the originally allocated array - c.Assert(&result[0], Equals, &allocd[0]) - - // Non-pointer slice error - f := func() { coll.Find(nil).All(result) } - c.Assert(f, Panics, "result argument must be a slice address") - - // Non-slice error - f = func() { coll.Find(nil).All(new(int)) } - c.Assert(f, Panics, "result argument must be a slice address") -} - -func (s *S) TestFindRef(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1col1 := db1.C("col1") - - db2 := session.DB("db2") - db2col1 := db2.C("col1") - - err = db1col1.Insert(M{"_id": 1, "n": 1}) - c.Assert(err, IsNil) - err = db1col1.Insert(M{"_id": 2, "n": 2}) - c.Assert(err, IsNil) - err = db2col1.Insert(M{"_id": 2, "n": 3}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - ref1 := &mgo.DBRef{Collection: "col1", Id: 1} - ref2 := &mgo.DBRef{Collection: "col1", Id: 2, Database: "db2"} - - err = db1.FindRef(ref1).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) - - err = db1.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - err = db2.FindRef(ref1).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = db2.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - err = session.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - f := func() { session.FindRef(ref1).One(&result) } - c.Assert(f, PanicMatches, "Can't resolve database for &mgo.DBRef{Collection:\"col1\", Id:1, Database:\"\"}") -} - -func (s *S) TestDatabaseAndCollectionNames(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1col1 := db1.C("col1") - db1col2 := db1.C("col2") - - db2 := session.DB("db2") - db2col1 := db2.C("col3") - - err = db1col1.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = db1col2.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = db2col1.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db1", "db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) - } - - // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. 
- session.SetBatch(2) - - names, err = db1.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"}) - - names, err = db2.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col3", "system.indexes"}) -} - -func (s *S) TestSelect(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ A, B int }{} - - err = coll.Find(M{"a": 1}).Select(M{"b": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 0) - c.Assert(result.B, Equals, 2) -} - -func (s *S) TestInlineMap(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - var v, result1 struct { - A int - M map[string]int ",inline" - } - - v.A = 1 - v.M = map[string]int{"b": 2} - err = coll.Insert(v) - c.Assert(err, IsNil) - - noId := M{"_id": 0} - - err = coll.Find(nil).Select(noId).One(&result1) - c.Assert(err, IsNil) - c.Assert(result1.A, Equals, 1) - c.Assert(result1.M, DeepEquals, map[string]int{"b": 2}) - - var result2 M - err = coll.Find(nil).Select(noId).One(&result2) - c.Assert(err, IsNil) - c.Assert(result2, DeepEquals, M{"a": 1, "b": 2}) - -} - -func (s *S) TestUpdate(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.Update(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"k": 47}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestUpdateId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"_id": n, "n": n}) - c.Assert(err, IsNil) - } - - err = coll.UpdateId(42, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.FindId(42).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.UpdateId(47, M{"k": 47, "n": 47}) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.FindId(47).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestUpdateNil(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"k": 42, "n": 42}) - c.Assert(err, IsNil) - err = coll.Update(nil, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.Insert(M{"k": 45, "n": 45}) - c.Assert(err, IsNil) - _, err = coll.UpdateAll(nil, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - err = coll.Find(M{"k": 45}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 46) - -} - -func (s *S) TestUpsert(c *C) { - session, err := 
mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.Upsert(M{"k": 42}, M{"k": 42, "n": 24}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result := M{} - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 24) - - // Insert with internally created id. - info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.UpsertedId, NotNil) - - err = coll.Find(M{"k": 47}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) - - result = M{} - err = coll.Find(M{"_id": info.UpsertedId}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) - - // Insert with provided id. - info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - if s.versionAtLeast(2, 6) { - c.Assert(info.UpsertedId, Equals, 48) - } else { - c.Assert(info.UpsertedId, IsNil) // Unfortunate, but that's what Mongo gave us. - } - - err = coll.Find(M{"k": 48}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 48) -} - -func (s *S) TestUpsertId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"_id": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.UpsertId(42, M{"n": 24}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result := M{} - err = coll.FindId(42).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 24) - - info, err = coll.UpsertId(47, M{"_id": 47, "n": 47}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - if s.versionAtLeast(2, 6) { - c.Assert(info.UpsertedId, Equals, 47) - } else { - c.Assert(info.UpsertedId, IsNil) - } - - err = coll.FindId(47).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) -} - -func (s *S) TestUpdateAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 4) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 42) - - err = coll.Find(M{"k": 43}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - - err = coll.Find(M{"k": 44}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 45) - - if !s.versionAtLeast(2, 6) { - // 2.6 made this invalid. 
- info, err = coll.UpdateAll(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, Equals, nil) - c.Assert(info.Updated, Equals, 0) - } -} - -func (s *S) TestRemove(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - err = coll.Remove(M{"n": M{"$gt": 42}}) - c.Assert(err, IsNil) - - result := &struct{ N int }{} - err = coll.Find(M{"n": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) - - err = coll.Find(M{"n": 43}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"n": 44}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 44) -} - -func (s *S) TestRemoveId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) - c.Assert(err, IsNil) - - err = coll.RemoveId(41) - c.Assert(err, IsNil) - - c.Assert(coll.FindId(40).One(nil), IsNil) - c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) - c.Assert(coll.FindId(42).One(nil), IsNil) -} - -func (s *S) TestRemoveAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.RemoveAll(M{"n": M{"$gt": 42}}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 4) - c.Assert(info.UpsertedId, IsNil) - - result := &struct{ N int }{} - err = coll.Find(M{"n": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) - - err = coll.Find(M{"n": 43}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"n": 44}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestDropDatabase(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1.C("col").Insert(M{"_id": 1}) - - db2 := session.DB("db2") - db2.C("col").Insert(M{"_id": 1}) - - err = db1.DropDatabase() - c.Assert(err, IsNil) - - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db2", "local"}) - } - - err = db2.DropDatabase() - c.Assert(err, IsNil) - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string(nil)) { - // 2.4+ has "local" as well. 
- c.Assert(names, DeepEquals, []string{"local"}) - } -} - -func (s *S) TestDropCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("db1") - db.C("col1").Insert(M{"_id": 1}) - db.C("col2").Insert(M{"_id": 1}) - - err = db.C("col1").DropCollection() - c.Assert(err, IsNil) - - names, err := db.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col2", "system.indexes"}) - - err = db.C("col2").DropCollection() - c.Assert(err, IsNil) - - names, err = db.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"system.indexes"}) -} - -func (s *S) TestCreateCollectionCapped(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - Capped: true, - MaxBytes: 1024, - MaxDocs: 3, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - ns := []int{1, 2, 3, 4, 5} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(nil).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) -} - -func (s *S) TestCreateCollectionNoIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - DisableIdIndex: true, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(indexes, HasLen, 0) -} - -func (s *S) TestCreateCollectionForceIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - ForceIdIndex: true, - Capped: true, - MaxBytes: 1024, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(indexes, HasLen, 1) -} - -func (s *S) TestIsDupValues(c *C) { - c.Assert(mgo.IsDup(nil), Equals, false) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 1}), Equals, false) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 11000}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11000}), Equals, true) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 11001}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true) - lerr := &mgo.LastError{Code: 16460, Err: "error inserting 1 documents to shard ... 
caused by :: E11000 duplicate key error index: ..."} - c.Assert(mgo.IsDup(lerr), Equals, true) -} - -func (s *S) TestIsDupPrimary(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupUnique(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - index := mgo.Index{ - Key: []string{"a", "b"}, - Unique: true, - } - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(index) - c.Assert(err, IsNil) - - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupCapped(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - ForceIdIndex: true, - Capped: true, - MaxBytes: 1024, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 1}) - // The error was different for capped collections before 2.6. - c.Assert(err, ErrorMatches, ".*duplicate key.*") - // The issue is reduced by using IsDup. - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupFindAndModify(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(mgo.Index{Key: []string{"n"}, Unique: true}) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"n": 2}) - c.Assert(err, IsNil) - _, err = coll.Find(M{"n": 1}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, bson.M{}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestFindAndModify(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": 42}) - - session.SetMode(mgo.Monotonic, true) - - result := M{} - info, err := coll.Find(M{"n": 42}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 42) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 50}).Apply(mgo.Change{Upsert: true, Update: M{"n": 51, "o": 52}}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, NotNil) - - result = M{} - info, err = coll.Find(nil).Sort("-n").Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 52) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, 
IsNil) - - result = M{} - info, err = coll.Find(M{"n": 52}).Select(M{"o": 1}).Apply(mgo.Change{Remove: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], IsNil) - c.Assert(result["o"], Equals, 52) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 60}).Apply(mgo.Change{Remove: true}, result) - c.Assert(err, Equals, mgo.ErrNotFound) - c.Assert(len(result), Equals, 0) - c.Assert(info, IsNil) -} - -func (s *S) TestFindAndModifyBug997828(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": "not-a-number"}) - - result := make(M) - _, err = coll.Find(M{"n": "not-a-number"}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) - c.Assert(err, ErrorMatches, `(exception: )?Cannot apply \$inc .*`) - if s.versionAtLeast(2, 1) { - qerr, _ := err.(*mgo.QueryError) - c.Assert(qerr, NotNil, Commentf("err: %#v", err)) - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( - c.Assert(qerr.Code, Equals, 16837) - } else { - c.Assert(qerr.Code, Equals, 10140) - } - } else { - lerr, _ := err.(*mgo.LastError) - c.Assert(lerr, NotNil, Commentf("err: %#v", err)) - c.Assert(lerr.Code, Equals, 10140) - } -} - -func (s *S) TestCountCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) -} - -func (s *S) TestCountQuery(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(M{"n": M{"$gt": 40}}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestCountQuerySorted(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(M{"n": M{"$gt": 40}}).Sort("n").Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestCountSkipLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(nil).Skip(1).Limit(3).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - n, err = coll.Find(nil).Skip(1).Limit(5).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) -} - -func (s *S) TestQueryExplain(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - m := M{} - query := coll.Find(nil).Limit(2) - err = query.Explain(m) - c.Assert(err, IsNil) - if m["queryPlanner"] != nil { - c.Assert(m["executionStats"].(M)["totalDocsExamined"], Equals, 2) - } else { - c.Assert(m["cursor"], Equals, 
"BasicCursor") - c.Assert(m["nscanned"], Equals, 2) - c.Assert(m["n"], Equals, 2) - } - - n := 0 - var result M - iter := query.Iter() - for iter.Next(&result) { - n++ - } - c.Assert(iter.Close(), IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestQueryMaxScan(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - query := coll.Find(nil).SetMaxScan(2) - var result []M - err = query.All(&result) - c.Assert(err, IsNil) - c.Assert(result, HasLen, 2) -} - -func (s *S) TestQueryHint(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.EnsureIndexKey("a") - - m := M{} - err = coll.Find(nil).Hint("a").Explain(m) - c.Assert(err, IsNil) - - if m["queryPlanner"] != nil { - m = m["queryPlanner"].(M) - m = m["winningPlan"].(M) - m = m["inputStage"].(M) - c.Assert(m["indexName"], Equals, "a_1") - } else { - c.Assert(m["indexBounds"], NotNil) - c.Assert(m["indexBounds"].(M)["a"], NotNil) - } -} - -func (s *S) TestFindOneNotFound(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct{ A, B int }{} - err = coll.Find(M{"a": 1}).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - c.Assert(err, ErrorMatches, "not found") - c.Assert(err == mgo.ErrNotFound, Equals, true) -} - -func (s *S) TestFindNil(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - err = coll.Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) -} - -func (s *S) TestFindId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 41, "n": 41}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 42, "n": 42}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - err = coll.FindId(42).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) -} - -func (s *S) TestFindIterAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter() - result := struct{ N int }{} - for i := 2; i < 7; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. 
- c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindIterTwiceWithSameQuery(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 40; i != 47; i++ { - coll.Insert(M{"n": i}) - } - - query := coll.Find(M{}).Sort("n") - - result1 := query.Skip(1).Iter() - result2 := query.Skip(2).Iter() - - result := struct{ N int }{} - ok := result2.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, 42) - ok = result1.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, 41) -} - -func (s *S) TestFindIterWithoutResults(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 42}) - - iter := coll.Find(M{"n": 0}).Iter() - - result := struct{ N int }{} - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(result.N, Equals, 0) -} - -func (s *S) TestFindIterLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3) - iter := query.Iter() - - result := struct{ N int }{} - for i := 2; i < 5; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -var cursorTimeout = flag.Bool("cursor-timeout", false, "Enable cursor timeout test") - -func (s *S) TestFindIterCursorTimeout(c *C) { - if !*cursorTimeout { - c.Skip("-cursor-timeout") - } - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - type Doc struct { - Id int "_id" - } - - coll := session.DB("test").C("test") - coll.Remove(nil) - for i := 0; i < 100; i++ { - err = coll.Insert(Doc{i}) - c.Assert(err, IsNil) - } - - session.SetBatch(1) - iter := coll.Find(nil).Iter() - var doc Doc - if !iter.Next(&doc) { - c.Fatalf("iterator failed to return any documents") - } - - for i := 10; i > 0; i-- { - c.Logf("Sleeping... %d minutes to go...", i) - time.Sleep(1*time.Minute + 2*time.Second) - } - - // Drain any existing documents that were fetched. - if !iter.Next(&doc) { - c.Fatalf("iterator with timed out cursor failed to return previously cached document") - } - if iter.Next(&doc) { - c.Fatalf("timed out cursor returned document") - } - - c.Assert(iter.Err(), Equals, mgo.ErrCursor) -} - -func (s *S) TestTooManyItemsLimitBug(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) - - mgo.SetDebug(false) - coll := session.DB("mydb").C("mycoll") - words := strings.Split("foo bar baz", " ") - for i := 0; i < 5; i++ { - words = append(words, words...) 
- } - doc := bson.D{{"words", words}} - inserts := 10000 - limit := 5000 - iters := 0 - c.Assert(inserts > limit, Equals, true) - for i := 0; i < inserts; i++ { - err := coll.Insert(&doc) - c.Assert(err, IsNil) - } - iter := coll.Find(nil).Limit(limit).Iter() - for iter.Next(&doc) { - if iters%100 == 0 { - c.Logf("Seen %d docments", iters) - } - iters++ - } - c.Assert(iter.Close(), IsNil) - c.Assert(iters, Equals, limit) -} - -func serverCursorsOpen(session *mgo.Session) int { - var result struct { - Cursors struct { - TotalOpen int `bson:"totalOpen"` - TimedOut int `bson:"timedOut"` - } - } - err := session.Run("serverStatus", &result) - if err != nil { - panic(err) - } - return result.Cursors.TotalOpen -} - -func (s *S) TestFindIterLimitWithMore(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Insane amounts of logging otherwise due to the - // amount of data being shuffled. - mgo.SetDebug(false) - defer mgo.SetDebug(true) - - // Should amount to more than 4MB bson payload, - // the default limit per result chunk. - const total = 4096 - var d struct{ A [1024]byte } - docs := make([]interface{}, total) - for i := 0; i < total; i++ { - docs[i] = &d - } - err = coll.Insert(docs...) - c.Assert(err, IsNil) - - n, err := coll.Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, total) - - // First, try restricting to a single chunk with a negative limit. - nresults := 0 - iter := coll.Find(nil).Limit(-total).Iter() - var discard struct{} - for iter.Next(&discard) { - nresults++ - } - if nresults < total/2 || nresults >= total { - c.Fatalf("Bad result size with negative limit: %d", nresults) - } - - cursorsOpen := serverCursorsOpen(session) - - // Try again, with a positive limit. Should reach the end now, - // using multiple chunks. - nresults = 0 - iter = coll.Find(nil).Limit(total).Iter() - for iter.Next(&discard) { - nresults++ - } - c.Assert(nresults, Equals, total) - - // Ensure the cursor used is properly killed. - c.Assert(serverCursorsOpen(session), Equals, cursorsOpen) - - // Edge case, -MinInt == -MinInt. - nresults = 0 - iter = coll.Find(nil).Limit(math.MinInt32).Iter() - for iter.Next(&discard) { - nresults++ - } - if nresults < total/2 || nresults >= total { - c.Fatalf("Bad result size with MinInt32 limit: %d", nresults) - } -} - -func (s *S) TestFindIterLimitWithBatch(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - // Ping the database to ensure the nonce has been received already. - c.Assert(session.Ping(), IsNil) - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3).Batch(2) - iter := query.Iter() - result := struct{ N int }{} - for i := 2; i < 5; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. 
- - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindIterSortWithBatch(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - // Without this, the logic above breaks because Mongo refuses to - // return a cursor with an in-memory sort. - coll.EnsureIndexKey("n") - - // Ping the database to ensure the nonce has been received already. - c.Assert(session.Ping(), IsNil) - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$lte": 44}}).Sort("-n").Batch(2) - iter := query.Iter() - ns = []int{46, 45, 44, 43, 42, 41, 40} - result := struct{ N int }{} - for i := 2; i < len(ns); i++ { - c.Logf("i=%d", i) - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -// Test tailable cursors in a situation where Next has to sleep to -// respect the timeout requested on Tail. -func (s *S) TestFindTailTimeoutWithSleep(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - timeout := 3 * time.Second - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(timeout) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - // The internal AwaitData timing of MongoDB is around 2 seconds, - // so this should force mgo to sleep at least once by itself to - // respect the requested timeout. 
- time.Sleep(timeout + 5e8*time.Nanosecond) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 5) - c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - started := time.Now() - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, true) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - - c.Log("Will now reuse the timed out tail cursor...") - - coll.Insert(M{"n": 48}) - ok = iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Close(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 48) -} - -// Test tailable cursors in a situation where Next never gets to sleep once -// to respect the timeout requested on Tail. -func (s *S) TestFindTailTimeoutNoSleep(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - timeout := 1 * time.Second - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(timeout) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - // The internal AwaitData timing of MongoDB is around 2 seconds, - // so this item should arrive within the AwaitData threshold. - time.Sleep(5e8) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. 
If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - started := time.Now() - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, true) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - - c.Log("Will now reuse the timed out tail cursor...") - - coll.Insert(M{"n": 48}) - ok = iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Close(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 48) -} - -// Test tailable cursors in a situation where Next never gets to sleep once -// to respect the timeout requested on Tail. -func (s *S) TestFindTailNoTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(-1) - c.Assert(err, IsNil) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - time.Sleep(5e8) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - gotNext := make(chan bool) - go func() { - ok := iter.Next(&result) - gotNext <- ok - }() - - select { - case ok := <-gotNext: - c.Fatalf("Next returned: %v", ok) - case <-time.After(3e9): - // Good. Should still be sleeping at that point. - } - - // Closing the session should cause Next to return. 
- session.Close() - - select { - case ok := <-gotNext: - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), ErrorMatches, "Closed explicitly") - c.Assert(iter.Timeout(), Equals, false) - case <-time.After(1e9): - c.Fatal("Closing the session did not unblock Next") - } -} - -func (s *S) TestIterNextResetsResult(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{1, 2, 3} - for _, n := range ns { - coll.Insert(M{"n" + strconv.Itoa(n): n}) - } - - query := coll.Find(nil).Sort("$natural") - - i := 0 - var sresult *struct{ N1, N2, N3 int } - iter := query.Iter() - for iter.Next(&sresult) { - switch i { - case 0: - c.Assert(sresult.N1, Equals, 1) - c.Assert(sresult.N2+sresult.N3, Equals, 0) - case 1: - c.Assert(sresult.N2, Equals, 2) - c.Assert(sresult.N1+sresult.N3, Equals, 0) - case 2: - c.Assert(sresult.N3, Equals, 3) - c.Assert(sresult.N1+sresult.N2, Equals, 0) - } - i++ - } - c.Assert(iter.Close(), IsNil) - - i = 0 - var mresult M - iter = query.Iter() - for iter.Next(&mresult) { - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, M{"n3": 3}) - } - i++ - } - c.Assert(iter.Close(), IsNil) - - i = 0 - var iresult interface{} - iter = query.Iter() - for iter.Next(&iresult) { - mresult, ok := iresult.(bson.M) - c.Assert(ok, Equals, true, Commentf("%#v", iresult)) - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) - } - i++ - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestFindForOnIter(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Iter() - - i := 2 - var result *struct{ N int } - err = iter.For(&result, func() error { - c.Assert(i < 7, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindFor(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - - i := 2 - var result *struct{ N int } - err = query.For(&result, func() error { - c.Assert(i < 7, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - session.Refresh() // Release socket. 
- - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindForStopOnError(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - query := coll.Find(M{"n": M{"$gte": 42}}) - i := 2 - var result *struct{ N int } - err = query.For(&result, func() error { - c.Assert(i < 4, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - return fmt.Errorf("stop!") - } - i++ - return nil - }) - c.Assert(err, ErrorMatches, "stop!") -} - -func (s *S) TestFindForResetsResult(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{1, 2, 3} - for _, n := range ns { - coll.Insert(M{"n" + strconv.Itoa(n): n}) - } - - query := coll.Find(nil).Sort("$natural") - - i := 0 - var sresult *struct{ N1, N2, N3 int } - err = query.For(&sresult, func() error { - switch i { - case 0: - c.Assert(sresult.N1, Equals, 1) - c.Assert(sresult.N2+sresult.N3, Equals, 0) - case 1: - c.Assert(sresult.N2, Equals, 2) - c.Assert(sresult.N1+sresult.N3, Equals, 0) - case 2: - c.Assert(sresult.N3, Equals, 3) - c.Assert(sresult.N1+sresult.N2, Equals, 0) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - i = 0 - var mresult M - err = query.For(&mresult, func() error { - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, M{"n3": 3}) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - i = 0 - var iresult interface{} - err = query.For(&iresult, func() error { - mresult, ok := iresult.(bson.M) - c.Assert(ok, Equals, true, Commentf("%#v", iresult)) - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) - } - i++ - return nil - }) - c.Assert(err, IsNil) -} - -func (s *S) TestFindIterSnapshot(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Insane amounts of logging otherwise due to the - // amount of data being shuffled. - mgo.SetDebug(false) - defer mgo.SetDebug(true) - - coll := session.DB("mydb").C("mycoll") - - var a [1024000]byte - - for n := 0; n < 10; n++ { - err := coll.Insert(M{"_id": n, "n": n, "a1": &a}) - c.Assert(err, IsNil) - } - - query := coll.Find(M{"n": M{"$gt": -1}}).Batch(2).Prefetch(0) - query.Snapshot() - iter := query.Iter() - - seen := map[int]bool{} - result := struct { - Id int "_id" - }{} - for iter.Next(&result) { - if len(seen) == 2 { - // Grow all entries so that they have to move. - // Backwards so that the order is inverted. 
- for n := 10; n >= 0; n-- { - _, err := coll.Upsert(M{"_id": n}, M{"$set": M{"a2": &a}}) - c.Assert(err, IsNil) - } - } - if seen[result.Id] { - c.Fatalf("seen duplicated key: %d", result.Id) - } - seen[result.Id] = true - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestSort(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - coll.Insert(M{"a": 1, "b": 1}) - coll.Insert(M{"a": 2, "b": 2}) - coll.Insert(M{"a": 2, "b": 1}) - coll.Insert(M{"a": 0, "b": 1}) - coll.Insert(M{"a": 2, "b": 0}) - coll.Insert(M{"a": 0, "b": 2}) - coll.Insert(M{"a": 1, "b": 2}) - coll.Insert(M{"a": 0, "b": 0}) - coll.Insert(M{"a": 1, "b": 0}) - - query := coll.Find(M{}) - query.Sort("-a") // Should be ignored. - query.Sort("-b", "a") - iter := query.Iter() - - l := make([]int, 18) - r := struct{ A, B int }{} - for i := 0; i != len(l); i += 2 { - ok := iter.Next(&r) - c.Assert(ok, Equals, true) - c.Assert(err, IsNil) - l[i] = r.A - l[i+1] = r.B - } - - c.Assert(l, DeepEquals, []int{0, 2, 1, 2, 2, 2, 0, 1, 1, 1, 2, 1, 0, 0, 1, 0, 2, 0}) -} - -func (s *S) TestSortWithBadArgs(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - f1 := func() { coll.Find(nil).Sort("") } - f2 := func() { coll.Find(nil).Sort("+") } - f3 := func() { coll.Find(nil).Sort("foo", "-") } - - for _, f := range []func(){f1, f2, f3} { - c.Assert(f, PanicMatches, "Sort: empty field name") - } -} - -func (s *S) TestSortScoreText(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(mgo.Index{ - Key: []string{"$text:a", "$text:b"}, - }) - c.Assert(err, IsNil) - - err = coll.Insert(M{ - "a": "none", - "b": "twice: foo foo", - }) - c.Assert(err, IsNil) - err = coll.Insert(M{ - "a": "just once: foo", - "b": "none", - }) - c.Assert(err, IsNil) - err = coll.Insert(M{ - "a": "many: foo foo foo", - "b": "none", - }) - c.Assert(err, IsNil) - err = coll.Insert(M{ - "a": "none", - "b": "none", - "c": "ignore: foo", - }) - c.Assert(err, IsNil) - - query := coll.Find(M{"$text": M{"$search": "foo"}}) - query.Select(M{"score": M{"$meta": "textScore"}}) - query.Sort("$textScore:score") - iter := query.Iter() - - var r struct{ A, B string } - var results []string - for iter.Next(&r) { - results = append(results, r.A, r.B) - } - - c.Assert(results, DeepEquals, []string{ - "many: foo foo foo", "none", - "none", "twice: foo foo", - "just once: foo", "none", - }) -} - -func (s *S) TestPrefetching(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - mgo.SetDebug(false) - docs := make([]interface{}, 800) - for i := 0; i != 600; i++ { - docs[i] = bson.D{{"n", i}} - } - coll.Insert(docs...) - - for testi := 0; testi < 5; testi++ { - mgo.ResetStats() - - var iter *mgo.Iter - var beforeMore int - - switch testi { - case 0: // The default session value. - session.SetBatch(100) - iter = coll.Find(M{}).Iter() - beforeMore = 75 - - case 2: // Changing the session value. - session.SetBatch(100) - session.SetPrefetch(0.27) - iter = coll.Find(M{}).Iter() - beforeMore = 73 - - case 1: // Changing via query methods. - iter = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter() - beforeMore = 73 - - case 3: // With prefetch on first document. 
- iter = coll.Find(M{}).Prefetch(1.0).Batch(100).Iter() - beforeMore = 0 - - case 4: // Without prefetch. - iter = coll.Find(M{}).Prefetch(0).Batch(100).Iter() - beforeMore = 100 - } - - pings := 0 - for batchi := 0; batchi < len(docs)/100-1; batchi++ { - c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi) - var result struct{ N int } - for i := 0; i < beforeMore; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) - } - beforeMore = 99 - c.Logf("Done iterating.") - - session.Run("ping", nil) // Roundtrip to settle down. - pings++ - - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*100+pings) - - c.Logf("Iterating over one more document on batch %d", batchi) - ok := iter.Next(&result) - c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) - c.Logf("Done iterating.") - - session.Run("ping", nil) // Roundtrip to settle down. - pings++ - - stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*100+pings) - } - } -} - -func (s *S) TestSafeSetting(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Check the default - safe := session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 0) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, false) - - // Tweak it - session.SetSafe(&mgo.Safe{W: 1, WTimeout: 2, FSync: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 1) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Reset it again. - session.SetSafe(&mgo.Safe{}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 0) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, false) - - // Ensure safety to something more conservative. - session.SetSafe(&mgo.Safe{W: 5, WTimeout: 6, J: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 5) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 6) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, true) - - // Ensure safety to something less conservative won't change it. - session.EnsureSafe(&mgo.Safe{W: 4, WTimeout: 7}) - safe = session.Safe() - c.Assert(safe.W, Equals, 5) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 6) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, true) - - // But to something more conservative will. - session.EnsureSafe(&mgo.Safe{W: 6, WTimeout: 4, FSync: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 6) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 4) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Even more conservative. - session.EnsureSafe(&mgo.Safe{WMode: "majority", WTimeout: 2}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "majority") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // WMode always overrides, whatever it is, but J doesn't. - session.EnsureSafe(&mgo.Safe{WMode: "something", J: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "something") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // EnsureSafe with nil does nothing. 
- session.EnsureSafe(nil) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "something") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Changing the safety of a cloned session doesn't touch the original. - clone := session.Clone() - defer clone.Close() - clone.EnsureSafe(&mgo.Safe{WMode: "foo"}) - safe = session.Safe() - c.Assert(safe.WMode, Equals, "something") -} - -func (s *S) TestSafeInsert(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Insert an element with a predefined key. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - mgo.ResetStats() - - // Session should be safe by default, so inserting it again must fail. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, ".*E11000 duplicate.*") - c.Assert(err.(*mgo.LastError).Code, Equals, 11000) - - // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) - - mgo.ResetStats() - - // If we disable safety, though, it won't complain. - session.SetSafe(nil) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Must have sent a single operation this time (just the INSERT_OP) - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 1) -} - -func (s *S) TestSafeParameters(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Tweak the safety parameters to something unachievable. - session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes") - if !s.versionAtLeast(2, 6) { - // 2.6 turned it into a query error. - c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) - } -} - -func (s *S) TestQueryErrorOne(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct { - Err string "$err" - }{} - - err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(&result) - c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") - c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( - c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) - } else { - c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) - } - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") -} - -func (s *S) TestQueryErrorNext(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct { - Err string "$err" - }{} - - iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - - err = iter.Close() - c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") - c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. 
:-( - c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) - } else { - c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) - } - c.Assert(iter.Err(), Equals, err) - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") -} - -var indexTests = []struct { - index mgo.Index - expected M -}{{ - mgo.Index{ - Key: []string{"a"}, - Background: true, - }, - M{ - "name": "a_1", - "key": M{"a": 1}, - "ns": "mydb.mycoll", - "background": true, - }, -}, { - mgo.Index{ - Key: []string{"a", "-b"}, - Unique: true, - DropDups: true, - }, - M{ - "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll", - "unique": true, - "dropDups": true, - }, -}, { - mgo.Index{ - Key: []string{"@loc_old"}, // Obsolete - Min: -500, - Max: 500, - Bits: 32, - }, - M{ - "name": "loc_old_2d", - "key": M{"loc_old": "2d"}, - "ns": "mydb.mycoll", - "min": -500, - "max": 500, - "bits": 32, - }, -}, { - mgo.Index{ - Key: []string{"$2d:loc"}, - Min: -500, - Max: 500, - Bits: 32, - }, - M{ - "name": "loc_2d", - "key": M{"loc": "2d"}, - "ns": "mydb.mycoll", - "min": -500, - "max": 500, - "bits": 32, - }, -}, { - mgo.Index{ - Key: []string{"$text:a", "$text:b"}, - Weights: map[string]int{"b": 42}, - }, - M{ - "name": "a_text_b_text", - "key": M{"_fts": "text", "_ftsx": 1}, - "ns": "mydb.mycoll", - "weights": M{"a": 1, "b": 42}, - "default_language": "english", - "language_override": "language", - "textIndexVersion": 2, - }, -}, { - mgo.Index{ - Key: []string{"$text:a"}, - DefaultLanguage: "portuguese", - LanguageOverride: "idioma", - }, - M{ - "name": "a_text", - "key": M{"_fts": "text", "_ftsx": 1}, - "ns": "mydb.mycoll", - "weights": M{"a": 1}, - "default_language": "portuguese", - "language_override": "idioma", - "textIndexVersion": 2, - }, -}, { - mgo.Index{ - Key: []string{"$text:$**"}, - }, - M{ - "name": "$**_text", - "key": M{"_fts": "text", "_ftsx": 1}, - "ns": "mydb.mycoll", - "weights": M{"$**": 1}, - "default_language": "english", - "language_override": "language", - "textIndexVersion": 2, - }, -}} - -func (s *S) TestEnsureIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - idxs := session.DB("mydb").C("system.indexes") - - for _, test := range indexTests { - err = coll.EnsureIndex(test.index) - c.Assert(err, IsNil) - - obtained := M{} - err = idxs.Find(M{"name": test.expected["name"]}).One(obtained) - c.Assert(err, IsNil) - - delete(obtained, "v") - - if s.versionAtLeast(2, 7) { - // Was deprecated in 2.6, and not being reported by 2.7+. - delete(test.expected, "dropDups") - } - - c.Assert(obtained, DeepEquals, test.expected) - - err = coll.DropIndex(test.index.Key...) 
- c.Assert(err, IsNil) - } -} - -func (s *S) TestEnsureIndexWithBadInfo(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(mgo.Index{}) - c.Assert(err, ErrorMatches, "invalid index key:.*") - - err = coll.EnsureIndex(mgo.Index{Key: []string{""}}) - c.Assert(err, ErrorMatches, "invalid index key:.*") -} - -func (s *S) TestEnsureIndexWithUnsafeSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - session.SetSafe(nil) - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Should fail since there are duplicated entries. - index := mgo.Index{ - Key: []string{"a"}, - Unique: true, - } - - err = coll.EnsureIndex(index) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") -} - -func (s *S) TestEnsureIndexKey(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("a", "-b") - c.Assert(err, IsNil) - - sysidx := session.DB("mydb").C("system.indexes") - - result1 := M{} - err = sysidx.Find(M{"name": "a_1"}).One(result1) - c.Assert(err, IsNil) - - result2 := M{} - err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) - c.Assert(err, IsNil) - - delete(result1, "v") - expected1 := M{ - "name": "a_1", - "key": M{"a": 1}, - "ns": "mydb.mycoll", - } - c.Assert(result1, DeepEquals, expected1) - - delete(result2, "v") - expected2 := M{ - "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll", - } - c.Assert(result2, DeepEquals, expected2) -} - -func (s *S) TestEnsureIndexDropIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("-b") - c.Assert(err, IsNil) - - err = coll.DropIndex("-b") - c.Assert(err, IsNil) - - sysidx := session.DB("mydb").C("system.indexes") - dummy := &struct{}{} - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "b_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") - c.Assert(err, ErrorMatches, "index not found.*") -} - -func (s *S) TestEnsureIndexCaching(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - mgo.ResetStats() - - // Second EnsureIndex should be cached and do nothing. - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 0) - - // Resetting the cache should make it contact the server again. - session.ResetIndexCache() - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) - - // Dropping the index should also drop the cached index key. 
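- // so the EnsureIndexKey below has to contact the server again: the two
- // operations counted are the index write plus its getLastError confirmation.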
- err = coll.DropIndex("a") - c.Assert(err, IsNil) - - mgo.ResetStats() - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) -} - -func (s *S) TestEnsureIndexGetIndexes(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("-b") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - // Obsolete. - err = coll.EnsureIndexKey("@c") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("$2d:d") - c.Assert(err, IsNil) - - // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. - session.SetBatch(2) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - - c.Assert(indexes[0].Name, Equals, "_id_") - c.Assert(indexes[1].Name, Equals, "a_1") - c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) - c.Assert(indexes[2].Name, Equals, "b_-1") - c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) - c.Assert(indexes[3].Name, Equals, "c_2d") - c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) - c.Assert(indexes[4].Name, Equals, "d_2d") - c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) -} - -func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: '2d'})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: -1, e: 1})"}}, nil) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - - c.Assert(indexes[0].Name, Equals, "_id_") - c.Assert(indexes[1].Name, Equals, "a_1") - c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) - c.Assert(indexes[2].Name, Equals, "b_-1") - c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) - c.Assert(indexes[3].Name, Equals, "c_2d") - c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) - c.Assert(indexes[4].Name, Equals, "d_-1_e_1") - c.Assert(indexes[4].Key, DeepEquals, []string{"-d", "e"}) -} - -var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)") - -func (s *S) TestEnsureIndexExpireAfter(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - session.SetSafe(nil) - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": 1, "t": time.Now().Add(-120 * time.Second)}) - c.Assert(err, IsNil) - err = coll.Insert(M{"n": 2, "t": time.Now()}) - c.Assert(err, IsNil) - - // Should fail since there are duplicated entries. 
- index := mgo.Index{ - Key: []string{"t"}, - ExpireAfter: 1 * time.Minute, - } - - err = coll.EnsureIndex(index) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - c.Assert(indexes[1].Name, Equals, "t_1") - c.Assert(indexes[1].ExpireAfter, Equals, 1*time.Minute) - - if *testTTL { - worked := false - stop := time.Now().Add(70 * time.Second) - for time.Now().Before(stop) { - n, err := coll.Count() - c.Assert(err, IsNil) - if n == 1 { - worked = true - break - } - c.Assert(n, Equals, 2) - c.Logf("Still has 2 entries...") - time.Sleep(1 * time.Second) - } - if !worked { - c.Fatalf("TTL index didn't work") - } - } -} - -func (s *S) TestDistinct(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - var result []int - err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result) - - sort.IntSlice(result).Sort() - c.Assert(result, DeepEquals, []int{3, 4, 6}) -} - -func (s *S) TestMapReduce(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - } - var result []struct { - Id int "_id" - Value int - } - - info, err := coll.Find(M{"n": M{"$gt": 2}}).MapReduce(job, &result) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 4) - c.Assert(info.EmitCount, Equals, 4) - c.Assert(info.OutputCount, Equals, 3) - c.Assert(info.VerboseTime, IsNil) - - expected := map[int]int{3: 1, 4: 2, 6: 1} - for _, item := range result { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } -} - -func (s *S) TestMapReduceFinalize(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1) }", - Reduce: "function(key, values) { return Array.sum(values) }", - Finalize: "function(key, count) { return {count: count} }", - } - var result []struct { - Id int "_id" - Value struct{ Count int } - } - _, err = coll.Find(nil).MapReduce(job, &result) - c.Assert(err, IsNil) - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - for _, item := range result { - c.Logf("Item: %#v", &item) - c.Assert(item.Value.Count, Equals, expected[item.Id]) - expected[item.Id] = -1 - } -} - -func (s *S) TestMapReduceToCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: "mr", - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 7) - c.Assert(info.EmitCount, Equals, 7) - c.Assert(info.OutputCount, Equals, 5) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "mydb") - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - var item *struct { - Id int 
"_id" - Value int - } - mr := session.DB("mydb").C("mr") - iter := mr.Find(nil).Iter() - for iter.Next(&item) { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestMapReduceToOtherDb(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}}, - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 7) - c.Assert(info.EmitCount, Equals, 7) - c.Assert(info.OutputCount, Equals, 5) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "otherdb") - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - var item *struct { - Id int "_id" - Value int - } - mr := session.DB("otherdb").C("mr") - iter := mr.Find(nil).Iter() - for iter.Next(&item) { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestMapReduceOutOfOrder(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: bson.M{"a": "a", "z": "z", "replace": "mr", "db": "otherdb", "b": "b", "y": "y"}, - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "otherdb") -} - -func (s *S) TestMapReduceScope(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - coll.Insert(M{"n": 1}) - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, x); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Scope: M{"x": 42}, - } - - var result []bson.M - _, err = coll.Find(nil).MapReduce(job, &result) - c.Assert(len(result), Equals, 1) - c.Assert(result[0]["value"], Equals, 42.0) -} - -func (s *S) TestMapReduceVerbose(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 100; i++ { - err = coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Verbose: true, - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.VerboseTime, NotNil) -} - -func (s *S) TestMapReduceLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - } - - var result []bson.M - _, err = coll.Find(nil).Limit(3).MapReduce(job, &result) - c.Assert(err, IsNil) - 
c.Assert(len(result), Equals, 3) -} - -func (s *S) TestBuildInfo(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - info, err := session.BuildInfo() - c.Assert(err, IsNil) - - var v []int - for i, a := range strings.Split(info.Version, ".") { - for _, token := range []string{"-rc", "-pre"} { - if i == 2 && strings.Contains(a, token) { - a = a[:strings.Index(a, token)] - info.VersionArray[len(info.VersionArray)-1] = 0 - } - } - n, err := strconv.Atoi(a) - c.Assert(err, IsNil) - v = append(v, n) - } - for len(v) < 4 { - v = append(v, 0) - } - - c.Assert(info.VersionArray, DeepEquals, v) - c.Assert(info.GitVersion, Matches, "[a-z0-9]+") - c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") - if info.Bits != 32 && info.Bits != 64 { - c.Fatalf("info.Bits is %d", info.Bits) - } - if info.MaxObjectSize < 8192 { - c.Fatalf("info.MaxObjectSize seems too small: %d", info.MaxObjectSize) - } -} - -func (s *S) TestZeroTimeRoundtrip(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - var d struct{ T time.Time } - conn := session.DB("mydb").C("mycoll") - err = conn.Insert(d) - c.Assert(err, IsNil) - - var result bson.M - err = conn.Find(nil).One(&result) - c.Assert(err, IsNil) - t, isTime := result["t"].(time.Time) - c.Assert(isTime, Equals, true) - c.Assert(t, Equals, time.Time{}) -} - -func (s *S) TestFsyncLock(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - clone := session.Clone() - defer clone.Close() - - err = session.FsyncLock() - c.Assert(err, IsNil) - - done := make(chan time.Time) - go func() { - time.Sleep(3e9) - now := time.Now() - err := session.FsyncUnlock() - c.Check(err, IsNil) - done <- now - }() - - err = clone.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) - unlocked := time.Now() - unlocking := <-done - c.Assert(err, IsNil) - - c.Assert(unlocked.After(unlocking), Equals, true) - c.Assert(unlocked.Sub(unlocking) < 1e9, Equals, true) -} - -func (s *S) TestFsync(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Not much to do here. Just a smoke check. - err = session.Fsync(false) - c.Assert(err, IsNil) - err = session.Fsync(true) - c.Assert(err, IsNil) -} - -func (s *S) TestRepairCursor(c *C) { - if !s.versionAtLeast(2, 7) { - c.Skip("RepairCursor only works on 2.7+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - session.SetBatch(2) - - coll := session.DB("mydb").C("mycoll3") - err = coll.DropCollection() - - ns := []int{0, 10, 20, 30, 40, 50} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - repairIter := coll.Repair() - - c.Assert(repairIter.Err(), IsNil) - - result := struct{ N int }{} - resultCounts := map[int]int{} - for repairIter.Next(&result) { - resultCounts[result.N]++ - } - - c.Assert(repairIter.Next(&result), Equals, false) - c.Assert(repairIter.Err(), IsNil) - c.Assert(repairIter.Close(), IsNil) - - // Verify that the results of the repair cursor are valid. - // The repair cursor can return multiple copies - // of the same document, so to check correctness we only - // need to verify that at least 1 of each document was returned. 
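- // In other words, assert at-least-once delivery per document rather than
- // exact result counts.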
- - for _, key := range ns { - c.Assert(resultCounts[key] > 0, Equals, true) - } -} - -func (s *S) TestPipeIter(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - pipe := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}) - - // Ensure cursor logic is working by forcing a small batch. - pipe.Batch(2) - - // Smoke test for AllowDiskUse. - pipe.AllowDiskUse() - - iter := pipe.Iter() - result := struct{ N int }{} - for i := 2; i < 7; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - } - - c.Assert(iter.Next(&result), Equals, false) - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestPipeAll(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - var result []struct{ N int } - err = coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).All(&result) - c.Assert(err, IsNil) - for i := 2; i < 7; i++ { - c.Assert(result[i-2].N, Equals, ns[i]) - } -} - -func (s *S) TestPipeOne(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ A, B int }{} - - pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) - err = pipe.One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 1) - c.Assert(result.B, Equals, 3) - - pipe = coll.Pipe([]M{{"$match": M{"a": 2}}}) - err = pipe.One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestPipeExplain(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) - - // The explain command result changes across versions. 
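- // Only assert the lowest common denominator: that the command reports ok.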
- var result struct{ Ok int } - err = pipe.Explain(&result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestBatch1Bug(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 3; i++ { - err := coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - var ns []struct{ N int } - err = coll.Find(nil).Batch(1).All(&ns) - c.Assert(err, IsNil) - c.Assert(len(ns), Equals, 3) - - session.SetBatch(1) - err = coll.Find(nil).All(&ns) - c.Assert(err, IsNil) - c.Assert(len(ns), Equals, 3) -} - -func (s *S) TestInterfaceIterBug(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 3; i++ { - err := coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - var result interface{} - - i := 0 - iter := coll.Find(nil).Sort("n").Iter() - for iter.Next(&result) { - c.Assert(result.(bson.M)["n"], Equals, i) - i++ - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestFindIterCloseKillsCursor(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cursors := serverCursorsOpen(session) - - coll := session.DB("mydb").C("mycoll") - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err = coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - iter := coll.Find(nil).Batch(2).Iter() - c.Assert(iter.Next(bson.M{}), Equals, true) - - c.Assert(iter.Close(), IsNil) - c.Assert(serverCursorsOpen(session), Equals, cursors) -} - -func (s *S) TestLogReplay(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - for i := 0; i < 5; i++ { - err = coll.Insert(M{"ts": time.Now()}) - c.Assert(err, IsNil) - } - - iter := coll.Find(nil).LogReplay().Iter() - if s.versionAtLeast(2, 6) { - // This used to fail in 2.4. Now it's just a smoke test. - c.Assert(iter.Err(), IsNil) - } else { - c.Assert(iter.Next(bson.M{}), Equals, false) - c.Assert(iter.Err(), ErrorMatches, "no ts field in query") - } -} - -func (s *S) TestSetCursorTimeout(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 42}) - - // This is just a smoke test. Won't wait 10 minutes for an actual timeout. 
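- // SetCursorTimeout(0) sets the noCursorTimeout query flag, telling the
- // server never to reap the cursor after its default ~10 minute idle window.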
- - session.SetCursorTimeout(0) - - var result struct{ N int } - iter := coll.Find(nil).Iter() - c.Assert(iter.Next(&result), Equals, true) - c.Assert(result.N, Equals, 42) - c.Assert(iter.Next(&result), Equals, false) -} - -func (s *S) TestNewIterNoServer(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - data, err := bson.Marshal(bson.M{"a": 1}) - - coll := session.DB("mydb").C("mycoll") - iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, nil) - - var result struct{ A int } - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.A, Equals, 1) - - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - - c.Assert(iter.Err(), ErrorMatches, "server not available") -} - -func (s *S) TestNewIterNoServerPresetErr(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - data, err := bson.Marshal(bson.M{"a": 1}) - - coll := session.DB("mydb").C("mycoll") - iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, fmt.Errorf("my error")) - - var result struct{ A int } - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.A, Equals, 1) - - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - - c.Assert(iter.Err(), ErrorMatches, "my error") -} - -// -------------------------------------------------------------------------- -// Some benchmarks that require a running database. - -func (s *S) BenchmarkFindIterRaw(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - doc := bson.D{ - {"f2", "a short string"}, - {"f3", bson.D{{"1", "one"}, {"2", 2.0}}}, - {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}}, - } - - for i := 0; i < c.N+1; i++ { - err := coll.Insert(doc) - c.Assert(err, IsNil) - } - - session.SetBatch(c.N) - - var raw bson.Raw - iter := coll.Find(nil).Iter() - iter.Next(&raw) - c.ResetTimer() - i := 0 - for iter.Next(&raw) { - i++ - } - c.StopTimer() - c.Assert(iter.Err(), IsNil) - c.Assert(i, Equals, c.N) -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go index e535183c..e2a12503 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go @@ -28,6 +28,7 @@ package mgo import ( "errors" + "fmt" "net" "sync" "time" @@ -74,6 +75,7 @@ type queryOp struct { flags queryOpFlags replyFunc replyFunc + mode Mode options queryWrapper hasOptions bool serverTags []bson.D @@ -87,12 +89,35 @@ type queryWrapper struct { Snapshot bool "$snapshot,omitempty" ReadPreference bson.D "$readPreference,omitempty" MaxScan int "$maxScan,omitempty" + MaxTimeMS int "$maxTimeMS,omitempty" + Comment string "$comment,omitempty" } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { - if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos { + if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos { + var modeName string + switch op.mode { + case Strong: + modeName = "primary" + case Monotonic, Eventual: + modeName = "secondaryPreferred" + case PrimaryPreferred: + modeName = "primaryPreferred" + case Secondary: + modeName = "secondary" + case SecondaryPreferred: + modeName = "secondaryPreferred" + case Nearest: + modeName = "nearest" + default: + panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) + } op.hasOptions = true - op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", 
op.serverTags}} + op.options.ReadPreference = make(bson.D, 0, 2) + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName}) + if len(op.serverTags) > 0 { + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags}) + } } if op.hasOptions { if op.query == nil { @@ -128,16 +153,19 @@ type insertOp struct { } type updateOp struct { - collection string // "database.collection" - selector interface{} - update interface{} - flags uint32 + Collection string `bson:"-"` // "database.collection" + Selector interface{} `bson:"q"` + Update interface{} `bson:"u"` + Flags uint32 `bson:"-"` + Multi bool `bson:"multi,omitempty"` + Upsert bool `bson:"upsert,omitempty"` } type deleteOp struct { - collection string // "database.collection" - selector interface{} - flags uint32 + Collection string `bson:"-"` // "database.collection" + Selector interface{} `bson:"q"` + Flags uint32 `bson:"-"` + Limit int `bson:"limit"` } type killCursorsOp struct { @@ -368,15 +396,15 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) { case *updateOp: buf = addHeader(buf, 2001) buf = addInt32(buf, 0) // Reserved - buf = addCString(buf, op.collection) - buf = addInt32(buf, int32(op.flags)) - debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) - buf, err = addBSON(buf, op.selector) + buf = addCString(buf, op.Collection) + buf = addInt32(buf, int32(op.Flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) + buf, err = addBSON(buf, op.Selector) if err != nil { return err } - debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update) - buf, err = addBSON(buf, op.update) + debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update) + buf, err = addBSON(buf, op.Update) if err != nil { return err } @@ -422,10 +450,10 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) { case *deleteOp: buf = addHeader(buf, 2006) buf = addInt32(buf, 0) // Reserved - buf = addCString(buf, op.collection) - buf = addInt32(buf, int32(op.flags)) - debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) - buf, err = addBSON(buf, op.selector) + buf = addCString(buf, op.Collection) + buf = addInt32(buf, int32(op.Flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) + buf, err = addBSON(buf, op.Selector) if err != nil { return err } diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/suite_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/suite_test.go deleted file mode 100644 index f611cfb5..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/suite_test.go +++ /dev/null @@ -1,254 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "errors" - "flag" - "fmt" - "net" - "os/exec" - "runtime" - "strconv" - "testing" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" -) - -var fast = flag.Bool("fast", false, "Skip slow tests") - -type M bson.M - -type cLogger C - -func (c *cLogger) Output(calldepth int, s string) error { - ns := time.Now().UnixNano() - t := float64(ns%100e9) / 1e9 - ((*C)(c)).Logf("[LOG] %.05f %s", t, s) - return nil -} - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct { - session *mgo.Session - stopped bool - build mgo.BuildInfo - frozen []string -} - -func (s *S) versionAtLeast(v ...int) bool { - for i := range v { - if i == len(s.build.VersionArray) { - return false - } - if s.build.VersionArray[i] < v[i] { - return false - } - } - return true -} - -var _ = Suite(&S{}) - -func (s *S) SetUpSuite(c *C) { - mgo.SetDebug(true) - mgo.SetStats(true) - s.StartAll() - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - s.build, err = session.BuildInfo() - c.Check(err, IsNil) - session.Close() -} - -func (s *S) SetUpTest(c *C) { - err := run("mongo --nodb testdb/dropall.js") - if err != nil { - panic(err.Error()) - } - mgo.SetLogger((*cLogger)(c)) - mgo.ResetStats() -} - -func (s *S) TearDownTest(c *C) { - if s.stopped { - s.StartAll() - } - for _, host := range s.frozen { - if host != "" { - s.Thaw(host) - } - } - var stats mgo.Stats - for i := 0; ; i++ { - stats = mgo.GetStats() - if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { - break - } - if i == 20 { - c.Fatal("Test left sockets in a dirty state") - } - c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) - time.Sleep(500 * time.Millisecond) - } - for i := 0; ; i++ { - stats = mgo.GetStats() - if stats.Clusters == 0 { - break - } - if i == 60 { - c.Fatal("Test left clusters alive") - } - c.Logf("Waiting for clusters to die: %d alive", stats.Clusters) - time.Sleep(1 * time.Second) - } -} - -func (s *S) Stop(host string) { - // Give a moment for slaves to sync and avoid getting rollback issues. 
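- // Stopping a node immediately after acknowledged writes can otherwise force
- // a rollback when it rejoins the replica set.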
- panicOnWindows() - time.Sleep(2 * time.Second) - err := run("cd _testdb && supervisorctl stop " + supvName(host)) - if err != nil { - panic(err) - } - s.stopped = true -} - -func (s *S) pid(host string) int { - output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput() - if err != nil { - panic(err) - } - pidstr := string(output[1 : len(output)-1]) - pid, err := strconv.Atoi(pidstr) - if err != nil { - panic("cannot convert pid to int: " + pidstr) - } - return pid -} - -func (s *S) Freeze(host string) { - err := stop(s.pid(host)) - if err != nil { - panic(err) - } - s.frozen = append(s.frozen, host) -} - -func (s *S) Thaw(host string) { - err := cont(s.pid(host)) - if err != nil { - panic(err) - } - for i, frozen := range s.frozen { - if frozen == host { - s.frozen[i] = "" - } - } -} - -func (s *S) StartAll() { - // Restart any stopped nodes. - run("cd _testdb && supervisorctl start all") - err := run("cd testdb && mongo --nodb wait.js") - if err != nil { - panic(err) - } - s.stopped = false -} - -func run(command string) error { - var output []byte - var err error - if runtime.GOOS == "windows" { - output, err = exec.Command("cmd", "/C", command).CombinedOutput() - } else { - output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput() - } - - if err != nil { - msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output)) - return errors.New(msg) - } - return nil -} - -var supvNames = map[string]string{ - "40001": "db1", - "40002": "db2", - "40011": "rs1a", - "40012": "rs1b", - "40013": "rs1c", - "40021": "rs2a", - "40022": "rs2b", - "40023": "rs2c", - "40031": "rs3a", - "40032": "rs3b", - "40033": "rs3c", - "40041": "rs4a", - "40101": "cfg1", - "40102": "cfg2", - "40103": "cfg3", - "40201": "s1", - "40202": "s2", - "40203": "s3", -} - -// supvName returns the supervisord name for the given host address. -func supvName(host string) string { - host, port, err := net.SplitHostPort(host) - if err != nil { - panic(err) - } - name, ok := supvNames[port] - if !ok { - panic("Unknown host: " + host) - } - return name -} - -func hostPort(host string) string { - _, port, err := net.SplitHostPort(host) - if err != nil { - panic(err) - } - return port -} - -func panicOnWindows() { - if runtime.GOOS == "windows" { - panic("the test suite is not yet fully supported on Windows") - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_test.go deleted file mode 100644 index b8bbd7b3..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mgo_test - -import ( - "syscall" -) - -func stop(pid int) (err error) { - return syscall.Kill(pid, syscall.SIGSTOP) -} - -func cont(pid int) (err error) { - return syscall.Kill(pid, syscall.SIGCONT) -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_windows_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_windows_test.go deleted file mode 100644 index f2deaca8..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_windows_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package mgo_test - -func stop(pid int) (err error) { - panicOnWindows() // Always does. - return nil -} - -func cont(pid int) (err error) { - panicOnWindows() // Always does. 
- return nil -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem index cc57eec7..93aed355 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem @@ -1,44 +1,57 @@ +To regenerate the key: + + openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key + cat server.key server.crt > server.pem + openssl genrsa -out client.key 2048 + openssl req -key client.key -new -out client.req + openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt + cat client.key client.crt > client.pem + -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA -sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD -KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp -peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y -lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN -LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS -J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT -X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT -AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ -Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr -GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB -TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX -1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV -YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw -9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR -SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx -W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 -PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g -Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu -T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL -Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh -jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh -z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 -RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 -DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== +MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f +Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl +QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F +Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ ++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F +jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB 
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy +HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP +Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E +xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB +28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z +ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ +4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo +I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM -MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE -CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN -MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD -VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD -Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX -xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ -N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/ -DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV -ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV -B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip -08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG -SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/ -rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk -3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP +MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV +BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl +cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw +OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH +DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H +4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ +616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I +AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd +7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO +Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx +l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 +CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW +DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 +PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR +OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI +/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r +z3A= -----END CERTIFICATE----- + diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js index 5b654f33..7fa39d11 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js @@ -3,7 +3,7 @@ var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 4010 var auth = [40002, 40103, 40203, 40031] var db1 = new Mongo("localhost:40001") -if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion != "") { +if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) { ports.push(40003) auth.push(40003) } @@ -32,12 +32,12 @@ for (var i in ports) { } var result = admin.runCommand({"listDatabases": 1}) for (var j = 0; j != 100; j++) { - if (typeof result.databases != "undefined" || 
result.errmsg == "not master") { + if (typeof result.databases != "undefined" || notMaster(result)) { break } result = admin.runCommand({"listDatabases": 1}) } - if (result.errmsg == "not master") { + if (notMaster(result)) { continue } if (typeof result.databases == "undefined") { @@ -59,4 +59,8 @@ for (var i in ports) { } } +function notMaster(result) { + return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found")) +} + // vim:ts=4:sw=4:et diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js index 7deb67e1..ceb75a5e 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js @@ -33,7 +33,7 @@ for (var i = 0; i != 60; i++) { } function hasSSL() { - return db1.serverBuildInfo().OpenSSLVersion != "" + return Boolean(db1.serverBuildInfo().OpenSSLVersion) } rs1a.runCommand({replSetInitiate: rs1cfg}) @@ -58,14 +58,30 @@ function configAuth() { addrs.push("127.0.0.1:40003") } for (var i in addrs) { + print("Configuring auth for", addrs[i]) var db = new Mongo(addrs[i]).getDB("admin") var v = db.serverBuildInfo().versionArray + var timedOut = false if (v < [2, 5]) { db.addUser("root", "rapadura") } else { - db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + try { + db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + } catch (err) { + // 3.2 consistently fails replication of creds on 40031 (config server) + print("createUser command returned an error: " + err) + if (String(err).indexOf("timed out") >= 0) { + timedOut = true; + } + } + } + for (var i = 0; i < 60; i++) { + var ok = db.auth("root", "rapadura") + if (ok || !timedOut) { + break + } + sleep(1000); } - db.auth("root", "rapadura") if (v >= [2, 6]) { db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else if (v >= [2, 4]) { @@ -79,14 +95,21 @@ function configAuth() { function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } @@ -96,7 +119,6 @@ for (var i = 0; i != 60; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { - sleep(2000) configShards() configAuth() quit(0) diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem index 16fbef16..487b92d6 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem @@ -1,33 +1,50 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB +xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 +Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 +IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV -BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP -MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx -MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN -R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx -EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA -pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 -fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR -q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV -HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB -rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x -DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ -BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI -hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO -7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC -sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl +cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd +OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C 
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== -----END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 -ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw -fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB -AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd -SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o -aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA -yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG -WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL -mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r -YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ -PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD -UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv -I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== ------END RSA PRIVATE KEY----- diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh index 317e8e5a..a121847e 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh @@ -15,7 +15,7 @@ start() { echo "Running supervisord..." supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) echo "Supervisord is up, starting $COUNT processes..." - for i in $(seq 10); do + for i in $(seq 30); do RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') echo "$RUNNING processes running..." if [ x$COUNT = x$RUNNING ]; then diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf index 1c2b859a..724eaa79 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf @@ -14,13 +14,13 @@ serverurl = http://127.0.0.1:9001 supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth [program:db3] -command = mongod -nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem [program:rs1a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 @@ -57,9 +57,12 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz [program:s1] command = mongos --configdb 127.0.0.1:40101 
--bind_ip=127.0.0.1 --port 40201 --chunkSize 1 +startretries = 10 [program:s2] command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 +startretries = 10 [program:s3] command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile +startretries = 10 diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js index de0d6607..2735d0e5 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js @@ -32,20 +32,27 @@ for (var i = 0; i != 60; i++) { function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length -for (var i = 0; i != 60; i++) { +for (var i = 0; i != 90; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { @@ -56,3 +63,5 @@ for (var i = 0; i != 60; i++) { print("Replica sets didn't sync up properly.") quit(12) + +// vim:ts=4:sw=4:et diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/testserver.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/testserver.go new file mode 100644 index 00000000..21834ccd --- /dev/null +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/testserver.go @@ -0,0 +1,168 @@ +// WARNING: This package was replaced by mgo.v2/dbtest. +package testserver + +import ( + "bytes" + "fmt" + "net" + "os" + "os/exec" + "strconv" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" + "gopkg.in/tomb.v2" +) + +// WARNING: This package was replaced by mgo.v2/dbtest. +type TestServer struct { + session *mgo.Session + output bytes.Buffer + server *exec.Cmd + dbpath string + host string + tomb tomb.Tomb +} + +// WARNING: This package was replaced by mgo.v2/dbtest. +func (ts *TestServer) SetPath(dbpath string) { + ts.dbpath = dbpath +} + +func (ts *TestServer) start() { + if ts.server != nil { + panic("TestServer already started") + } + if ts.dbpath == "" { + panic("TestServer.SetPath must be called before using the server") + } + mgo.SetStats(true) + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic("unable to listen on a local address: " + err.Error()) + } + addr := l.Addr().(*net.TCPAddr) + l.Close() + ts.host = addr.String() + + args := []string{ + "--dbpath", ts.dbpath, + "--bind_ip", "127.0.0.1", + "--port", strconv.Itoa(addr.Port), + "--nssize", "1", + "--noprealloc", + "--smallfiles", + "--nojournal", + } + ts.tomb = tomb.Tomb{} + ts.server = exec.Command("mongod", args...) + ts.server.Stdout = &ts.output + ts.server.Stderr = &ts.output + err = ts.server.Start() + if err != nil { + panic(err) + } + ts.tomb.Go(ts.monitor) + ts.Wipe() +} + +func (ts *TestServer) monitor() error { + ts.server.Process.Wait() + if ts.tomb.Alive() { + // Present some debugging information. 
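+		// The captured mongod output plus a process listing usually make the
+		// cause obvious (port already bound, bad --dbpath permissions, etc.).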
+ fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") + fmt.Fprintf(os.Stderr, "%s", ts.output.Bytes()) + fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") + cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + cmd.Run() + fmt.Fprintf(os.Stderr, "----------------------------------------\n") + + panic("mongod process died unexpectedly") + } + return nil +} + +// WARNING: This package was replaced by mgo.v2/dbtest. +func (ts *TestServer) Stop() { + if ts.session != nil { + ts.checkSessions() + if ts.session != nil { + ts.session.Close() + ts.session = nil + } + } + if ts.server != nil { + ts.tomb.Kill(nil) + ts.server.Process.Kill() + select { + case <-ts.tomb.Dead(): + case <-time.After(5 * time.Second): + panic("timeout waiting for mongod process to die") + } + ts.server = nil + } +} + +// WARNING: This package was replaced by mgo.v2/dbtest. +func (ts *TestServer) Session() *mgo.Session { + if ts.server == nil { + ts.start() + } + if ts.session == nil { + mgo.ResetStats() + var err error + ts.session, err = mgo.Dial(ts.host + "/test") + if err != nil { + panic(err) + } + } + return ts.session.Copy() +} + +// WARNING: This package was replaced by mgo.v2/dbtest. +func (ts *TestServer) checkSessions() { + if check := os.Getenv("CHECK_SESSIONS"); check == "0" || ts.server == nil || ts.session == nil { + return + } + ts.session.Close() + ts.session = nil + for i := 0; i < 100; i++ { + stats := mgo.GetStats() + if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { + return + } + time.Sleep(100 * time.Millisecond) + } + panic("There are mgo sessions still alive.") +} + +// WARNING: This package was replaced by mgo.v2/dbtest. +func (ts *TestServer) Wipe() { + if ts.server == nil || ts.session == nil { + return + } + ts.checkSessions() + sessionUnset := ts.session == nil + session := ts.Session() + defer session.Close() + if sessionUnset { + ts.session.Close() + ts.session = nil + } + names, err := session.DatabaseNames() + if err != nil { + panic(err) + } + for _, name := range names { + switch name { + case "admin", "local", "config": + default: + err = session.DB(name).DropDatabase() + if err != nil { + panic(err) + } + } + } +} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/dockey_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/dockey_test.go deleted file mode 100644 index e8dee952..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/dockey_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package txn - -import ( - "sort" - - . 
"gopkg.in/check.v1" -) - -type DocKeySuite struct{} - -var _ = Suite(&DocKeySuite{}) - -type T struct { - A int - B string -} - -type T2 struct { - A int - B string -} - -type T3 struct { - A int - B string -} - -type T4 struct { - A int - B string -} - -type T5 struct { - F int - Q string -} - -type T6 struct { - A int - B string -} - -type T7 struct { - A bool - B float64 -} - -type T8 struct { - A int - B string -} - -type T9 struct { - A int - B string - C bool -} - -type T10 struct { - C int `bson:"a"` - D string `bson:"b,omitempty"` -} - -type T11 struct { - C int - D string -} - -type T12 struct { - S string -} - -type T13 struct { - p, q, r bool - S string -} - -var docKeysTests = [][]docKeys{ - {{ - {"c", 1}, - {"c", 5}, - {"c", 2}, - }, { - {"c", 1}, - {"c", 2}, - {"c", 5}, - }}, {{ - {"c", "foo"}, - {"c", "bar"}, - {"c", "bob"}, - }, { - {"c", "bar"}, - {"c", "bob"}, - {"c", "foo"}, - }}, {{ - {"c", 0.2}, - {"c", 0.07}, - {"c", 0.9}, - }, { - {"c", 0.07}, - {"c", 0.2}, - {"c", 0.9}, - }}, {{ - {"c", true}, - {"c", false}, - {"c", true}, - }, { - {"c", false}, - {"c", true}, - {"c", true}, - }}, {{ - {"c", T{1, "b"}}, - {"c", T{1, "a"}}, - {"c", T{0, "b"}}, - {"c", T{0, "a"}}, - }, { - {"c", T{0, "a"}}, - {"c", T{0, "b"}}, - {"c", T{1, "a"}}, - {"c", T{1, "b"}}, - }}, {{ - {"c", T{1, "a"}}, - {"c", T{0, "a"}}, - }, { - {"c", T{0, "a"}}, - {"c", T{1, "a"}}, - }}, {{ - {"c", T3{0, "b"}}, - {"c", T2{1, "b"}}, - {"c", T3{1, "a"}}, - {"c", T2{0, "a"}}, - }, { - {"c", T2{0, "a"}}, - {"c", T3{0, "b"}}, - {"c", T3{1, "a"}}, - {"c", T2{1, "b"}}, - }}, {{ - {"c", T5{1, "b"}}, - {"c", T4{1, "b"}}, - {"c", T5{0, "a"}}, - {"c", T4{0, "a"}}, - }, { - {"c", T4{0, "a"}}, - {"c", T5{0, "a"}}, - {"c", T4{1, "b"}}, - {"c", T5{1, "b"}}, - }}, {{ - {"c", T6{1, "b"}}, - {"c", T7{true, 0.2}}, - {"c", T6{0, "a"}}, - {"c", T7{false, 0.04}}, - }, { - {"c", T6{0, "a"}}, - {"c", T6{1, "b"}}, - {"c", T7{false, 0.04}}, - {"c", T7{true, 0.2}}, - }}, {{ - {"c", T9{1, "b", true}}, - {"c", T8{1, "b"}}, - {"c", T9{0, "a", false}}, - {"c", T8{0, "a"}}, - }, { - {"c", T9{0, "a", false}}, - {"c", T8{0, "a"}}, - {"c", T9{1, "b", true}}, - {"c", T8{1, "b"}}, - }}, {{ - {"b", 2}, - {"a", 5}, - {"c", 2}, - {"b", 1}, - }, { - {"a", 5}, - {"b", 1}, - {"b", 2}, - {"c", 2}, - }}, {{ - {"c", T11{1, "a"}}, - {"c", T11{1, "a"}}, - {"c", T10{1, "a"}}, - }, { - {"c", T10{1, "a"}}, - {"c", T11{1, "a"}}, - {"c", T11{1, "a"}}, - }}, {{ - {"c", T12{"a"}}, - {"c", T13{false, true, false, "a"}}, - {"c", T12{"b"}}, - {"c", T13{false, true, false, "b"}}, - }, { - {"c", T12{"a"}}, - {"c", T13{false, true, false, "a"}}, - {"c", T12{"b"}}, - {"c", T13{false, true, false, "b"}}, - }}, -} - -func (s *DocKeySuite) TestSort(c *C) { - for _, test := range docKeysTests { - keys := test[0] - expected := test[1] - sort.Sort(keys) - c.Check(keys, DeepEquals, expected) - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go index a66a8e78..78179422 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go @@ -395,12 +395,15 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) revno := make(map[docKey]int64) info := txnInfo{} for _, dkey := range dkeys { - retry := 0 + const retries = 3 + retry := -1 RetryDoc: + retry++ c := f.tc.Database.C(dkey.C) if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound { // Document is missing. 
Look in stash. + chaos("") if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound { // Stash also doesn't exist. Maybe someone applied it. if err := f.reload(t); err != nil { @@ -409,8 +412,7 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) return t.Revnos, err } // Not applying either. - retry++ - if retry < 3 { + if retry < retries { // Retry since there might be an insert/remove race. goto RetryDoc } @@ -451,13 +453,28 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) } f.queue[dkey] = info.Queue if !found { - // Previously set txn-queue was popped by someone. - // Transaction is being/has been applied elsewhere. + // Rescanned transaction id was not in the queue. This could mean one + // of three things: + // 1) The transaction was applied and popped by someone else. This is + // the common case. + // 2) We've read an out-of-date queue from the stash. This can happen + // when someone else was paused for a long while preparing another + // transaction for this document, and improperly upserted to the + // stash when unpaused (after someone else inserted the document). + // This is rare but possible. + // 3) There's an actual bug somewhere, or outside interference. Worst + // possible case. f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue) err := f.reload(t) if t.State == tpreparing || t.State == tprepared { - panic("rescanned document misses transaction in queue") + if retry < retries { + // Case 2. + goto RetryDoc + } + // Case 3. + return nil, fmt.Errorf("cannot find transaction %s in queue for document %v", t, dkey) } + // Case 1. return t.Revnos, err } } diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/mgo_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/mgo_test.go deleted file mode 100644 index 66ce7336..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/mgo_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package txn_test - -import ( - "bytes" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - . "gopkg.in/check.v1" - "os/exec" - "time" -) - -// ---------------------------------------------------------------------------- -// The mgo test suite - -type MgoSuite struct { - output bytes.Buffer - server *exec.Cmd - session *mgo.Session -} - -var mgoaddr = "127.0.0.1:50017" - -func (s *MgoSuite) SetUpSuite(c *C) { - //mgo.SetDebug(true) - mgo.SetStats(true) - dbdir := c.MkDir() - args := []string{ - "--dbpath", dbdir, - "--bind_ip", "127.0.0.1", - "--port", "50017", - "--nssize", "1", - "--noprealloc", - "--smallfiles", - "--nojournal", - "-vvvvv", - } - s.server = exec.Command("mongod", args...) 
- s.server.Stdout = &s.output - s.server.Stderr = &s.output - err := s.server.Start() - if err != nil { - panic(err) - } -} - -func (s *MgoSuite) TearDownSuite(c *C) { - s.server.Process.Kill() - s.server.Process.Wait() -} - -func (s *MgoSuite) SetUpTest(c *C) { - err := DropAll(mgoaddr) - if err != nil { - panic(err) - } - mgo.SetLogger(c) - mgo.ResetStats() - - s.session, err = mgo.Dial(mgoaddr) - c.Assert(err, IsNil) -} - -func (s *MgoSuite) TearDownTest(c *C) { - if s.session != nil { - s.session.Close() - } - for i := 0; ; i++ { - stats := mgo.GetStats() - if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { - break - } - if i == 20 { - c.Fatal("Test left sockets in a dirty state") - } - c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) - time.Sleep(500 * time.Millisecond) - } -} - -func DropAll(mongourl string) (err error) { - session, err := mgo.Dial(mongourl) - if err != nil { - return err - } - defer session.Close() - - names, err := session.DatabaseNames() - if err != nil { - return err - } - for _, name := range names { - switch name { - case "admin", "local", "config": - default: - err = session.DB(name).DropDatabase() - if err != nil { - return err - } - } - } - return nil -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/sim_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/sim_test.go deleted file mode 100644 index 9a9e88f1..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/sim_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package txn_test - -import ( - "flag" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn" - . 
"gopkg.in/check.v1" - "math/rand" - "time" -) - -var ( - duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation") - seed = flag.Int64("seed", 0, "seed for rand") -) - -type params struct { - killChance float64 - slowdownChance float64 - slowdown time.Duration - - unsafe bool - workers int - accounts int - changeHalf bool - reinsertCopy bool - reinsertZeroed bool - changelog bool - - changes int -} - -func (s *S) TestSim1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 4, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSim4WorkersDense(c *C) { - simulate(c, params{ - workers: 4, - accounts: 2, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSim4WorkersSparse(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 4, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf4WorkersDense(c *C) { - simulate(c, params{ - workers: 4, - accounts: 2, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf4WorkersSparse(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertCopy1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 10, - reinsertCopy: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertCopy4Workers(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - reinsertCopy: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertZeroed1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 10, - reinsertZeroed: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertZeroed4Workers(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - reinsertZeroed: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimChangeLog(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - changelog: true, - }) -} - -type balanceChange struct { - id bson.ObjectId - origin int - target int - amount int -} - -func simulate(c *C, params params) { - seed := *seed - if seed == 0 { - seed = time.Now().UnixNano() - } - rand.Seed(seed) - c.Logf("Seed: %v", seed) - - txn.SetChaos(txn.Chaos{ - KillChance: params.killChance, - SlowdownChance: params.slowdownChance, - Slowdown: params.slowdown, - }) - defer txn.SetChaos(txn.Chaos{}) - - session, err := mgo.Dial(mgoaddr) - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("test") - tc := db.C("tc") - - runner := txn.NewRunner(tc) - - tclog := db.C("tc.log") - if params.changelog { - info := mgo.CollectionInfo{ - Capped: true, - MaxBytes: 1000000, - } - err := tclog.Create(&info) - c.Assert(err, IsNil) - runner.ChangeLog(tclog) - } - - accounts := db.C("accounts") - for i := 0; i < params.accounts; i++ { - err := accounts.Insert(M{"_id": i, "balance": 
300}) - c.Assert(err, IsNil) - } - var stop time.Time - if params.changes <= 0 { - stop = time.Now().Add(*duration) - } - - max := params.accounts - if params.reinsertCopy || params.reinsertZeroed { - max = int(float64(params.accounts) * 1.5) - } - - changes := make(chan balanceChange, 1024) - - //session.SetMode(mgo.Eventual, true) - for i := 0; i < params.workers; i++ { - go func() { - n := 0 - for { - if n > 0 && n == params.changes { - break - } - if !stop.IsZero() && time.Now().After(stop) { - break - } - - change := balanceChange{ - id: bson.NewObjectId(), - origin: rand.Intn(max), - target: rand.Intn(max), - amount: 100, - } - - var old Account - var oldExists bool - if params.reinsertCopy || params.reinsertZeroed { - if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound { - c.Check(err, IsNil) - change.amount = old.Balance - oldExists = true - } - } - - var ops []txn.Op - switch { - case params.reinsertCopy && oldExists: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": change.amount}, - Remove: true, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocMissing, - Insert: M{"balance": change.amount}, - }} - case params.reinsertZeroed && oldExists: - ops = []txn.Op{{ - C: "accounts", - Id: change.target, - Assert: txn.DocMissing, - Insert: M{"balance": 0}, - }, { - C: "accounts", - Id: change.origin, - Assert: M{"balance": change.amount}, - Remove: true, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount}}, - }} - case params.changeHalf: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": M{"$gte": change.amount}}, - Update: M{"$inc": M{"balance": -change.amount / 2}}, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount / 2}}, - }, { - C: "accounts", - Id: change.origin, - Update: M{"$inc": M{"balance": -change.amount / 2}}, - }, { - C: "accounts", - Id: change.target, - Update: M{"$inc": M{"balance": change.amount / 2}}, - }} - default: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": M{"$gte": change.amount}}, - Update: M{"$inc": M{"balance": -change.amount}}, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount}}, - }} - } - - err = runner.Run(ops, change.id, nil) - if err != nil && err != txn.ErrAborted && err != txn.ErrChaos { - c.Check(err, IsNil) - } - n++ - changes <- change - } - changes <- balanceChange{} - }() - } - - alive := params.workers - changeLog := make([]balanceChange, 0, 1024) - for alive > 0 { - change := <-changes - if change.id == "" { - alive-- - } else { - changeLog = append(changeLog, change) - } - } - c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted.")) - - txn.SetChaos(txn.Chaos{}) - err = runner.ResumeAll() - c.Assert(err, IsNil) - - n, err := accounts.Count() - c.Check(err, IsNil) - c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed.")) - - n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count() - c.Check(err, IsNil) - c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n)) - - globalBalance := 0 - iter := accounts.Find(nil).Iter() - account := Account{} - for iter.Next(&account) { - globalBalance += account.Balance - } - c.Check(iter.Close(), IsNil) - c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be 
constant.")) - - // Compute and verify the exact final state of all accounts. - balance := make(map[int]int) - for i := 0; i < params.accounts; i++ { - balance[i] += 300 - } - var applied, aborted int - for _, change := range changeLog { - err := runner.Resume(change.id) - if err == txn.ErrAborted { - aborted++ - continue - } else if err != nil { - c.Fatalf("resuming %s failed: %v", change.id, err) - } - balance[change.origin] -= change.amount - balance[change.target] += change.amount - applied++ - } - iter = accounts.Find(nil).Iter() - for iter.Next(&account) { - c.Assert(account.Balance, Equals, balance[account.Id]) - } - c.Check(iter.Close(), IsNil) - c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted) - - if params.changelog { - n, err := tclog.Count() - c.Assert(err, IsNil) - // Check if the capped collection is full. - dummy := make([]byte, 1024) - tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy}) - m, err := tclog.Count() - c.Assert(err, IsNil) - if m == n+1 { - // Wasn't full, so it must have seen it all. - c.Assert(err, IsNil) - c.Assert(n, Equals, applied) - } - } -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan_test.go deleted file mode 100644 index 52c9007c..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package txn - -import ( - "fmt" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" -) - -type TarjanSuite struct{} - -var _ = Suite(TarjanSuite{}) - -func bid(n int) bson.ObjectId { - return bson.ObjectId(fmt.Sprintf("%024d", n)) -} - -func bids(ns ...int) (ids []bson.ObjectId) { - for _, n := range ns { - ids = append(ids, bid(n)) - } - return -} - -func (TarjanSuite) TestExample(c *C) { - successors := map[bson.ObjectId][]bson.ObjectId{ - bid(1): bids(2, 3), - bid(2): bids(1, 5), - bid(3): bids(4), - bid(4): bids(3, 5), - bid(5): bids(6), - bid(6): bids(7), - bid(7): bids(8), - bid(8): bids(6, 9), - bid(9): bids(), - } - - c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{ - bids(9), - bids(6, 7, 8), - bids(5), - bids(3, 4), - bids(1, 2), - }) -} diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go index b0c1d753..15eb7295 100644 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go +++ b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go @@ -14,9 +14,10 @@ import ( "strings" "sync" - crand "crypto/rand" "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" + + crand "crypto/rand" mrand "math/rand" ) @@ -381,28 +382,51 @@ func (r *Runner) ChangeLog(logc *mgo.Collection) { func (r *Runner) PurgeMissing(collections ...string) error { type M map[string]interface{} type S []interface{} - pipeline := []M{ - {"$project": M{"_id": 1, "txn-queue": 1}}, - {"$unwind": "$txn-queue"}, - {"$sort": M{"_id": 1, "txn-queue": 1}}, - //{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}}, - } - type TRef struct { - DocId interface{} "_id" - TxnId string "txn-queue" + type TDoc struct { + Id interface{} "_id" + TxnQueue []string "txn-queue" } found := make(map[bson.ObjectId]bool) - colls := make(map[string]bool) sort.Strings(collections) for _, collection := range collections { c := r.tc.Database.C(collection) - iter := 
c.Pipe(pipeline).Iter() - var tref TRef - for iter.Next(&tref) { - txnId := bson.ObjectIdHex(tref.TxnId[:24]) + iter := c.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() + var tdoc TDoc + for iter.Next(&tdoc) { + for _, txnToken := range tdoc.TxnQueue { + txnId := bson.ObjectIdHex(txnToken[:24]) + if found[txnId] { + continue + } + if r.tc.FindId(txnId).One(nil) == nil { + found[txnId] = true + continue + } + logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tdoc.Id, txnId) + err := c.UpdateId(tdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + if err != nil { + return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) + } + } + } + if err := iter.Close(); err != nil { + return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err) + } + } + + type StashTDoc struct { + Id docKey "_id" + TxnQueue []string "txn-queue" + } + + iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() + var stdoc StashTDoc + for iter.Next(&stdoc) { + for _, txnToken := range stdoc.TxnQueue { + txnId := bson.ObjectIdHex(txnToken[:24]) if found[txnId] { continue } @@ -410,36 +434,15 @@ func (r *Runner) PurgeMissing(collections ...string) error { found[txnId] = true continue } - logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId) - err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stdoc.Id.C, stdoc.Id.Id, txnId) + err := r.sc.UpdateId(stdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) if err != nil { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } - colls[collection] = true - } - - type StashTRef struct { - Id docKey "_id" - TxnId string "txn-queue" } - - iter := r.sc.Pipe(pipeline).Iter() - var stref StashTRef - for iter.Next(&stref) { - txnId := bson.ObjectIdHex(stref.TxnId[:24]) - if found[txnId] { - continue - } - if r.tc.FindId(txnId).One(nil) == nil { - found[txnId] = true - continue - } - logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId) - err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) - if err != nil { - return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) - } + if err := iter.Close(); err != nil { + return fmt.Errorf("transaction stash iteration error: %v", err) } return nil diff --git a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn_test.go b/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn_test.go deleted file mode 100644 index 97543c71..00000000 --- a/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn_test.go +++ /dev/null @@ -1,627 +0,0 @@ -package txn_test - -import ( - "sync" - "testing" - "time" - - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" - "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn" - . 
"gopkg.in/check.v1" -) - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct { - MgoSuite - - db *mgo.Database - tc, sc *mgo.Collection - accounts *mgo.Collection - runner *txn.Runner -} - -var _ = Suite(&S{}) - -type M map[string]interface{} - -func (s *S) SetUpTest(c *C) { - txn.SetChaos(txn.Chaos{}) - txn.SetLogger(c) - txn.SetDebug(true) - s.MgoSuite.SetUpTest(c) - - s.db = s.session.DB("test") - s.tc = s.db.C("tc") - s.sc = s.db.C("tc.stash") - s.accounts = s.db.C("accounts") - s.runner = txn.NewRunner(s.tc) -} - -func (s *S) TearDownTest(c *C) { - txn.SetLogger(nil) - txn.SetDebug(false) -} - -type Account struct { - Id int `bson:"_id"` - Balance int -} - -func (s *S) TestDocExists(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - exists := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocExists, - }} - missing := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocMissing, - }} - - err = s.runner.Run(exists, "", nil) - c.Assert(err, IsNil) - err = s.runner.Run(missing, "", nil) - c.Assert(err, Equals, txn.ErrAborted) - - err = s.accounts.RemoveId(0) - c.Assert(err, IsNil) - - err = s.runner.Run(exists, "", nil) - c.Assert(err, Equals, txn.ErrAborted) - err = s.runner.Run(missing, "", nil) - c.Assert(err, IsNil) -} - -func (s *S) TestInsert(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"balance": 200}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - ops[0].Id = 1 - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(1).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) -} - -func (s *S) TestInsertStructID(c *C) { - type id struct { - FirstName string - LastName string - } - ops := []txn.Op{{ - C: "accounts", - Id: id{FirstName: "John", LastName: "Jones"}, - Assert: txn.DocMissing, - Insert: M{"balance": 200}, - }, { - C: "accounts", - Id: id{FirstName: "Sally", LastName: "Smith"}, - Assert: txn.DocMissing, - Insert: M{"balance": 800}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - n, err := s.accounts.Find(nil).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestRemove(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) -} - -func (s *S) TestUpdate(c *C) { - var err error - err = s.accounts.Insert(M{"_id": 0, "balance": 200}) - c.Assert(err, IsNil) - err = s.accounts.Insert(M{"_id": 1, "balance": 200}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - ops[0].Id = 1 - - err = s.accounts.FindId(1).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) -} - -func (s *S) TestInsertUpdate(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }, { - C: 
"accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 400) -} - -func (s *S) TestUpdateInsert(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestInsertRemoveInsert(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }, { - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 300}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestQueueStashing(c *C) { - txn.SetChaos(txn.Chaos{ - KillChance: 1, - Breakpoint: "set-applying", - }) - - opses := [][]txn.Op{{{ - C: "accounts", - Id: 0, - Insert: M{"balance": 100}, - }}, {{ - C: "accounts", - Id: 0, - Remove: true, - }}, {{ - C: "accounts", - Id: 0, - Insert: M{"balance": 200}, - }}, {{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }}} - - var last bson.ObjectId - for _, ops := range opses { - last = bson.NewObjectId() - err := s.runner.Run(ops, last, nil) - c.Assert(err, Equals, txn.ErrChaos) - } - - txn.SetChaos(txn.Chaos{}) - err := s.runner.Resume(last) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestInfo(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocMissing, - }} - - id := bson.NewObjectId() - err := s.runner.Run(ops, id, M{"n": 42}) - c.Assert(err, IsNil) - - var t struct{ I struct{ N int } } - err = s.tc.FindId(id).One(&t) - c.Assert(err, IsNil) - c.Assert(t.I.N, Equals, 42) -} - -func (s *S) TestErrors(c *C) { - doc := bson.M{"foo": 1} - tests := []txn.Op{{ - C: "c", - Id: 0, - }, { - C: "c", - Id: 0, - Insert: doc, - Remove: true, - }, { - C: "c", - Id: 0, - Insert: doc, - Update: doc, - }, { - C: "c", - Id: 0, - Update: doc, - Remove: true, - }, { - C: "c", - Assert: doc, - }, { - Id: 0, - Assert: doc, - }} - - txn.SetChaos(txn.Chaos{KillChance: 1.0}) - for _, op := range tests { - c.Logf("op: %v", op) - err := s.runner.Run([]txn.Op{op}, "", nil) - c.Assert(err, ErrorMatches, "error in transaction op 0: .*") - } -} - -func (s *S) TestAssertNestedOr(c *C) { - // Assert uses $or internally. Ensure nesting works. 
- err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}}, - Update: bson.D{{"$inc", bson.D{{"balance", 100}}}}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 400) -} - -func (s *S) TestVerifyFieldOrdering(c *C) { - // Used to have a map in certain operations, which means - // the ordering of fields would be messed up. - fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}} - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: fields, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var d bson.D - err = s.accounts.FindId(0).One(&d) - c.Assert(err, IsNil) - - var filtered bson.D - for _, e := range d { - switch e.Name { - case "a", "b", "c": - filtered = append(filtered, e) - } - } - c.Assert(filtered, DeepEquals, fields) -} - -func (s *S) TestChangeLog(c *C) { - chglog := s.db.C("chglog") - s.runner.ChangeLog(chglog) - - ops := []txn.Op{{ - C: "debts", - Id: 0, - Assert: txn.DocMissing, - }, { - C: "accounts", - Id: 0, - Insert: M{"balance": 300}, - }, { - C: "accounts", - Id: 1, - Insert: M{"balance": 300}, - }, { - C: "people", - Id: "joe", - Insert: M{"accounts": []int64{0, 1}}, - }} - id := bson.NewObjectId() - err := s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - type IdList []interface{} - type Log struct { - Docs IdList "d" - Revnos []int64 "r" - } - var m map[string]*Log - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}}) - c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}}) - c.Assert(m["debts"], IsNil) - - ops = []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 100}}, - }} - id = bson.NewObjectId() - err = s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - m = nil - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}}) - c.Assert(m["people"], IsNil) - - ops = []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "people", - Id: "joe", - Remove: true, - }} - id = bson.NewObjectId() - err = s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - m = nil - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}}) - c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}}) -} - -func (s *S) TestPurgeMissing(c *C) { - txn.SetChaos(txn.Chaos{ - KillChance: 1, - Breakpoint: "set-applying", - }) - - err := s.accounts.Insert(M{"_id": 0, "balance": 100}) - c.Assert(err, IsNil) - err = s.accounts.Insert(M{"_id": 1, "balance": 100}) - c.Assert(err, IsNil) - - ops1 := []txn.Op{{ - C: "accounts", - Id: 3, - Insert: M{"balance": 100}, - }} - - ops2 := []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 2, - Insert: M{"balance": 100}, - }} - - first := bson.NewObjectId() - c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex()) - err = s.runner.Run(ops1, first, nil) - c.Assert(err, Equals, txn.ErrChaos) - - last := bson.NewObjectId() - c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", 
last.Hex()) - err = s.runner.Run(ops2, last, nil) - c.Assert(err, Equals, txn.ErrChaos) - - c.Logf("---- Removing transaction %q", last.Hex()) - err = s.tc.RemoveId(last) - c.Assert(err, IsNil) - - c.Logf("---- Disabling chaos and attempting to resume all") - txn.SetChaos(txn.Chaos{}) - err = s.runner.ResumeAll() - c.Assert(err, IsNil) - - again := bson.NewObjectId() - c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex()) - err = s.runner.Run(ops2, "", nil) - c.Assert(err, ErrorMatches, "cannot find transaction .*") - - c.Logf("---- Puring missing transactions") - err = s.runner.PurgeMissing("accounts") - c.Assert(err, IsNil) - - c.Logf("---- Resuming pending transactions") - err = s.runner.ResumeAll() - c.Assert(err, IsNil) - - expect := []struct{ Id, Balance int }{ - {0, -1}, - {1, 200}, - {2, 100}, - {3, 100}, - } - var got Account - for _, want := range expect { - err = s.accounts.FindId(want.Id).One(&got) - if want.Balance == -1 { - if err != mgo.ErrNotFound { - c.Errorf("Account %d should not exist, find got err=%#v", err) - } - } else if err != nil { - c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance) - } else if got.Balance != want.Balance { - c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance) - } - } -} - -func (s *S) TestTxnQueueStressTest(c *C) { - txn.SetChaos(txn.Chaos{ - SlowdownChance: 0.3, - Slowdown: 50 * time.Millisecond, - }) - defer txn.SetChaos(txn.Chaos{}) - - // So we can run more iterations of the test in less time. - txn.SetDebug(false) - - err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0}) - c.Assert(err, IsNil) - - // Run half of the operations changing account 0 and then 1, - // and the other half in the opposite order. 
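The deleted TestPurgeMissing above narrates the intended recovery protocol step by step: transactions interrupted mid-flight leave tokens in each document's txn-queue, and once a transaction document itself disappears, Run keeps failing with "cannot find transaction" until the stale tokens are purged. A minimal standalone sketch of that flow, assuming a reachable local mongod and the vendored import paths used throughout this patch:

package main

import (
	"log"

	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2"
	"github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/txn"
)

func main() {
	session, err := mgo.Dial("127.0.0.1:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	runner := txn.NewRunner(session.DB("test").C("tc"))

	// Drop txn-queue tokens whose transaction document no longer exists,
	// both in the listed collections and in the stash.
	if err := runner.PurgeMissing("accounts"); err != nil {
		log.Fatal(err)
	}

	// Then resume whatever is still legitimately pending.
	if err := runner.ResumeAll(); err != nil {
		log.Fatal(err)
	}
}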
- ops01 := []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 1}}, - }, { - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 1}}, - }} - - ops10 := []txn.Op{{ - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 1}}, - }, { - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 1}}, - }} - - ops := [][]txn.Op{ops01, ops10} - - const runners = 4 - const changes = 1000 - - var wg sync.WaitGroup - wg.Add(runners) - for n := 0; n < runners; n++ { - n := n - go func() { - defer wg.Done() - for i := 0; i < changes; i++ { - err = s.runner.Run(ops[n%2], "", nil) - c.Assert(err, IsNil) - } - }() - } - wg.Wait() - - for id := 0; id < 2; id++ { - var account Account - err = s.accounts.FindId(id).One(&account) - if account.Balance != runners*changes { - c.Errorf("Account should have balance of %d, got %d", runners*changes, account.Balance) - } - } -} diff --git a/server/common/config.go b/server/common/config.go index 801d24f1..04d65298 100644 --- a/server/common/config.go +++ b/server/common/config.go @@ -60,6 +60,14 @@ type Configuration struct { SourceIPHeader string `json:"-"` UploadWhitelist []string `json:"-"` + Authentication bool `json:"authentication"` + GoogleAuthentication bool `json:"googleAuthentication"` + GoogleAPISecret string `json:"-"` + GoogleAPIClientID string `json:"-"` + OvhAuthentication bool `json:"ovhAuthentication"` + OvhAPIKey string `json:"-"` + OvhAPISecret string `json:"-"` + MetadataBackend string `json:"-"` MetadataBackendConfig map[string]interface{} `json:"-"` @@ -73,10 +81,10 @@ type Configuration struct { ShortenBackendConfig map[string]interface{} `json:"-"` } -// Global var to store conf +// Config static variable var Config *Configuration -// Parse upload whitelist only once at startup time +// UploadWhitelist is only parsed once at startup time var UploadWhitelist []*net.IPNet // NewConfiguration creates a new configuration @@ -104,22 +112,21 @@ func NewConfiguration() (this *Configuration) { func LoadConfiguration(file string) { Config = NewConfiguration() if _, err := toml.DecodeFile(file, Config); err != nil { - Log().Fatalf("Unable to load config file %s : %s", file, err) + Logger().Fatalf("Unable to load config file %s : %s", file, err) } - Log().SetMinLevelFromString(Config.LogLevel) - Log().Dump(logger.DEBUG, Config) + Logger().SetMinLevelFromString(Config.LogLevel) if Config.LogLevel == "DEBUG" { - Log().SetFlags(logger.Fdate | logger.Flevel | logger.FfixedSizeLevel | logger.FshortFile | logger.FshortFunction) + Logger().SetFlags(logger.Fdate | logger.Flevel | logger.FfixedSizeLevel | logger.FshortFile | logger.FshortFunction) } else { - Log().SetFlags(logger.Fdate | logger.Flevel | logger.FfixedSizeLevel) + Logger().SetFlags(logger.Fdate | logger.Flevel | logger.FfixedSizeLevel) } // Do user specified a ApiKey and ApiSecret for Yubikey if Config.YubikeyEnabled { yubiAuth, err := yubigo.NewYubiAuth(Config.YubikeyAPIKey, Config.YubikeyAPISecret) if err != nil { - Log().Warningf("Failed to load yubikey backend : %s", err) + Logger().Warningf("Failed to load yubikey backend : %s", err) Config.YubikeyEnabled = false } else { Config.YubiAuth = yubiAuth @@ -136,8 +143,30 @@ func LoadConfiguration(file string) { if _, net, err := net.ParseCIDR(cidr); err == nil { UploadWhitelist = append(UploadWhitelist, net) } else { - Log().Fatalf("Failed to parse upload whitelist : %s", cidr) + Logger().Fatalf("Failed to parse upload whitelist : %s", cidr) } } } + + if Config.GoogleAPIClientID != "" && Config.GoogleAPISecret != "" { + 
Config.GoogleAuthentication = true + } else { + Config.GoogleAuthentication = false + } + + if Config.OvhAPIKey != "" && Config.OvhAPISecret != "" { + Config.OvhAuthentication = true + } else { + Config.OvhAuthentication = false + } + + if !Config.GoogleAuthentication && !Config.OvhAuthentication { + Config.Authentication = false + } + + if Config.MetadataBackend == "file" { + Config.Authentication = false + } + + Logger().Dump(logger.DEBUG, Config) } diff --git a/server/common/context.go b/server/common/context.go index a53689db..915f82cd 100644 --- a/server/common/context.go +++ b/server/common/context.go @@ -4,7 +4,7 @@ The MIT License (MIT) -Copyright (c) <2015> Copyright holders list can be found in AUTHORS file +Copyright (c) <2015> - Mathieu Bodjikian - Charles-Antoine Mathieu @@ -31,97 +31,124 @@ package common import ( "fmt" + "net" "net/http" "strings" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/context" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/logger" ) var rootLogger = logger.NewLogger() -var rootContext = newRootContext() -// RootContext is a shortcut to get rootContext -func RootContext() *PlikContext { - return rootContext +// Logger return the root logger. +func Logger() *logger.Logger { + return rootLogger } -// Log is a shortcut to get rootLogger -func Log() *logger.Logger { +// GetLogger from the request context ( defaults to rootLogger ). +func GetLogger(ctx *juliet.Context) *logger.Logger { + if log, ok := ctx.Get("logger"); ok { + return log.(*logger.Logger) + } return rootLogger } -// PlikContext is a root-gg logger && logger object -type PlikContext struct { - *context.Context - *logger.Logger +// GetSourceIP from the request context. +func GetSourceIP(ctx *juliet.Context) net.IP { + if sourceIP, ok := ctx.Get("ip"); ok { + return sourceIP.(net.IP) + } + return nil } -func newRootContext() (ctx *PlikContext) { - ctx = new(PlikContext) - ctx.Context = context.NewContext("ROOT") - ctx.Logger = rootLogger - return -} +// IsWhitelisted return true if the IP address in the request context is whitelisted. +func IsWhitelisted(ctx *juliet.Context) bool { + if whitelisted, ok := ctx.Get("IsWhitelisted"); ok { + return whitelisted.(bool) + } -// NewPlikContext creates a new plik context forked from root logger/context -func NewPlikContext(name string, req *http.Request) (ctx *PlikContext) { - ctx = new(PlikContext) - ctx.Context = rootContext.Context.Fork(name).AutoDetach() - ctx.Logger = rootContext.Logger.Copy() - - var sourceIP string - if Config.SourceIPHeader != "" { - // Get source ip from header if behind reverse proxy. 
- sourceIP = req.Header.Get(Config.SourceIPHeader) - if sourceIP != "" { - ctx.Set("RemoteIP", sourceIP) + // Check if the source IP address is in whitelist + whitelisted := false + if len(UploadWhitelist) > 0 { + sourceIP := GetSourceIP(ctx) + if sourceIP != nil { + for _, net := range UploadWhitelist { + if net.Contains(sourceIP) { + whitelisted = true + break + } + } } } else { - remoteAddr := strings.Split(req.RemoteAddr, ":") - if len(remoteAddr) > 0 { - sourceIP = remoteAddr[0] - } + whitelisted = true } - ctx.Set("RemoteIP", sourceIP) - - ctx.UpdateLoggerPrefix("") - return + ctx.Set("IsWhitelisted", whitelisted) + return whitelisted } -// Fork context and copy logger -func (ctx *PlikContext) Fork(name string) (fork *PlikContext) { - fork = new(PlikContext) - fork.Context = ctx.Context.Fork(name) - fork.Logger = ctx.Logger.Copy() - return fork +// GetUser from the request context. +func GetUser(ctx *juliet.Context) *User { + if user, ok := ctx.Get("user"); ok { + return user.(*User) + } + return nil } -// SetUpload is used to display upload id in logger prefix and set it in context -func (ctx *PlikContext) SetUpload(uploadID string) *PlikContext { - ctx.Set("UploadId", uploadID) - ctx.UpdateLoggerPrefix("") - return ctx +// GetToken from the request context. +func GetToken(ctx *juliet.Context) *Token { + if token, ok := ctx.Get("token"); ok { + return token.(*Token) + } + return nil } -// SetFile is used to display file id in logger prefix and set it in context -func (ctx *PlikContext) SetFile(fileName string) *PlikContext { - ctx.Set("FileName", fileName) - ctx.UpdateLoggerPrefix("") - return ctx +// GetFile from the request context. +func GetFile(ctx *juliet.Context) *File { + if file, ok := ctx.Get("file"); ok { + return file.(*File) + } + return nil } -// UpdateLoggerPrefix sets a new prefix for the context logger -func (ctx *PlikContext) UpdateLoggerPrefix(prefix string) { - str := "" - if ip, ok := ctx.Get("RemoteIP"); ok { - str += fmt.Sprintf("[%s]", ip) +// GetUpload from the request context. +func GetUpload(ctx *juliet.Context) *Upload { + if upload, ok := ctx.Get("upload"); ok { + return upload.(*Upload) } - if uploadID, ok := ctx.Get("UploadId"); ok { - str += fmt.Sprintf("[%s]", uploadID) + return nil +} + +// IsRedirectOnFailure return true if the http responde should return +// a http redirect instead of an error string. +func IsRedirectOnFailure(ctx *juliet.Context) bool { + if redirect, ok := ctx.Get("redirect"); ok { + return redirect.(bool) } - if fileName, ok := ctx.Get("FileName"); ok { - str += fmt.Sprintf("[%s]", fileName) + return false +} + +var userAgents = []string{"wget", "curl", "python-urllib", "libwwww-perl", "php", "pycurl"} + +// Fail return write an error to the http response body. +// If IsRedirectOnFailure is true it write a http redirect that can be handled by the web client instead. 
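These accessors replace the old PlikContext plumbing: upstream middleware stores typed values ("logger", "ip", "user", "upload", ...) in a juliet.Context, and handlers read them back through checked type assertions. A sketch of a handler built on this API; the handler itself is hypothetical, only the common.* calls come from this patch:

package handlers

import (
	"net/http"

	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
	"github.com/root-gg/plik/server/common"
)

// GetUploadInfo is a made-up handler illustrating the accessor API.
func GetUploadInfo(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
	log := common.GetLogger(ctx)

	// An upstream middleware is assumed to have stored the upload in the context.
	upload := common.GetUpload(ctx)
	if upload == nil {
		common.Fail(ctx, req, resp, "upload not found", 404)
		return
	}
	if upload.IsExpired() {
		common.Fail(ctx, req, resp, "upload has expired", 404)
		return
	}

	log.Infof("[%v] serving upload %s", common.GetSourceIP(ctx), upload.ID)
	resp.Write(common.NewResult("ok", upload).ToJSON())
}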
+func Fail(ctx *juliet.Context, req *http.Request, resp http.ResponseWriter, message string, status int) { + if IsRedirectOnFailure(ctx) { + // The web client uses http redirect to get errors + // from http redirect and display a nice HTML error message + // But cli clients needs a clean string response + userAgent := strings.ToLower(req.UserAgent()) + redirect := true + for _, ua := range userAgents { + if strings.HasPrefix(userAgent, ua) { + redirect = false + } + } + if redirect { + http.Redirect(resp, req, fmt.Sprintf("/#/?err=%s&errcode=%d&uri=%s", message, status, req.RequestURI), 301) + return + } } - ctx.SetPrefix(str + prefix) + + http.Error(resp, NewResult(message, nil).ToJSONString(), status) } diff --git a/server/common/result.go b/server/common/result.go index 763c80f0..0f39b47e 100644 --- a/server/common/result.go +++ b/server/common/result.go @@ -41,8 +41,7 @@ type Result struct { Value interface{} `json:"value"` } -// NewResult takes a message and a interface and -// creates a new result object with them +// NewResult create a new Result instance func NewResult(message string, value interface{}) (r *Result) { r = new(Result) r.Message = message @@ -55,7 +54,7 @@ func (result *Result) ToJSON() []byte { j, err := utils.ToJson(result) if err != nil { msg := fmt.Sprintf("Unable to serialize result %s to json : %s", result.Message, err) - Log().Warning(msg) + Logger().Warning(msg) return []byte("{message:\"" + msg + "\"}") } diff --git a/server/common/token.go b/server/common/token.go new file mode 100644 index 00000000..efaa9642 --- /dev/null +++ b/server/common/token.go @@ -0,0 +1,60 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+**/ + +package common + +import ( + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid" +) + +// Token provide a very basic authentication mechanism +type Token struct { + Token string `json:"token" bson:"token"` + CreationDate int64 `json:"creationDate" bson:"creationDate"` + Comment string `json:"comment,omitempty" bson:"comment"` +} + +// NewToken create a new Token instance +func NewToken() (t *Token) { + t = new(Token) + return +} + +// Create initialize a new Token +func (t *Token) Create() (err error) { + t.CreationDate = time.Now().Unix() + uuid, err := uuid.NewV4() + if err != nil { + return + } + t.Token = uuid.String() + return +} diff --git a/server/common/upload.go b/server/common/upload.go index 201598aa..e62e5cc5 100644 --- a/server/common/upload.go +++ b/server/common/upload.go @@ -41,14 +41,19 @@ var ( // Upload object type Upload struct { - ID string `json:"id" bson:"id"` - Creation int64 `json:"uploadDate" bson:"uploadDate"` - Comments string `json:"comments" bson:"comments"` - Files map[string]*File `json:"files" bson:"files"` - RemoteIP string `json:"uploadIp,omitempty" bson:"uploadIp"` - ShortURL string `json:"shortUrl" bson:"shortUrl"` - UploadToken string `json:"uploadToken,omitempty" bson:"uploadToken"` - TTL int `json:"ttl" bson:"ttl"` + ID string `json:"id" bson:"id"` + Creation int64 `json:"uploadDate" bson:"uploadDate"` + TTL int `json:"ttl" bson:"ttl"` + ShortURL string `json:"shortUrl" bson:"shortUrl"` + RemoteIP string `json:"uploadIp,omitempty" bson:"uploadIp"` + Comments string `json:"comments" bson:"comments"` + + Files map[string]*File `json:"files" bson:"files"` + + UploadToken string `json:"uploadToken,omitempty" bson:"uploadToken"` + User string `json:"user,omitempty" bson:"user"` + Token string `json:"token,omitempty" bson:"token"` + IsAdmin bool `json:"admin"` Stream bool `json:"stream" bson:"stream"` OneShot bool `json:"oneShot" bson:"oneShot"` @@ -88,6 +93,8 @@ func (upload *Upload) Sanitize() { upload.Password = "" upload.Yubikey = "" upload.UploadToken = "" + upload.User = "" + upload.Token = "" for _, file := range upload.Files { file.Sanitize() } @@ -105,3 +112,13 @@ func GenerateRandomID(length int) string { return string(b) } + +// IsExpired check if the upload is expired +func (upload *Upload) IsExpired() bool { + if upload.TTL > 0 { + if time.Now().Unix() >= (upload.Creation + int64(upload.TTL)) { + return true + } + } + return false +} diff --git a/server/common/user.go b/server/common/user.go new file mode 100644 index 00000000..8ae26a67 --- /dev/null +++ b/server/common/user.go @@ -0,0 +1,79 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package common + +import "net/http" + +// User is a plik user +type User struct { + ID string `json:"id,omitempty" bson:"id"` + Login string `json:"login,omitempty" bson:"login"` + Name string `json:"name,omitempty" bson:"name"` + Email string `json:"email,omitempty" bson:"email"` + Tokens []*Token `json:"tokens,omitempty" bson:"tokens"` +} + +// NewUser create a new user object +func NewUser() (user *User) { + user = new(User) + user.Tokens = make([]*Token, 0) + return +} + +// NewToken add a new token to a user +func (user *User) NewToken() (token *Token) { + token = NewToken() + token.Create() + user.Tokens = append(user.Tokens, token) + return +} + +// Logout delete plik session cookies +func Logout(resp http.ResponseWriter) { + // Delete session cookie + sessionCookie := &http.Cookie{} + sessionCookie.HttpOnly = true + sessionCookie.Secure = true + sessionCookie.Name = "plik-session" + sessionCookie.Value = "" + sessionCookie.MaxAge = -1 + sessionCookie.Path = "/" + http.SetCookie(resp, sessionCookie) + + // Store xsrf token cookie + xsrfCookie := &http.Cookie{} + xsrfCookie.HttpOnly = false + xsrfCookie.Secure = true + xsrfCookie.Name = "plik-xsrf" + xsrfCookie.Value = "" + xsrfCookie.MaxAge = -1 + xsrfCookie.Path = "/" + http.SetCookie(resp, xsrfCookie) +} diff --git a/server/dataBackend/dataBackend.go b/server/dataBackend/dataBackend.go index a3f48b33..7e9f0b79 100644 --- a/server/dataBackend/dataBackend.go +++ b/server/dataBackend/dataBackend.go @@ -32,6 +32,7 @@ package dataBackend import ( "io" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/common" "github.com/root-gg/plik/server/dataBackend/file" "github.com/root-gg/plik/server/dataBackend/stream" @@ -45,10 +46,10 @@ var streamBackend DataBackend // DataBackend interface describes methods that data backends // must implements to be compatible with plik. 
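Taken together, the new Token and User types give plik a minimal token-authentication model: a token is just a UUIDv4 plus a creation date, and a user accumulates tokens. A short sketch using only the API added above (the ID and Login values are made up):

package main

import (
	"fmt"

	"github.com/root-gg/plik/server/common"
)

func main() {
	user := common.NewUser()
	user.ID = "google:1234" // provider-scoped id, hypothetical value
	user.Login = "user@example.com"

	// NewToken creates a UUIDv4-backed Token and appends it to user.Tokens.
	token := user.NewToken()
	token.Comment = "command line client"

	fmt.Printf("user %s now has %d token(s), latest: %s\n",
		user.Login, len(user.Tokens), token.Token)
}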
type DataBackend interface { - GetFile(ctx *common.PlikContext, u *common.Upload, id string) (rc io.ReadCloser, err error) - AddFile(ctx *common.PlikContext, u *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) - RemoveFile(ctx *common.PlikContext, u *common.Upload, id string) (err error) - RemoveUpload(ctx *common.PlikContext, u *common.Upload) (err error) + GetFile(ctx *juliet.Context, u *common.Upload, id string) (rc io.ReadCloser, err error) + AddFile(ctx *juliet.Context, u *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) + RemoveFile(ctx *juliet.Context, u *common.Upload, id string) (err error) + RemoveUpload(ctx *juliet.Context, u *common.Upload) (err error) } // GetDataBackend return the primary data backend @@ -72,7 +73,7 @@ func Initialize() { case "weedfs": dataBackend = weedfs.NewWeedFsBackend(common.Config.DataBackendConfig) default: - common.Log().Fatalf("Invalid data backend %s", common.Config.DataBackend) + common.Logger().Fatalf("Invalid data backend %s", common.Config.DataBackend) } } if common.Config.StreamMode { diff --git a/server/dataBackend/file/file.go b/server/dataBackend/file/file.go index e1602e6a..dbfab54e 100644 --- a/server/dataBackend/file/file.go +++ b/server/dataBackend/file/file.go @@ -30,10 +30,11 @@ THE SOFTWARE. package file import ( + "fmt" "io" "os" - "fmt" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/common" ) @@ -52,13 +53,13 @@ func NewFileBackend(config map[string]interface{}) (fb *Backend) { // GetFile implementation for file data backend will search // on filesystem the asked file and return its reading filehandle -func (fb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id string) (file io.ReadCloser, err error) { - defer ctx.Finalize(err) +func (fb *Backend) GetFile(ctx *juliet.Context, upload *common.Upload, id string) (file io.ReadCloser, err error) { + log := common.GetLogger(ctx) // Get upload directory directory, err := fb.getDirectoryFromUploadID(upload.ID) if err != nil { - ctx.Warningf("Unable to get upload directory : %s", err) + log.Warningf("Unable to get upload directory : %s", err) return } @@ -69,7 +70,7 @@ func (fb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id st // to the client response body file, err = os.Open(fullPath) if err != nil { - err = ctx.EWarningf("Unable to open file %s : %s", fullPath, err) + err = log.EWarningf("Unable to open file %s : %s", fullPath, err) return } @@ -78,13 +79,13 @@ func (fb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id st // AddFile implementation for file data backend will creates a new file for the given upload // and save it on filesystem with the given file reader -func (fb *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) { - defer ctx.Finalize(err) +func (fb *Backend) AddFile(ctx *juliet.Context, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) { + log := common.GetLogger(ctx) // Get upload directory directory, err := fb.getDirectoryFromUploadID(upload.ID) if err != nil { - ctx.Warningf("Unable to get upload directory : %s", err) + log.Warningf("Unable to get upload directory : %s", err) return } @@ -96,16 +97,16 @@ func (fb *Backend) AddFile(ctx *common.PlikContext, 
upload *common.Upload, file if err != nil { err = os.MkdirAll(directory, 0777) if err != nil { - err = ctx.EWarningf("Unable to create upload directory %s : %s", directory, err) + err = log.EWarningf("Unable to create upload directory %s : %s", directory, err) return } - ctx.Infof("Folder %s successfully created", directory) + log.Infof("Folder %s successfully created", directory) } // Create file out, err := os.Create(fullPath) if err != nil { - err = ctx.EWarningf("Unable to create file %s : %s", fullPath, err) + err = log.EWarningf("Unable to create file %s : %s", fullPath, err) return } @@ -113,23 +114,23 @@ func (fb *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, file // to the file system _, err = io.Copy(out, fileReader) if err != nil { - err = ctx.EWarningf("Unable to save file %s : %s", fullPath, err) + err = log.EWarningf("Unable to save file %s : %s", fullPath, err) return } - ctx.Infof("File %s successfully saved", fullPath) + log.Infof("File %s successfully saved", fullPath) return } // RemoveFile implementation for file data backend will delete the given // file from filesystem -func (fb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, id string) (err error) { - defer ctx.Finalize(err) +func (fb *Backend) RemoveFile(ctx *juliet.Context, upload *common.Upload, id string) (err error) { + log := common.GetLogger(ctx) // Get upload directory directory, err := fb.getDirectoryFromUploadID(upload.ID) if err != nil { - ctx.Warningf("Unable to get upload directory : %s", err) + log.Warningf("Unable to get upload directory : %s", err) return } @@ -139,10 +140,11 @@ func (fb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, id // Remove file err = os.Remove(fullPath) if err != nil { - err = ctx.EWarningf("Unable to remove %s : %s", fullPath, err) + err = log.EWarningf("Unable to remove %s : %s", fullPath, err) return } - ctx.Infof("File %s successfully removed", fullPath) + + log.Infof("File %s successfully removed", fullPath) return } @@ -150,23 +152,25 @@ func (fb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, id // RemoveUpload implementation for file data backend will // delete the whole upload. Given that an upload is a directory, // we remove the whole directory at once. 
-func (fb *Backend) RemoveUpload(ctx *common.PlikContext, upload *common.Upload) (err error) { - defer ctx.Finalize(err) +func (fb *Backend) RemoveUpload(ctx *juliet.Context, upload *common.Upload) (err error) { + log := common.GetLogger(ctx) // Get upload directory fullPath, err := fb.getDirectoryFromUploadID(upload.ID) if err != nil { - ctx.Warningf("Unable to get upload directory : %s", err) + log.Warningf("Unable to get upload directory : %s", err) return } // Remove everything at once err = os.RemoveAll(fullPath) if err != nil { - err = ctx.EWarningf("Unable to remove %s : %s", fullPath, err) + err = log.EWarningf("Unable to remove %s : %s", fullPath, err) return } + log.Infof("Upload %s successfully removed", fullPath) + return } diff --git a/server/dataBackend/stream/stream.go b/server/dataBackend/stream/stream.go index dabb683c..619f9962 100644 --- a/server/dataBackend/stream/stream.go +++ b/server/dataBackend/stream/stream.go @@ -32,6 +32,7 @@ package stream import ( "io" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/common" ) @@ -52,12 +53,12 @@ func NewStreamBackend(config map[string]interface{}) (sb *Backend) { // GetFile implementation for steam data backend will search // on filesystem the requested steam and return its reading filehandle -func (sb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id string) (stream io.ReadCloser, err error) { - defer ctx.Finalize(err) +func (sb *Backend) GetFile(ctx *juliet.Context, upload *common.Upload, id string) (stream io.ReadCloser, err error) { + log := common.GetLogger(ctx) storeID := upload.ID + "/" + id stream, ok := sb.Store[storeID] if !ok { - err = ctx.EWarningf("Missing reader") + err = log.EWarningf("Missing reader") } delete(sb.Store, id) return @@ -65,30 +66,26 @@ func (sb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id st // AddFile implementation for steam data backend will creates a new steam for the given upload // and save it on filesystem with the given steam reader -func (sb *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, file *common.File, stream io.Reader) (backendDetails map[string]interface{}, err error) { - defer ctx.Finalize(err) +func (sb *Backend) AddFile(ctx *juliet.Context, upload *common.Upload, file *common.File, stream io.Reader) (backendDetails map[string]interface{}, err error) { + log := common.GetLogger(ctx) backendDetails = make(map[string]interface{}) id := upload.ID + "/" + file.ID pipeReader, pipeWriter := io.Pipe() sb.Store[id] = pipeReader defer delete(sb.Store, id) + log.Info("Stream data backend waiting for download") // This will block until download begins _, err = io.Copy(pipeWriter, stream) pipeWriter.Close() return } -// RemoveFile implementation for steam data backend will delete the given -// steam from filesystem -func (sb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, id string) (err error) { - defer ctx.Finalize(err) +// RemoveFile is not implemented +func (sb *Backend) RemoveFile(ctx *juliet.Context, upload *common.Upload, id string) (err error) { return } -// RemoveUpload implementation for steam data backend will -// delete the whole upload. Given that an upload is a directory, -// we remove the whole directory at once. 
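The stream backend above is essentially a rendezvous built on io.Pipe: AddFile parks the read half in sb.Store and blocks in io.Copy until a concurrent GetFile hands that reader to a downloader. A self-contained toy version of the same mechanism — the store map, keys and goroutine layout are illustrative, not plik code:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

func main() {
    store := make(map[string]io.ReadCloser)

    pipeReader, pipeWriter := io.Pipe()
    store["uploadID/fileID"] = pipeReader

    // Uploader side : io.Copy blocks until the reader side is drained
    go func() {
        _, err := io.Copy(pipeWriter, strings.NewReader("file content"))
        pipeWriter.CloseWithError(err) // a nil error closes the pipe with io.EOF
    }()

    // Downloader side : pick the parked reader up and consume it
    reader := store["uploadID/fileID"]
    delete(store, "uploadID/fileID")
    content, _ := ioutil.ReadAll(reader)
    reader.Close()

    fmt.Println(string(content)) // file content
}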
-func (sb *Backend) RemoveUpload(ctx *common.PlikContext, upload *common.Upload) (err error) {
-    defer ctx.Finalize(err)
+// RemoveUpload is not implemented
+func (sb *Backend) RemoveUpload(ctx *juliet.Context, upload *common.Upload) (err error) {
     return
 }
diff --git a/server/dataBackend/swift/swift.go b/server/dataBackend/swift/swift.go
index 81d78e2c..ed26452b 100644
--- a/server/dataBackend/swift/swift.go
+++ b/server/dataBackend/swift/swift.go
@@ -33,6 +33,7 @@ import (
     "io"
 
     "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/ncw/swift"
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
     "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
     "github.com/root-gg/plik/server/common"
 )
@@ -43,7 +44,7 @@ type Backend struct {
     connection swift.Connection
 }
 
-// NewSwiftBackend instantiate a new Openstack Swift Data Backend
+// NewSwiftBackend instantiates a new OpenStack Swift Data Backend
 // from configuration passed as argument
 func NewSwiftBackend(config map[string]interface{}) (sb *Backend) {
     sb = new(Backend)
@@ -54,12 +55,8 @@
 }
 
 // GetFile implementation for Swift Data Backend
-func (sb *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, fileID string) (reader io.ReadCloser, err error) {
-    defer func() {
-        if err != nil {
-            ctx.Finalize(err)
-        }
-    }() // Finalize the context only if error, else let it be finalized by the download goroutine
+func (sb *Backend) GetFile(ctx *juliet.Context, upload *common.Upload, fileID string) (reader io.ReadCloser, err error) {
+    log := common.GetLogger(ctx)
 
     err = sb.auth(ctx)
     if err != nil {
@@ -69,11 +66,10 @@
     reader, pipeWriter := io.Pipe()
     uuid := sb.getFileID(upload, fileID)
     go func() {
-        defer ctx.Finalize(err)
         _, err = sb.connection.ObjectGet(sb.config.Container, uuid, pipeWriter, true, nil)
         defer pipeWriter.Close()
         if err != nil {
-            err = ctx.EWarningf("Unable to get object %s : %s", uuid, err)
+            err = log.EWarningf("Unable to get object %s : %s", uuid, err)
             return
         }
     }()
@@ -82,8 +78,8 @@
 }
 
 // AddFile implementation for Swift Data Backend
-func (sb *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) {
-    defer ctx.Finalize(err)
+func (sb *Backend) AddFile(ctx *juliet.Context, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) {
+    log := common.GetLogger(ctx)
 
     err = sb.auth(ctx)
     if err != nil {
@@ -95,18 +91,18 @@
     _, err = io.Copy(object, fileReader)
     if err != nil {
-        err = ctx.EWarningf("Unable to save object %s : %s", uuid, err)
+        err = log.EWarningf("Unable to save object %s : %s", uuid, err)
         return
     }
     object.Close()
 
-    ctx.Infof("Object %s successfully saved", uuid)
+    log.Infof("Object %s successfully saved", uuid)
     return
 }
 
 // RemoveFile implementation for Swift Data Backend
-func (sb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, fileID string) (err error) {
-    defer ctx.Finalize(err)
+func (sb *Backend) RemoveFile(ctx *juliet.Context, upload *common.Upload, fileID string) (err error) {
+    log := common.GetLogger(ctx)
 
     err = sb.auth(ctx)
     if err != nil {
@@ -116,7 +112,7 @@ func (sb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, fi
     uuid := sb.getFileID(upload, fileID)
     err = sb.connection.ObjectDelete(sb.config.Container, uuid)
     if err != nil {
-        err = ctx.EWarningf("Unable to remove object %s : %s", uuid, err)
+        err = log.EWarningf("Unable to remove object %s : %s", uuid, err)
         return
     }
@@ -125,8 +121,8 @@ func (sb *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, fi
 
 // RemoveUpload implementation for Swift Data Backend
 // Iterates on each upload file and call RemoveFile
-func (sb *Backend) RemoveUpload(ctx *common.PlikContext, upload *common.Upload) (err error) {
-    defer ctx.Finalize(err)
+func (sb *Backend) RemoveUpload(ctx *juliet.Context, upload *common.Upload) (err error) {
+    log := common.GetLogger(ctx)
 
     err = sb.auth(ctx)
     if err != nil {
@@ -137,7 +133,7 @@
         uuid := sb.getFileID(upload, fileID)
         err = sb.connection.ObjectDelete(sb.config.Container, uuid)
         if err != nil {
-            err = ctx.EWarningf("Unable to remove object %s : %s", uuid, err)
+            err = log.EWarningf("Unable to remove object %s : %s", uuid, err)
         }
     }
@@ -148,9 +144,8 @@
     return upload.ID + "." + fileID
 }
 
-func (sb *Backend) auth(ctx *common.PlikContext) (err error) {
-    timer := ctx.Time("auth")
-    defer timer.Stop()
+func (sb *Backend) auth(ctx *juliet.Context) (err error) {
+    log := common.GetLogger(ctx)
 
     if sb.connection.Authenticated() {
         return
@@ -166,7 +161,7 @@
     // Authenticate
     err = connection.Authenticate()
     if err != nil {
-        err = ctx.EWarningf("Unable to autenticate : %s", err)
+        err = log.EWarningf("Unable to authenticate : %s", err)
         return err
     }
     sb.connection = connection
diff --git a/server/dataBackend/weedfs/weedfs.go b/server/dataBackend/weedfs/weedfs.go
index d9aa81b9..6b273dfc 100644
--- a/server/dataBackend/weedfs/weedfs.go
+++ b/server/dataBackend/weedfs/weedfs.go
@@ -39,6 +39,7 @@ import (
     "net/url"
     "strings"
 
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
     "github.com/root-gg/plik/server/common"
 )
@@ -60,21 +61,21 @@
 }
 
 // GetFile implementation for WeedFS Data Backend
-func (weedFs *Backend) GetFile(ctx *common.PlikContext, upload *common.Upload, id string) (reader io.ReadCloser, err error) {
-    defer ctx.Finalize(err)
+func (weedFs *Backend) GetFile(ctx *juliet.Context, upload *common.Upload, id string) (reader io.ReadCloser, err error) {
+    log := common.GetLogger(ctx)
 
     file := upload.Files[id]
 
     // Get WeedFS volume from upload metadata
     if file.BackendDetails["WeedFsVolume"] == nil {
-        err = ctx.EWarningf("Missing WeedFS volume from backend details")
+        err = log.EWarningf("Missing WeedFS volume from backend details")
         return
     }
     weedFsVolume := file.BackendDetails["WeedFsVolume"].(string)
 
     // Get WeedFS file id from upload metadata
     if file.BackendDetails["WeedFsFileID"] == nil {
-        err = ctx.EWarningf("Missing WeedFS file id from backend details")
+        err = log.EWarningf("Missing WeedFS file id from backend details")
         return
     }
     WeedFsFileID := file.BackendDetails["WeedFsFileID"].(string)
@@ -82,17 +83,17 @@
     // Get WeedFS volume url
     volumeURL, err := weedFs.getvolumeURL(ctx, weedFsVolume)
     if err != nil {
-        err = ctx.EWarningf("Unable to get WeedFS volume url %s : %s", weedFsVolume)
+        err = log.EWarningf("Unable to get WeedFS volume url %s : %s", weedFsVolume, err)
         return
     }
 
     // Get file from WeedFS volume, the response will be
     // piped directly to the client response body
     fileCompleteURL := "http://" + volumeURL + "/" + weedFsVolume + "," + WeedFsFileID
-    ctx.Infof("Getting WeedFS file from : %s", fileCompleteURL)
+    log.Infof("Getting WeedFS file from : %s", fileCompleteURL)
     resp, err := http.Get(fileCompleteURL)
     if err != nil {
-        err = ctx.EWarningf("Error while downloading file from WeedFS at %s : %s", fileCompleteURL, err)
+        err = log.EWarningf("Error while downloading file from WeedFS at %s : %s", fileCompleteURL, err)
         return
     }
@@ -100,22 +101,18 @@
 }
 
 // AddFile implementation for WeedFS Data Backend
-func (weedFs *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) {
-    defer func() {
-        if err != nil {
-            ctx.Finalize(err)
-        }
-    }() // Finalize the context only if error, else let it be finalized by the upload goroutine
+func (weedFs *Backend) AddFile(ctx *juliet.Context, upload *common.Upload, file *common.File, fileReader io.Reader) (backendDetails map[string]interface{}, err error) {
+    log := common.GetLogger(ctx)
 
     backendDetails = make(map[string]interface{})
 
     // Request a volume and a new file id from a WeedFS master
     assignURL := weedFs.Config.MasterURL + "/dir/assign?replication=" + weedFs.Config.ReplicationPattern
-    ctx.Debugf("Getting volume and file id from WeedFS master at %s", assignURL)
+    log.Debugf("Getting volume and file id from WeedFS master at %s", assignURL)
 
     resp, err := client.Post(assignURL, "", nil)
     if err != nil {
-        err = ctx.EWarningf("Error while getting id from WeedFS master at %s : %s", assignURL, err)
+        err = log.EWarningf("Error while getting id from WeedFS master at %s : %s", assignURL, err)
         return
     }
     defer resp.Body.Close()
@@ -123,7 +120,7 @@
     // Read response body
     bodyStr, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        err = ctx.EWarningf("Unable to read response body from WeedFS master at %s : %s", assignURL, err)
+        err = log.EWarningf("Unable to read response body from WeedFS master at %s : %s", assignURL, err)
         return
     }
@@ -131,7 +128,7 @@
     responseMap := make(map[string]interface{})
     err = json.Unmarshal(bodyStr, &responseMap)
     if err != nil {
-        err = ctx.EWarningf("Unable to unserialize json response \"%s\" from WeedFS master at %s : %s", bodyStr, assignURL, err)
+        err = log.EWarningf("Unable to unserialize json response \"%s\" from WeedFS master at %s : %s", bodyStr, assignURL, err)
         return
     }
@@ -141,51 +138,51 @@
         backendDetails["WeedFsVolume"] = splitVolumeFromID[0]
         backendDetails["WeedFsFileID"] = splitVolumeFromID[1]
     } else {
-        err = ctx.EWarningf("Invalid fid from WeedFS master response \"%s\" at %s", bodyStr, assignURL)
+        err = log.EWarningf("Invalid fid from WeedFS master response \"%s\" at %s", bodyStr, assignURL)
         return
     }
     } else {
-        err = ctx.EWarningf("Missing fid from WeedFS master response \"%s\" at %", bodyStr, assignURL)
+        err = log.EWarningf("Missing fid from WeedFS master response \"%s\" at %s", bodyStr, assignURL)
         return
     }
 
     // Construct upload url
     if responseMap["publicUrl"] == nil
|| responseMap["publicUrl"].(string) == "" { - err = ctx.EWarningf("Missing publicUrl from WeedFS master response \"%s\" at %s", bodyStr, assignURL) + err = log.EWarningf("Missing publicUrl from WeedFS master response \"%s\" at %s", bodyStr, assignURL) return } fileURL := "http://" + responseMap["publicUrl"].(string) + "/" + responseMap["fid"].(string) var URL *url.URL URL, err = url.Parse(fileURL) if err != nil { - err = ctx.EWarningf("Unable to construct WeedFS upload url \"%s\"", fileURL) + err = log.EWarningf("Unable to construct WeedFS upload url \"%s\"", fileURL) return } - ctx.Infof("Uploading file %s to volume %s to WeedFS at %s", backendDetails["WeedFsFileID"], backendDetails["WeedFsVolume"], fileURL) + log.Infof("Uploading file %s to volume %s to WeedFS at %s", backendDetails["WeedFsFileID"], backendDetails["WeedFsVolume"], fileURL) // Pipe the uploaded file from the client request body // to the WeedFS request body without buffering pipeReader, pipeWriter := io.Pipe() multipartWriter := multipart.NewWriter(pipeWriter) go func() { - defer ctx.Finalize(err) + log := common.GetLogger(ctx) filePart, err := multipartWriter.CreateFormFile("file", file.Name) if err != nil { - ctx.Warningf("Unable to create multipart form : %s", err) + log.Warningf("Unable to create multipart form : %s", err) return } _, err = io.Copy(filePart, fileReader) if err != nil { - ctx.Warningf("Unable to copy file to WeedFS request body : %s", err) + log.Warningf("Unable to copy file to WeedFS request body : %s", err) pipeWriter.CloseWithError(err) return } err = multipartWriter.Close() if err != nil { - ctx.Warningf("Unable to close multipartWriter : %s", err) + log.Warningf("Unable to close multipartWriter : %s", err) } pipeWriter.CloseWithError(err) }() @@ -193,13 +190,13 @@ func (weedFs *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, f // Upload file to WeedFS volume req, err := http.NewRequest("PUT", URL.String(), pipeReader) if err != nil { - err = ctx.EWarningf("Unable to create PUT request to %s : %s", URL.String(), err) + err = log.EWarningf("Unable to create PUT request to %s : %s", URL.String(), err) return } req.Header.Add("Content-Type", multipartWriter.FormDataContentType()) resp, err = client.Do(req) if err != nil { - err = ctx.EWarningf("Unable to upload file to WeedFS at %s : %s", URL.String(), err) + err = log.EWarningf("Unable to upload file to WeedFS at %s : %s", URL.String(), err) return } defer resp.Body.Close() @@ -208,21 +205,21 @@ func (weedFs *Backend) AddFile(ctx *common.PlikContext, upload *common.Upload, f } // RemoveFile implementation for WeedFS Data Backend -func (weedFs *Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, id string) (err error) { - defer ctx.Finalize(err) +func (weedFs *Backend) RemoveFile(ctx *juliet.Context, upload *common.Upload, id string) (err error) { + log := common.GetLogger(ctx) // Get file metadata file := upload.Files[id] // Get WeedFS volume and file id from upload metadata if file.BackendDetails["WeedFsVolume"] == nil { - err = ctx.EWarningf("Missing WeedFS volume from backend details") + err = log.EWarningf("Missing WeedFS volume from backend details") return } weedFsVolume := file.BackendDetails["WeedFsVolume"].(string) if file.BackendDetails["WeedFsFileID"] == nil { - err = ctx.EWarningf("Missing WeedFS file id from backend details") + err = log.EWarningf("Missing WeedFS file id from backend details") return } WeedFsFileID := file.BackendDetails["WeedFsFileID"].(string) @@ -238,21 +235,21 @@ func (weedFs 
*Backend) RemoveFile(ctx *common.PlikContext, upload *common.Upload
     var URL *url.URL
     URL, err = url.Parse(fileURL)
     if err != nil {
-        err = ctx.EWarningf("Unable to construct WeedFS url \"%s\"", fileURL)
+        err = log.EWarningf("Unable to construct WeedFS url \"%s\"", fileURL)
         return
     }
 
-    ctx.Infof("Removing file %s from WeedFS volume %s at %s", WeedFsFileID, weedFsVolume, fileURL)
+    log.Infof("Removing file %s from WeedFS volume %s at %s", WeedFsFileID, weedFsVolume, fileURL)
 
     // Remove file from WeedFS volume
     req, err := http.NewRequest("DELETE", URL.String(), nil)
     if err != nil {
-        err = ctx.EWarningf("Unable to create DELETE request to %s : %s", URL.String(), err)
+        err = log.EWarningf("Unable to create DELETE request to %s : %s", URL.String(), err)
         return
     }
     resp, err := client.Do(req)
     if err != nil {
-        err = ctx.EWarningf("Unable to delete file from WeedFS volume at %s : %s", URL.String(), err)
+        err = log.EWarningf("Unable to delete file from WeedFS volume at %s : %s", URL.String(), err)
         return
     }
     resp.Body.Close()
@@ -262,11 +259,9 @@
 
 // RemoveUpload implementation for WeedFS Data Backend
 // Iterates on every file and call RemoveFile
-func (weedFs *Backend) RemoveUpload(ctx *common.PlikContext, upload *common.Upload) (err error) {
-    defer ctx.Finalize(err)
-
+func (weedFs *Backend) RemoveUpload(ctx *juliet.Context, upload *common.Upload) (err error) {
     for fileID := range upload.Files {
-        err = weedFs.RemoveFile(ctx.Fork("remove file"), upload, fileID)
+        err = weedFs.RemoveFile(ctx, upload, fileID)
         if err != nil {
             return
         }
@@ -275,15 +270,14 @@
     return nil
 }
 
-func (weedFs *Backend) getvolumeURL(ctx *common.PlikContext, volumeID string) (URL string, err error) {
-    timer := ctx.Time("get volume url")
-    defer timer.Stop()
+func (weedFs *Backend) getvolumeURL(ctx *juliet.Context, volumeID string) (URL string, err error) {
+    log := common.GetLogger(ctx)
 
     // Ask a WeedFS master the volume urls
     URL = weedFs.Config.MasterURL + "/dir/lookup?volumeId=" + volumeID
     resp, err := client.Post(URL, "", nil)
     if err != nil {
-        err = ctx.EWarningf("Unable to get volume %s url from WeedFS master at %s : %s", volumeID, URL, err)
+        err = log.EWarningf("Unable to get volume %s url from WeedFS master at %s : %s", volumeID, URL, err)
         return
     }
     defer resp.Body.Close()
@@ -291,7 +285,7 @@
     // Read response body
     bodyStr, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        err = ctx.EWarningf("Unable to read response from WeedFS master at %s : %s", volumeID, URL, err)
+        err = log.EWarningf("Unable to read response for volume %s from WeedFS master at %s : %s", volumeID, URL, err)
         return
     }
@@ -299,7 +293,7 @@
     responseMap := make(map[string]interface{})
     err = json.Unmarshal(bodyStr, &responseMap)
     if err != nil {
-        err = ctx.EWarningf("Unable to unserialize json response \"%s\"from WeedFS master at %s : %s", bodyStr, URL, err)
+        err = log.EWarningf("Unable to unserialize json response \"%s\" from WeedFS master at %s : %s", bodyStr, URL, err)
         return
     }
@@ -307,7 +301,7 @@
     // available url for a given volume
     var urlsFound []string
     if responseMap["locations"] == nil {
-        err = ctx.EWarningf("Missing url from WeedFS master response \"%s\" at %s",
bodyStr, URL) + err = log.EWarningf("Missing url from WeedFS master response \"%s\" at %s", bodyStr, URL) return } if locationsArray, ok := responseMap["locations"].([]interface{}); ok { @@ -322,7 +316,7 @@ func (weedFs *Backend) getvolumeURL(ctx *common.PlikContext, volumeID string) (U } } if len(urlsFound) == 0 { - err = ctx.EWarningf("No url found for WeedFS volume %s", volumeID) + err = log.EWarningf("No url found for WeedFS volume %s", volumeID) return } diff --git a/server/gen_build_info.sh b/server/gen_build_info.sh index 83d9d6c8..77d9cee9 100755 --- a/server/gen_build_info.sh +++ b/server/gen_build_info.sh @@ -34,6 +34,7 @@ user=$(whoami) host=$(hostname) repo=$(pwd) date=$(date "+%s") +goVersion=$(go version) isRelease=false isMint=false @@ -121,6 +122,8 @@ type BuildInfo struct { IsRelease bool \`json:"isRelease"\` IsMint bool \`json:"isMint"\` + GoVersion string \`json:"goVersion"\` + Clients []*Client \`json:"clients"\` } @@ -144,6 +147,7 @@ func GetBuildInfo() *BuildInfo { buildInfo.User = "$user" buildInfo.Host = "$host" + buildInfo.GoVersion = "$goVersion" buildInfo.GitShortRevision = "$short_rev" buildInfo.GitFullRevision = "$full_rev" diff --git a/server/handlers/addFile.go b/server/handlers/addFile.go new file mode 100644 index 00000000..c3201b35 --- /dev/null +++ b/server/handlers/addFile.go @@ -0,0 +1,234 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package handlers + +import ( + "crypto/md5" + "fmt" + "io" + "net/http" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/dataBackend" + "github.com/root-gg/plik/server/metadataBackend" +) + +// AddFile add a file to an existing upload. +func AddFile(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + user := common.GetUser(ctx) + if user == nil && !common.IsWhitelisted(ctx) { + log.Warning("Unable to add file from untrusted source IP address") + common.Fail(ctx, req, resp, "Unable to add file from untrusted source IP address. 
Please login or use a cli token.", 403)
+        return
+    }
+
+    // Get upload from context
+    upload := common.GetUpload(ctx)
+    if upload == nil {
+        // This should never happen
+        log.Critical("Missing upload in AddFileHandler")
+        common.Fail(ctx, req, resp, "Internal error", 500)
+        return
+    }
+
+    // Check authorization
+    if !upload.IsAdmin {
+        log.Warning("Unable to add file : unauthorized")
+        common.Fail(ctx, req, resp, "You are not allowed to add file to this upload", 403)
+        return
+    }
+
+    // Get the file id from the url params
+    vars := mux.Vars(req)
+    fileID := vars["fileID"]
+
+    // Create a new file object
+    var newFile *common.File
+    if fileID == "" {
+        newFile = common.NewFile()
+        newFile.Type = "application/octet-stream"
+    } else {
+        if _, ok := upload.Files[fileID]; ok {
+            newFile = upload.Files[fileID]
+        } else {
+            log.Warningf("Invalid file id %s", fileID)
+            common.Fail(ctx, req, resp, "Invalid file id", 404)
+            return
+        }
+    }
+
+    // Update request logger prefix
+    prefix := fmt.Sprintf("%s[%s]", log.Prefix, newFile.ID)
+    log.SetPrefix(prefix)
+
+    ctx.Set("file", newFile)
+
+    // Get file handle from multipart request
+    var file io.Reader
+    multiPartReader, err := req.MultipartReader()
+    if err != nil {
+        log.Warningf("Failed to get file from multipart request : %s", err)
+        common.Fail(ctx, req, resp, "Failed to get file from multipart request", 400)
+        return
+    }
+
+    // Read multipart body until the "file" part
+    for {
+        part, errPart := multiPartReader.NextPart()
+        if errPart == io.EOF {
+            break
+        }
+        if part.FormName() == "file" {
+            file = part
+
+            // Check file name length
+            if len(part.FileName()) > 1024 {
+                log.Warning("File name is too long")
+                common.Fail(ctx, req, resp, "File name is too long. Maximum length is 1024 characters", 400)
+                return
+            }
+
+            newFile.Name = part.FileName()
+            break
+        }
+    }
+    if file == nil {
+        log.Warning("Missing file from multipart request")
+        common.Fail(ctx, req, resp, "Missing file from multipart request", 400)
+        return
+    }
+    if newFile.Name == "" {
+        log.Warning("Missing file name from multipart request")
+        common.Fail(ctx, req, resp, "Missing file name from multipart request", 400)
+        return
+    }
+
+    // Update request logger prefix
+    prefix = fmt.Sprintf("%s[%s]", log.Prefix, newFile.Name)
+    log.SetPrefix(prefix)
+
+    // Pipe file data from the request body to a preprocessing goroutine
+    //  - Guess content type
+    //  - Compute md5sum
+    //  - Limit upload size
+    preprocessReader, preprocessWriter := io.Pipe()
+    md5Hash := md5.New()
+    totalBytes := 0
+    go func() {
+        for {
+            buf := make([]byte, 1024)
+            bytesRead, err := file.Read(buf)
+            if err != nil {
+                if err != io.EOF {
+                    log.Warningf("Unable to read data from request body : %s", err)
+                }
+
+                preprocessWriter.Close()
+                return
+            }
+
+            // Detect the content-type using the 512 first bytes
+            if totalBytes == 0 {
+                newFile.Type = http.DetectContentType(buf)
+            }
+
+            // Increment size
+            totalBytes += bytesRead
+
+            // Compute md5sum
+            md5Hash.Write(buf[:bytesRead])
+
+            // Check upload max size limit
+            if int64(totalBytes) > common.Config.MaxFileSize {
+                err = fmt.Errorf("File too big (limit is set to %d bytes)", common.Config.MaxFileSize)
+                log.Warning(err.Error())
+                preprocessWriter.CloseWithError(err)
+                return
+            }
+
+            // Pass file data to data backend
+            preprocessWriter.Write(buf[:bytesRead])
+        }
+    }()
+
+    // Save file in the data backend
+    var backend dataBackend.DataBackend
+    if upload.Stream {
+        backend = dataBackend.GetStreamBackend()
+    } else {
+        backend = dataBackend.GetDataBackend()
+    }
+    backendDetails,
err := backend.AddFile(ctx, upload, newFile, preprocessReader) + if err != nil { + log.Warningf("Unable to save file : %s", err) + common.Fail(ctx, req, resp, "Unable to save file", 500) + return + } + + // Fill-in file information + newFile.CurrentSize = int64(totalBytes) + if upload.Stream { + newFile.Status = "downloaded" + } else { + newFile.Status = "uploaded" + } + newFile.Md5 = fmt.Sprintf("%x", md5Hash.Sum(nil)) + newFile.UploadDate = time.Now().Unix() + newFile.BackendDetails = backendDetails + + // Update upload metadata + upload.Files[newFile.ID] = newFile + err = metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx, upload, newFile) + if err != nil { + log.Warningf("Unable to update metadata : %s", err) + common.Fail(ctx, req, resp, "Unable to update upload metadata", 500) + return + } + + // Remove all private information (ip, data backend details, ...) before + // sending metadata back to the client + newFile.Sanitize() + + // Print file metadata in the json response. + var json []byte + if json, err = utils.ToJson(newFile); err == nil { + resp.Write(json) + } else { + log.Warningf("Unable to serialize json response : %s", err) + common.Fail(ctx, req, resp, "Unable to serialize json response", 500) + return + } +} diff --git a/server/handlers/createUpload.go b/server/handlers/createUpload.go new file mode 100644 index 00000000..7559765f --- /dev/null +++ b/server/handlers/createUpload.go @@ -0,0 +1,247 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package handlers + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" + "github.com/root-gg/plik/server/shortenBackend" +) + +// CreateUpload create a new upload +func CreateUpload(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + user := common.GetUser(ctx) + if user == nil && !common.IsWhitelisted(ctx) { + log.Warning("Unable to create upload from untrusted source IP address") + common.Fail(ctx, req, resp, "Unable to create upload from untrusted source IP address. 
Please login or use a cli token.", 403)
+        return
+    }
+
+    upload := common.NewUpload()
+    // Save upload in the request context
+    ctx.Set("upload", upload)
+
+    // Read request body
+    defer req.Body.Close()
+    req.Body = http.MaxBytesReader(resp, req.Body, 1048576)
+    body, err := ioutil.ReadAll(req.Body)
+    if err != nil {
+        log.Warningf("Unable to read request body : %s", err)
+        common.Fail(ctx, req, resp, "Unable to read request body", 500)
+        return
+    }
+
+    // Deserialize json body
+    if len(body) > 0 {
+        err = json.Unmarshal(body, upload)
+        if err != nil {
+            log.Warningf("Unable to deserialize request body : %s", err)
+            common.Fail(ctx, req, resp, "Unable to deserialize json request body", 500)
+            return
+        }
+    }
+
+    // Set upload id, creation date, upload token, ...
+    upload.Create()
+
+    // Update request logger prefix
+    prefix := fmt.Sprintf("%s[%s]", log.Prefix, upload.ID)
+    log.SetPrefix(prefix)
+    ctx.Set("upload", upload)
+
+    // Set upload remote IP
+    upload.RemoteIP = common.GetSourceIP(ctx).String()
+
+    // Set upload user and token
+    if user != nil {
+        upload.User = user.ID
+        token := common.GetToken(ctx)
+        if token != nil {
+            upload.Token = token.Token
+        }
+    }
+
+    if upload.Stream {
+        if !common.Config.StreamMode {
+            log.Warning("Stream mode is not enabled")
+            common.Fail(ctx, req, resp, "Stream mode is not enabled", 403)
+            return
+        }
+        upload.OneShot = true
+    }
+
+    // TTL = Time in seconds before the upload expiration
+    //  0 -> No ttl specified : default value from configuration
+    // -1 -> No expiration : checking with configuration if that's ok
+    switch upload.TTL {
+    case 0:
+        upload.TTL = common.Config.DefaultTTL
+    case -1:
+        if common.Config.MaxTTL != -1 {
+            log.Warningf("Cannot set infinite ttl (maximum allowed is : %d)", common.Config.MaxTTL)
+            common.Fail(ctx, req, resp, fmt.Sprintf("Cannot set infinite ttl (maximum allowed is : %d)", common.Config.MaxTTL), 400)
+            return
+        }
+    default:
+        if upload.TTL <= 0 {
+            log.Warningf("Invalid value for ttl : %d", upload.TTL)
+            common.Fail(ctx, req, resp, fmt.Sprintf("Invalid value for ttl : %d", upload.TTL), 400)
+            return
+        }
+        if common.Config.MaxTTL > 0 && upload.TTL > common.Config.MaxTTL {
+            log.Warningf("Cannot set ttl to %d (maximum allowed is : %d)", upload.TTL, common.Config.MaxTTL)
+            common.Fail(ctx, req, resp, fmt.Sprintf("Cannot set ttl to %d (maximum allowed is : %d)", upload.TTL, common.Config.MaxTTL), 400)
+            return
+        }
+    }
+
+    // Protect upload with HTTP basic auth
+    // Add Authorization header to the response for convenience
+    // So clients can just copy this header into the next request
+    if upload.Password != "" {
+        upload.ProtectedByPassword = true
+        if upload.Login == "" {
+            upload.Login = "plik"
+        }
+
+        // The Authorization header will contain the base64 version of "login:password"
+        // Save only the md5sum of this string to authenticate further requests
+        b64str := base64.StdEncoding.EncodeToString([]byte(upload.Login + ":" + upload.Password))
+        upload.Password, err = utils.Md5sum(b64str)
+        if err != nil {
+            log.Warningf("Unable to generate password hash : %s", err)
+            common.Fail(ctx, req, resp, common.NewResult("Unable to generate password hash", nil).ToJSONString(), 500)
+            return
+        }
+        resp.Header().Add("Authorization", "Basic "+b64str)
+    }
+
+    // Check the token validity with api.yubico.com
+    // Only the Yubikey id part of the token is stored
+    // The yubikey id is the 12 first characters of the token
+    // The 32 last characters are the actual OTP
+    if upload.Yubikey != "" {
+        upload.ProtectedByYubikey = true
+
+        if !common.Config.YubikeyEnabled {
+            log.Warningf("Got a Yubikey upload but Yubikey backend is disabled")
+            common.Fail(ctx, req, resp, "Yubikey is disabled on this server", 403)
+            return
+        }
+
+        _, ok, err := common.Config.YubiAuth.Verify(upload.Yubikey)
+        if err != nil {
+            log.Warningf("Unable to validate yubikey token : %s", err)
+            common.Fail(ctx, req, resp, "Unable to validate yubikey token", 500)
+            return
+        }
+
+        if !ok {
+            log.Warningf("Invalid yubikey token")
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 400)
+            return
+        }
+
+        upload.Yubikey = upload.Yubikey[:12]
+    }
+
+    // A short url is created for each upload if a shorten backend is specified in the configuration.
+    // Referer header is used to get the url of incoming request, clients have to set it in order
+    // to get this feature working
+    if shortenBackend.GetShortenBackend() != nil {
+        if req.Header.Get("Referer") != "" {
+            u, err := url.Parse(req.Header.Get("Referer"))
+            if err != nil {
+                // Skip url shortening if the referer cannot be parsed, u would be nil here
+                log.Warningf("Unable to parse referer url : %s", err)
+            } else {
+                longURL := u.Scheme + "://" + u.Host + "#/?id=" + upload.ID
+                shortURL, err := shortenBackend.GetShortenBackend().Shorten(ctx, longURL)
+                if err == nil {
+                    upload.ShortURL = shortURL
+                } else {
+                    log.Warningf("Unable to shorten url %s : %s", longURL, err)
+                }
+            }
+        }
+    }
+
+    // Create files
+    for i, file := range upload.Files {
+
+        // Check file name length
+        if len(file.Name) > 1024 {
+            log.Warning("File name is too long")
+            common.Fail(ctx, req, resp, "File name is too long. Maximum length is 1024 characters", 400)
+            return
+        }
+
+        file.GenerateID()
+        file.Status = "missing"
+        delete(upload.Files, i)
+        upload.Files[file.ID] = file
+    }
+
+    // Save the metadata
+    err = metadataBackend.GetMetaDataBackend().Create(ctx, upload)
+    if err != nil {
+        log.Warningf("Create new upload error : %s", err)
+        common.Fail(ctx, req, resp, "Unable to create new upload", 500)
+        return
+    }
+
+    // Remove all private information (ip, data backend details, ...) before
+    // sending metadata back to the client
+    uploadToken := upload.UploadToken
+    upload.Sanitize()
+
+    // Show upload token since it's an upload creation
+    upload.UploadToken = uploadToken
+    upload.IsAdmin = true
+
+    // Print upload metadata in the json response.
+    var json []byte
+    if json, err = utils.ToJson(upload); err != nil {
+        log.Warningf("Unable to serialize json response : %s", err)
+        common.Fail(ctx, req, resp, "Unable to serialize json response", 500)
+        return
+    }
+
+    resp.Write(json)
+}
diff --git a/server/handlers/getFile.go b/server/handlers/getFile.go
new file mode 100644
index 00000000..e0504728
--- /dev/null
+++ b/server/handlers/getFile.go
@@ -0,0 +1,183 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+    - Mathieu Bodjikian
+    - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package handlers
+
+import (
+    "fmt"
+    "io"
+    "net/http"
+    "strconv"
+
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux"
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+    "github.com/root-gg/plik/server/common"
+    "github.com/root-gg/plik/server/dataBackend"
+    "github.com/root-gg/plik/server/metadataBackend"
+)
+
+// GetFile download a file
+func GetFile(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+    log := common.GetLogger(ctx)
+
+    // Get upload from context
+    upload := common.GetUpload(ctx)
+    if upload == nil {
+        // This should never happen
+        log.Critical("Missing upload in getFileHandler")
+        common.Fail(ctx, req, resp, "Internal error", 500)
+        return
+    }
+
+    // Get file from context
+    file := common.GetFile(ctx)
+    if file == nil {
+        // This should never happen
+        log.Critical("Missing file in getFileHandler")
+        common.Fail(ctx, req, resp, "Internal error", 500)
+        return
+    }
+
+    // If upload has OneShot option, test if file has not been already downloaded once
+    if upload.OneShot && file.Status == "downloaded" {
+        log.Warningf("File %s has already been downloaded", file.Name)
+        common.Fail(ctx, req, resp, fmt.Sprintf("File %s has already been downloaded", file.Name), 404)
+        return
+    }
+
+    // If the file is marked as deleted by a previous call, we abort request
+    if file.Status == "removed" {
+        log.Warningf("File %s has been removed", file.Name)
+        common.Fail(ctx, req, resp, fmt.Sprintf("File %s has been removed", file.Name), 404)
+        return
+    }
+
+    // If upload is yubikey protected, user must send an OTP when he wants to get a file.
+    if upload.Yubikey != "" {
+
+        // Error if yubikey is disabled on server, and enabled on upload
+        if !common.Config.YubikeyEnabled {
+            log.Warningf("Got a Yubikey upload but Yubikey backend is disabled")
+            common.Fail(ctx, req, resp, "Yubikey is disabled on this server", 403)
+            return
+        }
+
+        vars := mux.Vars(req)
+        token := vars["yubikey"]
+        if token == "" {
+            log.Warningf("Missing yubikey token")
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 401)
+            return
+        }
+        if len(token) != 44 {
+            log.Warningf("Invalid yubikey token : %s", token)
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 401)
+            return
+        }
+        if token[:12] != upload.Yubikey {
+            log.Warningf("Invalid yubikey device : %s", token)
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 401)
+            return
+        }
+
+        _, isValid, err := common.Config.YubiAuth.Verify(token)
+        if err != nil {
+            log.Warningf("Failed to validate yubikey token : %s", err)
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 500)
+            return
+        }
+        if !isValid {
+            log.Warningf("Invalid yubikey token : %s", token)
+            common.Fail(ctx, req, resp, "Invalid yubikey token", 401)
+            return
+        }
+    }
+
+    // Set content type and print file
+    resp.Header().Set("Content-Type", file.Type)
+    if file.CurrentSize > 0 {
+        resp.Header().Set("Content-Length", strconv.Itoa(int(file.CurrentSize)))
+    }
+
+    // If "dl" GET params is set
+    //  -> Set Content-Disposition header
+    //  -> The client should download file instead of displaying it
+    dl := req.URL.Query().Get("dl")
+    if dl != "" {
+        resp.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, file.Name))
+    } else {
+        resp.Header().Set("Content-Disposition", fmt.Sprintf(`inline; filename="%s"`, file.Name))
+    }
+
+    // HEAD Request => Do not print file, user just wants http headers
+    // GET Request => Print file content
+    if req.Method == "GET" {
+        // Get file in data backend
+        var backend dataBackend.DataBackend
+        if upload.Stream {
+            backend = dataBackend.GetStreamBackend()
+        } else {
+            backend = dataBackend.GetDataBackend()
+        }
+        fileReader, err := backend.GetFile(ctx, upload, file.ID)
+        if err != nil {
+            log.Warningf("Failed to get file %s in upload %s : %s", file.Name, upload.ID, err)
+            common.Fail(ctx, req, resp, fmt.Sprintf("Failed to read file %s", file.Name), 404)
+            return
+        }
+        defer fileReader.Close()
+
+        // Update metadata if oneShot option is set
+        if upload.OneShot {
+            file.Status = "downloaded"
+            err = metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx, upload, file)
+            if err != nil {
+                log.Warningf("Error while deleting file %s from upload %s metadata : %s", file.Name, upload.ID, err)
+            }
+        }
+
+        // File is piped directly to http response body without buffering
+        _, err = io.Copy(resp, fileReader)
+        if err != nil {
+            log.Warningf("Error while copying file to response : %s", err)
+        }
+
+        // Remove file from data backend if oneShot option is set
+        if upload.OneShot {
+            err = backend.RemoveFile(ctx, upload, file.ID)
+            if err != nil {
+                log.Warningf("Error while deleting file %s from upload %s : %s", file.Name, upload.ID, err)
+                return
+            }
+        }
+
+        // Remove upload if no files anymore
+        RemoveUploadIfNoFileAvailable(ctx, upload)
+    }
+}
diff --git a/server/handlers/getUpload.go b/server/handlers/getUpload.go
new file mode 100644
index 00000000..a3d27b3e
--- /dev/null
+++ b/server/handlers/getUpload.go
@@ -0,0 +1,66 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+    - Mathieu Bodjikian
+    - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to
any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package handlers + +import ( + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" + "github.com/root-gg/plik/server/common" +) + +// GetUpload return upload metadata +func GetUpload(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + // Get upload from context + upload := common.GetUpload(ctx) + if upload == nil { + // This should never append + log.Critical("Missing upload in getUploadHandler") + common.Fail(ctx, req, resp, "Internal error", 500) + return + } + + // Remove all private information (ip, data backend details, ...) before + // sending metadata back to the client + upload.Sanitize() + + // Print upload metadata in the json response. + json, err := utils.ToJson(upload) + if err != nil { + log.Warningf("Unable to serialize json response : %s", err) + common.Fail(ctx, req, resp, "Unable to serialize json response", 500) + return + } + + resp.Write(json) +} diff --git a/server/handlers/google.go b/server/handlers/google.go new file mode 100644 index 00000000..cf151838 --- /dev/null +++ b/server/handlers/google.go @@ -0,0 +1,267 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+**/ + +package handlers + +import ( + "fmt" + "net/http" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2" + "github.com/root-gg/plik/server/Godeps/_workspace/src/golang.org/x/oauth2/google" + api_oauth2 "github.com/root-gg/plik/server/Godeps/_workspace/src/google.golang.org/api/oauth2/v2" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" +) + +// GoogleLogin return google api user consent URL. +func GoogleLogin(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + if !common.Config.Authentication { + log.Warning("Authentication is disabled") + common.Fail(ctx, req, resp, "Authentication is disabled", 400) + return + } + + if !common.Config.GoogleAuthentication { + log.Warning("Missing google api credentials") + common.Fail(ctx, req, resp, "Missing google API credentials", 500) + return + } + + origin := req.Header.Get("referer") + if origin == "" { + log.Warning("Missing referer header") + common.Fail(ctx, req, resp, "Missing referer herader", 400) + return + } + + conf := &oauth2.Config{ + ClientID: common.Config.GoogleAPIClientID, + ClientSecret: common.Config.GoogleAPISecret, + RedirectURL: origin + "auth/google/callback", + Scopes: []string{ + api_oauth2.UserinfoEmailScope, + api_oauth2.UserinfoProfileScope, + }, + Endpoint: google.Endpoint, + } + + /* Generate state */ + state := jwt.New(jwt.SigningMethodHS256) + state.Claims["origin"] = origin + state.Claims["expire"] = time.Now().Add(time.Minute * 5).Unix() + + /* Sign state */ + b64state, err := state.SignedString([]byte(common.Config.GoogleAPISecret)) + if err != nil { + log.Warningf("Unable to sign state : %s", err) + common.Fail(ctx, req, resp, "Unable to sign state", 500) + return + } + + // Redirect user to Google's consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL(b64state) + + resp.Write([]byte(url)) +} + +// GoogleCallback authenticate google user. 
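GoogleLogin above signs a short-lived HS256 state token (origin and expire claims) that GoogleCallback below must verify when Google redirects back. The round trip, condensed into one hedged sketch reusing the same jwt-go calls as the handlers — the helper name, plain error returns, and the omission of the expire check are illustrative only; it assumes the fmt, time and jwt imports already present in this file:

func signAndVerifyState(secret []byte, origin string) (string, error) {
    // Sign : same claims as GoogleLogin
    state := jwt.New(jwt.SigningMethodHS256)
    state.Claims["origin"] = origin
    state.Claims["expire"] = time.Now().Add(time.Minute * 5).Unix()
    b64state, err := state.SignedString(secret)
    if err != nil {
        return "", err
    }

    // Verify : reject foreign signing methods, exactly like GoogleCallback
    parsed, err := jwt.Parse(b64state, func(token *jwt.Token) (interface{}, error) {
        if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("Unexpected signing method : %v", token.Header["alg"])
        }
        return secret, nil
    })
    if err != nil {
        return "", err
    }
    return parsed.Claims["origin"].(string), nil
}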
+func GoogleCallback(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+    log := common.GetLogger(ctx)
+
+    if !common.Config.Authentication {
+        log.Warning("Authentication is disabled")
+        common.Fail(ctx, req, resp, "Authentication is disabled", 400)
+        return
+    }
+
+    if common.Config.GoogleAPIClientID == "" || common.Config.GoogleAPISecret == "" {
+        log.Warning("Missing google api credentials")
+        common.Fail(ctx, req, resp, "Missing google API credentials", 500)
+        return
+    }
+
+    code := req.URL.Query().Get("code")
+    if code == "" {
+        log.Warning("Missing oauth2 authorization code")
+        common.Fail(ctx, req, resp, "Missing oauth2 authorization code", 400)
+        return
+    }
+
+    b64state := req.URL.Query().Get("state")
+    if b64state == "" {
+        log.Warning("Missing oauth2 state")
+        common.Fail(ctx, req, resp, "Missing oauth2 state", 400)
+        return
+    }
+
+    /* Parse state */
+    state, err := jwt.Parse(b64state, func(token *jwt.Token) (interface{}, error) {
+        // Verify signing algorithm
+        if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+            return nil, fmt.Errorf("Unexpected signing method : %v", token.Header["alg"])
+        }
+
+        // Verify expiration date
+        if expire, ok := token.Claims["expire"]; ok {
+            if _, ok = expire.(float64); ok {
+                if time.Now().Unix() > (int64)(expire.(float64)) {
+                    return nil, fmt.Errorf("State has expired")
+                }
+            } else {
+                return nil, fmt.Errorf("Invalid expiration date")
+            }
+        } else {
+            return nil, fmt.Errorf("Missing expiration date")
+        }
+
+        return []byte(common.Config.GoogleAPISecret), nil
+    })
+    if err != nil {
+        log.Warningf("Invalid oauth2 state : %s", err)
+        common.Fail(ctx, req, resp, "Invalid oauth2 state", 400)
+        return
+    }
+
+    origin := state.Claims["origin"].(string)
+
+    conf := &oauth2.Config{
+        ClientID:     common.Config.GoogleAPIClientID,
+        ClientSecret: common.Config.GoogleAPISecret,
+        RedirectURL:  origin + "auth/google/callback",
+        Scopes: []string{
+            api_oauth2.UserinfoEmailScope,
+            api_oauth2.UserinfoProfileScope,
+        },
+        Endpoint: google.Endpoint,
+    }
+
+    token, err := conf.Exchange(oauth2.NoContext, code)
+    if err != nil {
+        log.Warningf("Unable to create google API token : %s", err)
+        common.Fail(ctx, req, resp, "Unable to get user info from google API", 500)
+        return
+    }
+
+    client, err := api_oauth2.New(conf.Client(oauth2.NoContext, token))
+    if err != nil {
+        log.Warningf("Unable to create google API client : %s", err)
+        common.Fail(ctx, req, resp, "Unable to get user info from google API", 500)
+        return
+    }
+
+    userInfo, err := client.Userinfo.Get().Do()
+    if err != nil {
+        log.Warningf("Unable to get userinfo from google API : %s", err)
+        common.Fail(ctx, req, resp, "Unable to get user info from google API", 500)
+        return
+    }
+    userID := "google:" + userInfo.Id
+
+    // Get user from metadata backend
+    user, err := metadataBackend.GetMetaDataBackend().GetUser(ctx, userID, "")
+    if err != nil {
+        log.Warningf("Unable to get user : %s", err)
+        common.Fail(ctx, req, resp, "Unable to get user", 500)
+        return
+    }
+
+    if user == nil {
+        if common.IsWhitelisted(ctx) {
+            // Create new user
+            user = common.NewUser()
+            user.ID = userID
+            user.Login = userInfo.Email
+            user.Name = userInfo.Name
+            user.Email = userInfo.Email
+
+            // Save user to metadata backend
+            err = metadataBackend.GetMetaDataBackend().SaveUser(ctx, user)
+            if err != nil {
+                log.Warningf("Unable to save user to metadata backend : %s", err)
+                common.Fail(ctx, req, resp, "Authentication error", 403)
+                return
+            }
+        } else {
+            log.Warning("Unable to create user from untrusted source IP
address") + common.Fail(ctx, req, resp, "Unable to create user from untrusted source IP address", 403) + return + } + } + + // Generate session jwt + session := jwt.New(jwt.SigningMethodHS256) + session.Claims["uid"] = user.ID + session.Claims["provider"] = "google" + + // Generate xsrf token + xsrfToken, err := uuid.NewV4() + if err != nil { + log.Warning("Unable to generate xsrf token") + common.Fail(ctx, req, resp, "Unable to generate xsrf token", 500) + return + } + session.Claims["xsrf"] = xsrfToken.String() + + sessionString, err := session.SignedString([]byte(common.Config.GoogleAPISecret)) + if err != nil { + log.Warningf("Unable to sign session cookie : %s", err) + common.Fail(ctx, req, resp, "Authentification error", 403) + return + } + + // Store session jwt in secure cookie + sessionCookie := &http.Cookie{} + sessionCookie.HttpOnly = true + sessionCookie.Secure = true + sessionCookie.Name = "plik-session" + sessionCookie.Value = sessionString + sessionCookie.MaxAge = int(time.Now().Add(10 * 365 * 24 * time.Hour).Unix()) + sessionCookie.Path = "/" + http.SetCookie(resp, sessionCookie) + + // Store xsrf token cookie + xsrfCookie := &http.Cookie{} + xsrfCookie.HttpOnly = false + xsrfCookie.Secure = true + xsrfCookie.Name = "plik-xsrf" + xsrfCookie.Value = xsrfToken.String() + xsrfCookie.MaxAge = int(time.Now().Add(10 * 365 * 24 * time.Hour).Unix()) + xsrfCookie.Path = "/" + http.SetCookie(resp, xsrfCookie) + + http.Redirect(resp, req, "/#/login", 301) +} diff --git a/server/handlers/me.go b/server/handlers/me.go new file mode 100644 index 00000000..111b0181 --- /dev/null +++ b/server/handlers/me.go @@ -0,0 +1,229 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package handlers + +import ( + "fmt" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" + "net/http" + "strconv" +) + +// UserInfo return user information ( name / email / tokens / ... 
) +func UserInfo(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + // Get user from context + user := common.GetUser(ctx) + if user == nil { + common.Fail(ctx, req, resp, "Missing user, Please login first", 401) + return + } + + // Serialize user to JSON + // Print token in the json response. + json, err := utils.ToJson(user) + if err != nil { + log.Warningf("Unable to serialize json response : %s", err) + common.Fail(ctx, req, resp, "Unable to serialize json response", 500) + return + } + resp.Write(json) +} + +// DeleteAccount remove a user account +func DeleteAccount(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + // Get user from context + user := common.GetUser(ctx) + if user == nil { + // This should never append + common.Fail(ctx, req, resp, "Missing user, Please login first", 401) + return + } + + err := metadataBackend.GetMetaDataBackend().RemoveUser(ctx, user) + if err != nil { + log.Warningf("Unable to remove user %s : %s", user.ID, err) + common.Fail(ctx, req, resp, "Unable to remove user", 500) + return + } +} + +// GetUserUploads get user uploads +func GetUserUploads(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + + // Get user from context + user := common.GetUser(ctx) + if user == nil { + common.Fail(ctx, req, resp, "Missing user, Please login first", 401) + return + } + + // Get token from URL query parameter + var token *common.Token + tokenStr := req.URL.Query().Get("token") + + if tokenStr != "" { + for _, t := range user.Tokens { + if t.Token == tokenStr { + token = t + break + } + } + if token == nil { + log.Warningf("Unable to get uploads for token %s : Invalid token", tokenStr) + common.Fail(ctx, req, resp, "Unable to get uploads : Invalid token", 400) + return + } + } + + // Get uploads + ids, err := metadataBackend.GetMetaDataBackend().GetUserUploads(ctx, user, token) + if err != nil { + log.Warningf("Unable to get uploads for user %s : %s", user.ID, err) + common.Fail(ctx, req, resp, "Unable to get uploads", 500) + return + } + + // Get size from URL query parameter + size := 100 + sizeStr := req.URL.Query().Get("size") + if sizeStr != "" { + size, err = strconv.Atoi(sizeStr) + if err != nil || size <= 0 || size > 100 { + log.Warningf("Invalid size parameter : %s", sizeStr) + common.Fail(ctx, req, resp, "Invalid size parameter", 400) + return + } + } + + // Get offset from URL query parameter + offset := 0 + offsetStr := req.URL.Query().Get("offset") + if offsetStr != "" { + offset, err = strconv.Atoi(offsetStr) + if err != nil || offset < 0 { + log.Warningf("Invalid offset parameter : %s", offsetStr) + common.Fail(ctx, req, resp, "Invalid offset parameter", 400) + return + } + } + + // Adjust offset + if offset > len(ids) { + offset = len(ids) + } + + // Adjust size + if offset+size > len(ids) { + size = len(ids) - offset + } + + uploads := []*common.Upload{} + for _, id := range ids[offset : offset+size] { + upload, err := metadataBackend.GetMetaDataBackend().Get(ctx, id) + if err != nil { + log.Warningf("Unable to get upload %s : %s", id, err) + continue + } + + if !upload.IsExpired() { + token := upload.Token + upload.Sanitize() + upload.Token = token + upload.IsAdmin = true + uploads = append(uploads, upload) + } + } + + // Print uploads in the json response. 
+
+	// Print uploads in the json response.
+	var json []byte
+	if json, err = utils.ToJson(uploads); err != nil {
+		log.Warningf("Unable to serialize json response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to serialize json response", 500)
+		return
+	}
+	resp.Write(json)
+}
+
+// RemoveUserUploads deletes all user uploads
+func RemoveUserUploads(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Get user from context
+	user := common.GetUser(ctx)
+	if user == nil {
+		common.Fail(ctx, req, resp, "Missing user, please login first", 401)
+		return
+	}
+
+	// Get token from URL query parameter
+	var token *common.Token
+	tokenStr := req.URL.Query().Get("token")
+	if tokenStr != "" {
+		for _, t := range user.Tokens {
+			if t.Token == tokenStr {
+				token = t
+				break
+			}
+		}
+		if token == nil {
+			// Without this guard an invalid token would remove every upload
+			log.Warningf("Unable to remove uploads for token %s : Invalid token", tokenStr)
+			common.Fail(ctx, req, resp, "Unable to remove uploads : Invalid token", 400)
+			return
+		}
+	}
+
+	// Get uploads
+	ids, err := metadataBackend.GetMetaDataBackend().GetUserUploads(ctx, user, token)
+	if err != nil {
+		log.Warningf("Unable to get uploads for user %s : %s", user.ID, err)
+		common.Fail(ctx, req, resp, "Unable to get uploads", 500)
+		return
+	}
+
+	removed := 0
+	for _, id := range ids {
+		upload, err := metadataBackend.GetMetaDataBackend().Get(ctx, id)
+		if err != nil {
+			log.Warningf("Unable to get upload %s : %s", id, err)
+			continue
+		}
+
+		err = metadataBackend.GetMetaDataBackend().Remove(ctx, upload)
+		if err != nil {
+			log.Warningf("Unable to remove upload %s : %s", id, err)
+		} else {
+			removed++
+		}
+	}
+
+	resp.Write(common.NewResult(fmt.Sprintf("%d uploads removed", removed), nil).ToJSON())
+}
diff --git a/server/handlers/misc.go b/server/handlers/misc.go
new file mode 100644
index 00000000..e7b319f3
--- /dev/null
+++ b/server/handlers/misc.go
@@ -0,0 +1,153 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package handlers
+
+import (
+	"image/png"
+	"net/http"
+	"strconv"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
+	"github.com/root-gg/plik/server/common"
+	"github.com/root-gg/plik/server/dataBackend"
+	"github.com/root-gg/plik/server/metadataBackend"
+)
+
+// GetVersion returns the build information
+func GetVersion(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Print version and build information in the json response.
+	json, err := utils.ToJson(common.GetBuildInfo())
+	if err != nil {
+		log.Warningf("Unable to serialize json response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to serialize json response", 500)
+		return
+	}
+
+	resp.Write(json)
+}
+
+// GetConfiguration returns the server configuration
+func GetConfiguration(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Print configuration in the json response.
+	json, err := utils.ToJson(common.Config)
+	if err != nil {
+		log.Warningf("Unable to serialize response body : %s", err)
+		common.Fail(ctx, req, resp, "Unable to serialize response body", 500)
+		return
+	}
+	resp.Write(json)
+}
+
+// Logout removes the user session cookies
+func Logout(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	common.Logout(resp)
+}
+
+// GetQrCode returns a QRCode for the requested URL
+func GetQrCode(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Check params
+	urlParam := req.FormValue("url")
+	sizeParam := req.FormValue("size")
+
+	// Parse int on size
+	sizeInt, err := strconv.Atoi(sizeParam)
+	if err != nil {
+		sizeInt = 250
+	}
+	if sizeInt > 1000 {
+		log.Warning("QRCode size must not exceed 1000")
+		common.Fail(ctx, req, resp, "QRCode size must not exceed 1000", 400)
+		return
+	}
+
+	// Generate QRCode png from url
+	qrcode, err := qr.Encode(urlParam, qr.H, qr.Auto)
+	if err != nil {
+		log.Warningf("Unable to generate QRCode : %s", err)
+		common.Fail(ctx, req, resp, "Unable to generate QRCode", 500)
+		return
+	}
+
+	// Scale QRCode png size
+	qrcode, err = barcode.Scale(qrcode, sizeInt, sizeInt)
+	if err != nil {
+		log.Warningf("Unable to scale QRCode : %s", err)
+		common.Fail(ctx, req, resp, "Unable to generate QRCode", 500)
+		return
+	}
+
+	resp.Header().Add("Content-Type", "image/png")
+	err = png.Encode(resp, qrcode)
+	if err != nil {
+		log.Warningf("Unable to encode png : %s", err)
+	}
+}
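+
+// Illustration with an assumed route and hypothetical values :
+//   GET /qrcode?url=https://plik.example.com/&size=300
+// returns a 300x300 PNG encoding the URL ; size defaults to 250 and is
+// capped at 1000.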
Removing.") + + if !upload.Stream { + err := dataBackend.GetDataBackend().RemoveUpload(ctx, upload) + if err != nil { + log.Warningf("Unable to remove upload : %s", err) + return + } + } + err := metadataBackend.GetMetaDataBackend().Remove(ctx, upload) + if err != nil { + log.Warningf("Unable to remove upload : %s", err) + return + } + } + + return +} diff --git a/server/handlers/ovh.go b/server/handlers/ovh.go new file mode 100644 index 00000000..73eedc4f --- /dev/null +++ b/server/handlers/ovh.go @@ -0,0 +1,353 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package handlers + +import ( + "crypto/sha1" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/nu7hatch/gouuid" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" +) + +type ovhError struct { + ErrorCode string `json:"errorCode"` + HTTPCode string `json:"httpCode"` + Message string `json:"message"` +} + +type ovhUserConsentResponse struct { + ValidationURL string `json:"validationUrl"` + ConsumerKey string `json:"consumerKey"` +} + +type ovhUserResponse struct { + Nichandle string `json:"nichandle"` + Email string `json:"email"` + FirstName string `json:"firstname"` + LastName string `json:"name"` +} + +func decodeOVHResponse(resp *http.Response) ([]byte, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("Unable to read response body : %s", err) + } + + if resp.StatusCode > 399 { + // Decode OVH error information from response + if body != nil && len(body) > 0 { + var ovhErr ovhError + err := json.Unmarshal(body, &ovhErr) + if err == nil { + return nil, fmt.Errorf("%s : %s", resp.Status, ovhErr.Message) + } + return nil, fmt.Errorf("%s : %s : %s", resp.Status, "Unable to unserialize ovh error", string(body)) + } + return nil, fmt.Errorf("%s", resp.Status) + } + + return body, nil +} + +const ovhAPIEndpoint string = "https://eu.api.ovh.com" +const ovhAPIVersion string = "1.0" + +// OvhLogin return ovh api user consent URL. 
+
+// OvhLogin returns the OVH API user consent URL
+func OvhLogin(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	if !common.Config.Authentication {
+		log.Warning("Authentication is disabled")
+		common.Fail(ctx, req, resp, "Authentication is disabled", 400)
+		return
+	}
+
+	if !common.Config.OvhAuthentication {
+		log.Warning("Missing ovh api credentials")
+		common.Fail(ctx, req, resp, "Missing OVH API credentials", 500)
+		return
+	}
+
+	origin := req.Header.Get("referer")
+	if origin == "" {
+		log.Warning("Missing referer header")
+		common.Fail(ctx, req, resp, "Missing referer header", 400)
+		return
+	}
+
+	// Prepare request
+	redirectionURL := origin + "auth/ovh/callback"
+	ovhReqBody := "{\"accessRules\":[{\"method\":\"GET\",\"path\":\"/me\"}], \"redirection\":\"" + redirectionURL + "\"}"
+
+	url := fmt.Sprintf("%s/%s/auth/credential", ovhAPIEndpoint, ovhAPIVersion)
+
+	ovhReq, err := http.NewRequest("POST", url, strings.NewReader(ovhReqBody))
+	if err != nil {
+		log.Warningf("Unable to create new http POST request to %s : %s", url, err)
+		common.Fail(ctx, req, resp, "Unable to create new http POST request to OVH API", 500)
+		return
+	}
+	ovhReq.Header.Add("X-Ovh-Application", common.Config.OvhAPIKey)
+	ovhReq.Header.Add("Content-type", "application/json")
+
+	// Do request
+	client := &http.Client{}
+	ovhResp, err := client.Do(ovhReq)
+	if err != nil {
+		log.Warningf("Error with ovh API %s : %s", url, err)
+		common.Fail(ctx, req, resp, "Error with OVH API", 500)
+		return
+	}
+	defer ovhResp.Body.Close()
+	ovhRespBody, err := decodeOVHResponse(ovhResp)
+	if err != nil {
+		log.Warningf("Error with ovh API %s : %s", url, err)
+		common.Fail(ctx, req, resp, fmt.Sprintf("Error with OVH API : %s", err), 500)
+		return
+	}
+
+	var userConsentResponse ovhUserConsentResponse
+	err = json.Unmarshal(ovhRespBody, &userConsentResponse)
+	if err != nil {
+		log.Warningf("Unable to unserialize OVH API response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to unserialize OVH API response", 500)
+		return
+	}
+
+	// Generate session jwt
+	session := jwt.New(jwt.SigningMethodHS256)
+	session.Claims["ovh-consumer-key"] = userConsentResponse.ConsumerKey
+	session.Claims["ovh-api-endpoint"] = ovhAPIEndpoint + "/" + ovhAPIVersion
+
+	sessionString, err := session.SignedString([]byte(common.Config.OvhAPISecret))
+	if err != nil {
+		log.Warningf("Unable to sign OVH session cookie : %s", err)
+		common.Fail(ctx, req, resp, "Unable to sign OVH session cookie", 500)
+		return
+	}
+
+	// Store session jwt in secure cookie
+	sessionCookie := &http.Cookie{}
+	sessionCookie.HttpOnly = true
+	sessionCookie.Secure = true
+	sessionCookie.Name = "plik-ovh-session"
+	sessionCookie.Value = sessionString
+	// MaxAge is a lifetime in seconds, not an absolute timestamp
+	sessionCookie.MaxAge = int((5 * time.Minute).Seconds())
+	sessionCookie.Path = "/"
+	http.SetCookie(resp, sessionCookie)
+
+	resp.Write([]byte(userConsentResponse.ValidationURL))
+}
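+
+// OVH API requests are authenticated by the signature scheme used in
+// OvhCallback below. Illustrated with hypothetical values :
+//   X-Ovh-Signature = "$1$" + hex(sha1(applicationSecret + "+" + consumerKey
+//       + "+" + method + "+" + url + "+" + body + "+" + timestamp))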
+
+// OvhCallback authenticates the OVH user
+func OvhCallback(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	if !common.Config.Authentication {
+		log.Warning("Authentication is disabled")
+		common.Fail(ctx, req, resp, "Authentication is disabled", 400)
+		return
+	}
+
+	if common.Config.OvhAPIKey == "" || common.Config.OvhAPISecret == "" {
+		log.Warning("Missing ovh api credentials")
+		common.Fail(ctx, req, resp, "Missing ovh api credentials", 500)
+		return
+	}
+
+	// Get state from secure cookie
+	ovhSessionCookie, err := req.Cookie("plik-ovh-session")
+	if err != nil || ovhSessionCookie == nil {
+		log.Warning("Missing OVH session cookie")
+		common.Fail(ctx, req, resp, "Missing OVH session cookie", 400)
+		return
+	}
+
+	// Parse session cookie
+	ovhSession, err := jwt.Parse(ovhSessionCookie.Value, func(t *jwt.Token) (interface{}, error) {
+		// Verify signing algorithm
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("Unexpected signing method : %v", t.Header["alg"])
+		}
+
+		return []byte(common.Config.OvhAPISecret), nil
+	})
+	if err != nil {
+		log.Warningf("Invalid OVH session cookie : %s", err)
+		common.Fail(ctx, req, resp, "Invalid OVH session cookie", 400)
+		return
+	}
+
+	// Get OVH consumer key from session
+	ovhConsumerKey, ok := ovhSession.Claims["ovh-consumer-key"]
+	if !ok {
+		log.Warning("Invalid OVH session cookie : missing ovh-consumer-key")
+		common.Fail(ctx, req, resp, "Invalid OVH session cookie : missing ovh-consumer-key", 400)
+		return
+	}
+
+	// Get OVH API endpoint
+	endpoint, ok := ovhSession.Claims["ovh-api-endpoint"]
+	if !ok {
+		log.Warning("Invalid OVH session cookie : missing ovh-api-endpoint")
+		common.Fail(ctx, req, resp, "Invalid OVH session cookie : missing ovh-api-endpoint", 400)
+		return
+	}
+
+	// Prepare OVH API /me request
+	url := endpoint.(string) + "/me"
+	ovhReq, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		log.Warningf("Unable to create new http GET request to %s : %s", url, err)
+		common.Fail(ctx, req, resp, "Unable to create new http GET request to OVH API", 500)
+		return
+	}
+
+	timestamp := time.Now().Unix()
+	ovhReq.Header.Add("X-Ovh-Application", common.Config.OvhAPIKey)
+	ovhReq.Header.Add("X-Ovh-Timestamp", fmt.Sprintf("%d", timestamp))
+	ovhReq.Header.Add("X-Ovh-Consumer", ovhConsumerKey.(string))
+
+	// Sign request
+	h := sha1.New()
+	h.Write([]byte(fmt.Sprintf("%s+%s+%s+%s+%s+%d",
+		common.Config.OvhAPISecret,
+		ovhConsumerKey.(string),
+		"GET",
+		url,
+		"",
+		timestamp,
+	)))
+	ovhReq.Header.Add("X-Ovh-Signature", fmt.Sprintf("$1$%x", h.Sum(nil)))
+
+	// Do request
+	client := &http.Client{}
+	ovhResp, err := client.Do(ovhReq)
+	if err != nil {
+		log.Warningf("Error with ovh API %s : %s", url, err)
+		common.Fail(ctx, req, resp, "Error with ovh API", 500)
+		return
+	}
+	defer ovhResp.Body.Close()
+	ovhRespBody, err := decodeOVHResponse(ovhResp)
+	if err != nil {
+		log.Warningf("Error with ovh API %s : %s", url, err)
+		common.Fail(ctx, req, resp, fmt.Sprintf("Error with ovh API : %s", err), 500)
+		return
+	}
+
+	// Unserialize response
+	var userInfo ovhUserResponse
+	err = json.Unmarshal(ovhRespBody, &userInfo)
+	if err != nil {
+		log.Warningf("Unable to unserialize OVH API response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to unserialize OVH API response", 500)
+		return
+	}
+
+	userID := "ovh:" + userInfo.Nichandle
+
+	// Get user from metadata backend
+	user, err := metadataBackend.GetMetaDataBackend().GetUser(ctx, userID, "")
+	if err != nil {
+		log.Warningf("Unable to get user from metadata backend : %s", err)
+		common.Fail(ctx, req, resp, "Unable to get user", 500)
+		return
+	}
+
+	if user == nil {
+		if common.IsWhitelisted(ctx) {
+			// Create new user
+			user = common.NewUser()
+			user.ID = userID
+			user.Login = userInfo.Nichandle
+			user.Name = userInfo.FirstName + " " + userInfo.LastName
+			user.Email = userInfo.Email
+
+			// Save user to metadata backend
+			err = metadataBackend.GetMetaDataBackend().SaveUser(ctx, user)
+			if err != nil {
+				log.Warningf("Unable to save user to metadata backend : %s", err)
+				common.Fail(ctx, req, resp, "Authentication error", 403)
+				return
+			}
+		} else {
+			log.Warning("Unable to create user from untrusted source IP address")
+			common.Fail(ctx, req, resp, "Unable to create user from untrusted source IP address", 403)
+			return
+		}
+	}
+
+	// Generate session jwt
+	session := jwt.New(jwt.SigningMethodHS256)
+	session.Claims["uid"] = user.ID
+	session.Claims["provider"] = "ovh"
+
+	// Generate xsrf token
+	xsrfToken, err := uuid.NewV4()
+	if err != nil {
+		log.Warning("Unable to generate xsrf token")
+		common.Fail(ctx, req, resp, "Unable to generate xsrf token", 500)
+		return
+	}
+	session.Claims["xsrf"] = xsrfToken.String()
+
+	sessionString, err := session.SignedString([]byte(common.Config.OvhAPISecret))
+	if err != nil {
+		log.Warningf("Unable to sign session cookie : %s", err)
+		common.Fail(ctx, req, resp, "Authentication error", 403)
+		return
+	}
+
+	// Store session jwt in secure cookie
+	sessionCookie := &http.Cookie{}
+	sessionCookie.HttpOnly = true
+	sessionCookie.Secure = true
+	sessionCookie.Name = "plik-session"
+	sessionCookie.Value = sessionString
+	// MaxAge is a lifetime in seconds, not an absolute timestamp
+	sessionCookie.MaxAge = int((10 * 365 * 24 * time.Hour).Seconds())
+	sessionCookie.Path = "/"
+	http.SetCookie(resp, sessionCookie)
+
+	// Store xsrf token cookie
+	xsrfCookie := &http.Cookie{}
+	xsrfCookie.HttpOnly = false
+	xsrfCookie.Secure = true
+	xsrfCookie.Name = "plik-xsrf"
+	xsrfCookie.Value = xsrfToken.String()
+	xsrfCookie.MaxAge = int((10 * 365 * 24 * time.Hour).Seconds())
+	xsrfCookie.Path = "/"
+	http.SetCookie(resp, xsrfCookie)
+
+	http.Redirect(resp, req, "/#/login", 301)
+}
diff --git a/server/handlers/removeFile.go b/server/handlers/removeFile.go
new file mode 100644
index 00000000..9300c4c4
--- /dev/null
+++ b/server/handlers/removeFile.go
@@ -0,0 +1,114 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package handlers
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
+	"github.com/root-gg/plik/server/common"
+	"github.com/root-gg/plik/server/dataBackend"
+	"github.com/root-gg/plik/server/metadataBackend"
+)
+
+// RemoveFile removes a file from an existing upload
+func RemoveFile(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Get upload from context
+	upload := common.GetUpload(ctx)
+	if upload == nil {
+		// This should never happen
+		log.Critical("Missing upload in removeFileHandler")
+		common.Fail(ctx, req, resp, "Internal error", 500)
+		return
+	}
+
+	// Check authorization
+	if !upload.Removable && !upload.IsAdmin {
+		log.Warningf("Unable to remove file : unauthorized")
+		common.Fail(ctx, req, resp, "You are not allowed to remove file from this upload", 403)
+		return
+	}
+
+	// Get file from context
+	file := common.GetFile(ctx)
+	if file == nil {
+		// This should never happen
+		log.Critical("Missing file in removeFileHandler")
+		common.Fail(ctx, req, resp, "Internal error", 500)
+		return
+	}
+
+	// Check if file is not already removed
+	if file.Status == "removed" {
+		log.Warning("Can't remove an already removed file")
+		common.Fail(ctx, req, resp, fmt.Sprintf("File %s has already been removed", file.Name), 404)
+		return
+	}
+
+	// Set status to removed, and save metadata
+	file.Status = "removed"
+	if err := metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx, upload, file); err != nil {
+		log.Warningf("Unable to update metadata : %s", err)
+		common.Fail(ctx, req, resp, "Unable to update upload metadata", 500)
+		return
+	}
+
+	// Remove file from the data backend
+	// Select the stream backend for stream uploads
+	var backend dataBackend.DataBackend
+	if upload.Stream {
+		backend = dataBackend.GetStreamBackend()
+	} else {
+		backend = dataBackend.GetDataBackend()
+	}
+
+	if err := backend.RemoveFile(ctx, upload, file.ID); err != nil {
+		log.Warningf("Unable to delete file : %s", err)
+		common.Fail(ctx, req, resp, "Unable to delete file", 500)
+		return
+	}
+
+	// Remove upload if no files anymore
+	RemoveUploadIfNoFileAvailable(ctx, upload)
+
+	// Print upload metadata in the json response.
+	json, err := utils.ToJson(upload)
+	if err != nil {
+		log.Warningf("Unable to serialize json response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to serialize json response", 500)
+		return
+	}
+
+	resp.Write(json)
+}
diff --git a/server/handlers/removeUpload.go b/server/handlers/removeUpload.go
new file mode 100644
index 00000000..7ee0633b
--- /dev/null
+++ b/server/handlers/removeUpload.go
@@ -0,0 +1,75 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package handlers
+
+import (
+	"net/http"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+	"github.com/root-gg/plik/server/common"
+	"github.com/root-gg/plik/server/dataBackend"
+	"github.com/root-gg/plik/server/metadataBackend"
+)
+
+// RemoveUpload removes an upload and all its files
+func RemoveUpload(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Get upload from context
+	upload := common.GetUpload(ctx)
+	if upload == nil {
+		// This should never happen
+		log.Critical("Missing upload in removeUploadHandler")
+		common.Fail(ctx, req, resp, "Internal error", 500)
+		return
+	}
+
+	// Check authorization
+	if !upload.Removable && !upload.IsAdmin {
+		log.Warningf("Unable to remove upload : unauthorized")
+		common.Fail(ctx, req, resp, "You are not allowed to remove this upload", 403)
+		return
+	}
+
+	// Remove from data backend
+	err := dataBackend.GetDataBackend().RemoveUpload(ctx, upload)
+	if err != nil {
+		log.Warningf("Unable to remove upload data : %s", err)
+		common.Fail(ctx, req, resp, "Unable to remove upload", 500)
+		return
+	}
+
+	// Remove from metadata backend
+	err = metadataBackend.GetMetaDataBackend().Remove(ctx, upload)
+	if err != nil {
+		log.Warningf("Unable to remove upload metadata : %s", err)
+		common.Fail(ctx, req, resp, "Unable to remove upload metadata", 500)
+	}
+}
diff --git a/server/handlers/token.go b/server/handlers/token.go
new file mode 100644
index 00000000..bbb90de9
--- /dev/null
+++ b/server/handlers/token.go
@@ -0,0 +1,146 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package handlers
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
+	"github.com/root-gg/plik/server/common"
+	"github.com/root-gg/plik/server/metadataBackend"
+)
+
+// CreateToken creates a new token
+func CreateToken(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Get user from context
+	user := common.GetUser(ctx)
+	if user == nil {
+		common.Fail(ctx, req, resp, "Missing user, please login first", 401)
+		return
+	}
+
+	// Create token
+	token := common.NewToken()
+
+	// Read request body
+	defer req.Body.Close()
+	req.Body = http.MaxBytesReader(resp, req.Body, 1048576)
+	body, err := ioutil.ReadAll(req.Body)
+	if err != nil {
+		log.Warningf("Unable to read request body : %s", err)
+		common.Fail(ctx, req, resp, "Unable to read request body", 400)
+		return
+	}
+
+	// Deserialize json body
+	if len(body) > 0 {
+		err = json.Unmarshal(body, token)
+		if err != nil {
+			log.Warningf("Unable to deserialize json request body : %s", err)
+			common.Fail(ctx, req, resp, "Unable to deserialize json request body", 400)
+			return
+		}
+	}
+
+	// Initialize token
+	token.Create()
+
+	// Add token to user
+	user.Tokens = append(user.Tokens, token)
+
+	// Save token
+	err = metadataBackend.GetMetaDataBackend().SaveUser(ctx, user)
+	if err != nil {
+		log.Warningf("Unable to save user to metadata backend : %s", err)
+		common.Fail(ctx, req, resp, "Unable to create token", 500)
+		return
+	}
+
+	// Print token in the json response.
+	var json []byte
+	if json, err = utils.ToJson(token); err != nil {
+		log.Warningf("Unable to serialize json response : %s", err)
+		common.Fail(ctx, req, resp, "Unable to serialize json response", 500)
+		return
+	}
+	resp.Write(json)
+}
+
+// RevokeToken removes a token
+func RevokeToken(ctx *juliet.Context, resp http.ResponseWriter, req *http.Request) {
+	log := common.GetLogger(ctx)
+
+	// Get user from context
+	user := common.GetUser(ctx)
+	if user == nil {
+		common.Fail(ctx, req, resp, "Missing user, please login first", 401)
+		return
+	}
+
+	// Get token to remove from URL params
+	vars := mux.Vars(req)
+	tokenStr, ok := vars["token"]
+	if !ok || tokenStr == "" {
+		common.Fail(ctx, req, resp, "Missing token", 400)
+		return
+	}
+
+	// Get token from user
+	index := -1
+	for i, t := range user.Tokens {
+		if t.Token == tokenStr {
+			index = i
+			break
+		}
+	}
+	if index < 0 {
+		log.Warningf("Unable to get token %s from user %s", tokenStr, user.ID)
+		common.Fail(ctx, req, resp, "Invalid token", 403)
+		return
+	}
+
+	// TODO RACE CONDITION if simultaneous revocations occur
+
+	// Delete token
+	user.Tokens = append(user.Tokens[:index], user.Tokens[index+1:]...)
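+	// Illustration : append(s[:i], s[i+1:]...) removes element i in place,
+	// so with tokens [a b c] and index 1 the slice becomes [a c]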
+
+	// Save user to metadata backend
+	err := metadataBackend.GetMetaDataBackend().SaveUser(ctx, user)
+	if err != nil {
+		log.Warningf("Unable to save user to metadata backend : %s", err)
+		common.Fail(ctx, req, resp, "Unable to revoke token", 500)
+		return
+	}
+}
diff --git a/server/metadataBackend/bolt/bolt.go b/server/metadataBackend/bolt/bolt.go
new file mode 100644
index 00000000..a76ac21c
--- /dev/null
+++ b/server/metadataBackend/bolt/bolt.go
@@ -0,0 +1,623 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package bolt
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boltdb/bolt"
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+	"github.com/root-gg/plik/server/common"
+)
+
+// MetadataBackend object
+type MetadataBackend struct {
+	Config *MetadataBackendConfig
+
+	db *bolt.DB
+}
+
+// NewBoltMetadataBackend instantiates a new Bolt Metadata Backend
+// from configuration passed as argument
+func NewBoltMetadataBackend(config map[string]interface{}) (bmb *MetadataBackend) {
+	bmb = new(MetadataBackend)
+	bmb.Config = NewBoltMetadataBackendConfig(config)
+
+	// Open the Bolt database
+	var err error
+	bmb.db, err = bolt.Open(bmb.Config.Path, 0600, &bolt.Options{Timeout: 10 * time.Second})
+	if err != nil {
+		log.Fatalf("Unable to open Bolt database %s : %s", bmb.Config.Path, err)
+	}
+
+	// Create Bolt buckets if needed
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		_, err := tx.CreateBucketIfNotExists([]byte("uploads"))
+		if err != nil {
+			return fmt.Errorf("Unable to create metadata bucket : %s", err)
+		}
+
+		_, err = tx.CreateBucketIfNotExists([]byte("users"))
+		if err != nil {
+			return fmt.Errorf("Unable to create user bucket : %s", err)
+		}
+
+		return nil
+	})
+	if err != nil {
+		log.Fatalf("Unable to create Bolt buckets : %s", err)
+	}
+
+	return
+}
+
+// Create implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) Create(ctx *juliet.Context, upload *common.Upload) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to save upload : Missing upload")
+		return
+	}
+
+	// Serialize metadata to json
+	j, err := json.Marshal(upload)
+	if err != nil {
+		err = log.EWarningf("Unable to serialize metadata to json : %s", err)
+		return
+	}
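+
+	// bolt runs the closure below in a single read-write transaction :
+	// either every Put ( upload + indexes ) is committed or none is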
+
+	// Save json metadata to Bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("uploads"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get metadata Bolt bucket")
+		}
+
+		err := bucket.Put([]byte(upload.ID), j)
+		if err != nil {
+			return fmt.Errorf("Unable to save metadata : %s", err)
+		}
+
+		// User index
+		if upload.User != "" {
+			// User index key is built as follows :
+			//  - User index prefix ( 2 bytes : "_u" )
+			//  - The user id
+			//  - The upload date reversed ( 8 bytes )
+			//  - The upload id ( 16 bytes )
+			// Upload id is stored in the key to ensure uniqueness
+			// AuthToken is stored in the value to permit byToken filtering
+			timestamp := make([]byte, 8)
+			binary.BigEndian.PutUint64(timestamp, ^uint64(0)-uint64(upload.Creation))
+
+			key := append([]byte{'_', 'u'}, []byte(upload.User)...)
+			key = append(key, timestamp...)
+			key = append(key, []byte(upload.ID)...)
+
+			err := bucket.Put(key, []byte(upload.Token))
+			if err != nil {
+				return fmt.Errorf("Unable to save user index : %s", err)
+			}
+		}
+
+		// Expire date index
+		if upload.TTL > 0 {
+			// Expire index key is built as follows :
+			//  - Expire index prefix ( 2 bytes : "_e" )
+			//  - The expire timestamp ( 8 bytes )
+			//  - The upload id ( 16 bytes )
+			// Upload id is stored in the key to ensure uniqueness
+			timestamp := make([]byte, 8)
+			expiredTs := upload.Creation + int64(upload.TTL)
+			binary.BigEndian.PutUint64(timestamp, uint64(expiredTs))
+
+			key := append([]byte{'_', 'e'}, timestamp...)
+			key = append(key, []byte(upload.ID)...)
+
+			err := bucket.Put(key, []byte{})
+			if err != nil {
+				return fmt.Errorf("Unable to save expire index : %s", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("Upload metadata successfully saved")
+	return
+}
+
+// Get implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) Get(ctx *juliet.Context, id string) (upload *common.Upload, err error) {
+	log := common.GetLogger(ctx)
+	var b []byte
+
+	if id == "" {
+		err = log.EWarning("Unable to get upload : Missing upload id")
+		return
+	}
+
+	// Get json metadata from Bolt database
+	err = bmb.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("uploads"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get metadata Bolt bucket")
+		}
+
+		b = bucket.Get([]byte(id))
+		if len(b) == 0 {
+			return fmt.Errorf("Unable to get upload metadata from Bolt bucket")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	// Unserialize metadata from json
+	upload = new(common.Upload)
+	if err = json.Unmarshal(b, upload); err != nil {
+		err = log.EWarningf("Unable to unserialize metadata from json \"%s\" : %s", string(b), err)
+		return
+	}
+
+	return
+}
+
+// AddOrUpdateFile implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) AddOrUpdateFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to add file : Missing upload")
+		return
+	}
+
+	if file == nil {
+		err = log.EWarning("Unable to add file : Missing file")
+		return
+	}
+
+	// Update json metadata in Bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("uploads"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get metadata Bolt bucket")
+		}
+
+		// Get json
+		b := bucket.Get([]byte(upload.ID))
+		if len(b) == 0 {
+			return fmt.Errorf("Unable to get upload metadata from Bolt bucket")
+		}
+
+		// Unserialize metadata from json
+		upload := new(common.Upload)
+		if err = json.Unmarshal(b, upload); err != nil {
+			return log.EWarningf("Unable to unserialize metadata from json \"%s\" : %s", string(b), err)
+		}
+
+		// Add file to upload
+		upload.Files[file.ID] = file
+
+		// Serialize metadata to json
+		j, err := json.Marshal(upload)
+		if err != nil {
+			return log.EWarningf("Unable to serialize metadata to json : %s", err)
+		}
+
+		// Update Bolt database
+		return bucket.Put([]byte(upload.ID), j)
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("Upload metadata successfully updated")
+	return
+}
+
+// RemoveFile implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) RemoveFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to remove file : Missing upload")
+		return
+	}
+
+	if file == nil {
+		err = log.EWarning("Unable to remove file : Missing file")
+		return
+	}
+
+	// Update json metadata in Bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("uploads"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get metadata Bolt bucket")
+		}
+
+		b := bucket.Get([]byte(upload.ID))
+		if b == nil {
+			return fmt.Errorf("Unable to get upload metadata from Bolt bucket")
+		}
+
+		// Unserialize metadata from json
+		upload = new(common.Upload)
+		if err = json.Unmarshal(b, upload); err != nil {
+			return log.EWarningf("Unable to unserialize metadata from json \"%s\" : %s", string(b), err)
+		}
+
+		// Remove file from upload
+		if _, ok := upload.Files[file.ID]; ok {
+			delete(upload.Files, file.ID)
+
+			// Serialize metadata to json
+			j, err := json.Marshal(upload)
+			if err != nil {
+				return log.EWarningf("Unable to serialize metadata to json : %s", err)
+			}
+
+			// Update bolt database
+			return bucket.Put([]byte(upload.ID), j)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("Upload metadata successfully updated")
+	return nil
+}
+
+// Remove implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) Remove(ctx *juliet.Context, upload *common.Upload) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to remove upload : Missing upload")
+		return
+	}
+
+	// Remove upload from bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("uploads"))
+		err := bucket.Delete([]byte(upload.ID))
+		if err != nil {
+			return err
+		}
+
+		// Remove upload user index
+		if upload.User != "" {
+			// User index key is built as follows :
+			//  - User index prefix ( 2 bytes : "_u" )
+			//  - The user id
+			//  - The upload date reversed ( 8 bytes )
+			//  - The upload id ( 16 bytes )
+			// Upload id is stored in the key to ensure uniqueness
+			// AuthToken is stored in the value to permit byToken filtering
+			timestamp := make([]byte, 8)
+			binary.BigEndian.PutUint64(timestamp, ^uint64(0)-uint64(upload.Creation))
+
+			key := append([]byte{'_', 'u'}, []byte(upload.User)...)
+			key = append(key, timestamp...)
+			key = append(key, []byte(upload.ID)...)
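+
+			// Illustration with hypothetical values : for user "ovh:jdoe",
+			// creation time 1450000000 and upload id "0123456789abcdef" this
+			// rebuilds "_u" + "ovh:jdoe" + BigEndian(^uint64(0)-1450000000) +
+			// "0123456789abcdef", the exact key written by Create(), so the
+			// Delete() below drops that single index entry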
+			err := bucket.Delete(key)
+			if err != nil {
+				return fmt.Errorf("Unable to delete user index : %s", err)
+			}
+		}
+
+		// Remove upload expire date index
+		if upload.TTL > 0 {
+			// Expire index key is built as follows :
+			//  - Expire index prefix ( 2 bytes : "_e" )
+			//  - The expire timestamp ( 8 bytes )
+			//  - The upload id ( 16 bytes )
+			// Upload id is stored in the key to ensure uniqueness
+			timestamp := make([]byte, 8)
+			expiredTs := upload.Creation + int64(upload.TTL)
+			binary.BigEndian.PutUint64(timestamp, uint64(expiredTs))
+			key := append([]byte{'_', 'e'}, timestamp...)
+			key = append(key, []byte(upload.ID)...)
+
+			err := bucket.Delete(key)
+			if err != nil {
+				return fmt.Errorf("Unable to delete expire index : %s", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("Upload metadata successfully removed")
+	return
+}
+
+// SaveUser implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) SaveUser(ctx *juliet.Context, user *common.User) (err error) {
+	log := common.GetLogger(ctx)
+
+	if user == nil {
+		err = log.EWarning("Unable to save user : Missing user")
+		return
+	}
+
+	// Serialize user to json
+	j, err := json.Marshal(user)
+	if err != nil {
+		err = log.EWarningf("Unable to serialize user to json : %s", err)
+		return
+	}
+
+	// Save json user to Bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("users"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get users Bolt bucket")
+		}
+
+		// Get current tokens
+		tokens := make(map[string]*common.Token)
+		b := bucket.Get([]byte(user.ID))
+		if len(b) != 0 {
+			// Unserialize user from json
+			u := common.NewUser()
+			if err = json.Unmarshal(b, u); err != nil {
+				return fmt.Errorf("Unable to unserialize user from json : %s", err)
+			}
+
+			for _, token := range u.Tokens {
+				tokens[token.Token] = token
+			}
+		}
+
+		// Save user
+		err := bucket.Put([]byte(user.ID), j)
+		if err != nil {
+			return fmt.Errorf("Unable to save user : %s", err)
+		}
+
+		// Update token index
+		for _, token := range user.Tokens {
+			if _, ok := tokens[token.Token]; !ok {
+				// New token
+				err := bucket.Put([]byte(token.Token), []byte(user.ID))
+				if err != nil {
+					return fmt.Errorf("Unable to save new token index : %s", err)
+				}
+			}
+			delete(tokens, token.Token)
+		}
+
+		for _, token := range tokens {
+			// Deleted token
+			err := bucket.Delete([]byte(token.Token))
+			if err != nil {
+				return fmt.Errorf("Unable to delete token index : %s", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("User successfully saved")
+
+	return
+}
+
+// GetUser implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) GetUser(ctx *juliet.Context, id string, token string) (u *common.User, err error) {
+	log := common.GetLogger(ctx)
+	var b []byte
+
+	if id == "" && token == "" {
+		err = log.EWarning("Unable to get user : Missing user id or token")
+		return
+	}
+
+	// Get json user from Bolt database
+	err = bmb.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("users"))
+		if bucket == nil {
+			return fmt.Errorf("Unable to get users Bolt bucket")
+		}
+
+		if id == "" && token != "" {
+			// token index lookup
+			idBytes := bucket.Get([]byte(token))
+			if len(idBytes) == 0 {
+				return nil
+			}
+			id = string(idBytes)
+		}
+
+		b = bucket.Get([]byte(id))
+		return nil
+	})
+	if err != nil {
+		err = log.EWarningf("Unable to get user : %s", err)
+		return
+	}
+
+	// User not found but no error
+	if len(b) == 0 {
+		return
+	}
+
+	// Unserialize user from json
+	u = common.NewUser()
+	if err = json.Unmarshal(b, u); err != nil {
+		return
+	}
+
+	return
+}
+
+// RemoveUser implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) RemoveUser(ctx *juliet.Context, user *common.User) (err error) {
+	log := common.GetLogger(ctx)
+
+	if user == nil {
+		err = log.EWarning("Unable to remove user : Missing user")
+		return
+	}
+
+	// Remove user from bolt database
+	err = bmb.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte("users"))
+		err := bucket.Delete([]byte(user.ID))
+		if err != nil {
+			return err
+		}
+
+		// Update token index
+		for _, token := range user.Tokens {
+			err := bucket.Delete([]byte(token.Token))
+			if err != nil {
+				return fmt.Errorf("Unable to delete token index : %s", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	log.Infof("User successfully removed")
+
+	return
+}
+
+// GetUserUploads implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) GetUserUploads(ctx *juliet.Context, user *common.User, token *common.Token) (ids []string, err error) {
+	log := common.GetLogger(ctx)
+
+	if user == nil {
+		err = log.EWarning("Unable to get user uploads : Missing user")
+		return
+	}
+
+	err = bmb.db.View(func(tx *bolt.Tx) error {
+		c := tx.Bucket([]byte("uploads")).Cursor()
+
+		// User index key is built as follows :
+		//  - User index prefix ( 2 bytes : "_u" )
+		//  - The user id
+		//  - The upload date reversed ( 8 bytes )
+		//  - The upload id ( 16 bytes )
+		// Upload id is stored in the key to ensure uniqueness
+		// AuthToken is stored in the value to permit byToken filtering
+		startKey := append([]byte{'_', 'u'}, []byte(user.ID)...)
+
+		k, t := c.Seek(startKey)
+		for k != nil && bytes.HasPrefix(k, startKey) {
+
+			// byToken filter
+			if token == nil || string(t) == token.Token {
+				// Extract upload id from key ( 16 last bytes )
+				ids = append(ids, string(k[len(k)-16:]))
+			}
+
+			// Scan the bucket forward
+			k, t = c.Next()
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+// GetUploadsToRemove implementation for Bolt Metadata Backend
+func (bmb *MetadataBackend) GetUploadsToRemove(ctx *juliet.Context) (ids []string, err error) {
+	err = bmb.db.View(func(tx *bolt.Tx) error {
+		c := tx.Bucket([]byte("uploads")).Cursor()
+
+		// Expire index key is built as follows :
+		//  - Expire index prefix ( 2 bytes : "_e" )
+		//  - The expire timestamp ( 8 bytes )
+		//  - The upload id ( 16 bytes )
+		// Upload id is stored in the key to ensure uniqueness
+
+		// Create seek key at current timestamp + 1
+		timestamp := make([]byte, 8)
+		binary.BigEndian.PutUint64(timestamp, uint64(time.Now().Unix()+1))
+		startKey := append([]byte{'_', 'e'}, timestamp...)
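+
+		// Illustration : keys sort by expire timestamp, so seeking to
+		// "_e" + BigEndian(now+1) places the cursor just past every upload
+		// whose TTL has already elapsed ; walking backwards with Prev()
+		// then yields exactly the expired uploads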
+
+		// Seek just after the start key
+		// All keys before the cursor belong to already expired uploads
+		c.Seek(startKey)
+		for {
+			// Scan the bucket backwards
+			k, _ := c.Prev()
+			if k == nil || !bytes.HasPrefix(k, []byte("_e")) {
+				break
+			}
+
+			// Extract upload id from key ( 16 last bytes )
+			ids = append(ids, string(k[len(k)-16:]))
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	return
+}
diff --git a/server/metadataBackend/bolt/config.go b/server/metadataBackend/bolt/config.go
new file mode 100644
index 00000000..b21c6817
--- /dev/null
+++ b/server/metadataBackend/bolt/config.go
@@ -0,0 +1,48 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package bolt
+
+import (
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
+)
+
+// MetadataBackendConfig object
+type MetadataBackendConfig struct {
+	Path string
+}
+
+// NewBoltMetadataBackendConfig configures the backend
+// from config passed as argument
+func NewBoltMetadataBackendConfig(config map[string]interface{}) (mbc *MetadataBackendConfig) {
+	mbc = new(MetadataBackendConfig)
+	mbc.Path = "plik.db"
+	utils.Assign(mbc, config)
+	return
+}
diff --git a/server/metadataBackend/file/config.go b/server/metadataBackend/file/config.go
index 3de01371..5313c91d 100644
--- a/server/metadataBackend/file/config.go
+++ b/server/metadataBackend/file/config.go
@@ -40,13 +40,13 @@ type MetadataBackendConfig struct {
 
 // NewFileMetadataBackendConfig configures the backend
 // from config passed as argument
-func NewFileMetadataBackendConfig(config map[string]interface{}) (fmb *MetadataBackendConfig) {
-	fmb = new(MetadataBackendConfig)
+func NewFileMetadataBackendConfig(config map[string]interface{}) (mbc *MetadataBackendConfig) {
+	mbc = new(MetadataBackendConfig)
 	// Default upload directory is ./files
 	// this is the same as the default file
 	// data backend so by default files and
 	// metadata are colocated
-	fmb.Directory = "files"
-	utils.Assign(fmb, config)
+	mbc.Directory = "files"
+	utils.Assign(mbc, config)
 	return
 }
diff --git a/server/metadataBackend/file/file.go b/server/metadataBackend/file/file.go
index bdd160f5..3b9d769e 100644
--- a/server/metadataBackend/file/file.go
+++ b/server/metadataBackend/file/file.go
@@ -37,6 +37,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
 	"github.com/root-gg/plik/server/common"
 )
 
@@ -59,13 +60,18 @@ func
NewFileMetadataBackend(config map[string]interface{}) (fmb *MetadataBackend
 }
 
 // Create implementation for File Metadata Backend
-func (fmb *MetadataBackend) Create(ctx *common.PlikContext, upload *common.Upload) (err error) {
-	defer ctx.Finalize(err)
+func (fmb *MetadataBackend) Create(ctx *juliet.Context, upload *common.Upload) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to save upload : Missing upload")
+		return
+	}
 
 	// Get upload directory
 	directory, err := fmb.getDirectoryFromUploadID(upload.ID)
 	if err != nil {
-		ctx.Warningf("Unable to get upload directory : %s", err)
+		log.Warningf("Unable to get upload directory : %s", err)
 		return
 	}
 
@@ -75,23 +81,23 @@ func (fmb *MetadataBackend) Create(ctx *common.PlikContext, upload *common.Uploa
 	// Serialize metadata to json
 	b, err := json.MarshalIndent(upload, "", " ")
 	if err != nil {
-		err = ctx.EWarningf("Unable to serialize metadata to json : %s", err)
+		err = log.EWarningf("Unable to serialize metadata to json : %s", err)
 		return
 	}
 
 	// Create upload directory if needed
 	if _, err = os.Stat(directory); err != nil {
 		if err = os.MkdirAll(directory, 0777); err != nil {
-			err = ctx.EWarningf("Unable to create upload directory %s : %s", directory, err)
+			err = log.EWarningf("Unable to create upload directory %s : %s", directory, err)
 			return
 		}
-		ctx.Infof("Upload directory %s successfully created", directory)
+		log.Infof("Upload directory %s successfully created", directory)
 	}
 
 	// Create metadata file
 	f, err := os.OpenFile(metadataFile, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
 	if err != nil {
-		err = ctx.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
 		return
 	}
 	defer f.Close()
@@ -99,29 +105,34 @@ func (fmb *MetadataBackend) Create(ctx *common.PlikContext, upload *common.Uploa
 	// Print content
 	_, err = f.Write(b)
 	if err != nil {
-		err = ctx.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Sync on disk
 	err = f.Sync()
 	if err != nil {
-		err = ctx.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
 		return
 	}
 
-	ctx.Infof("Metadata file successfully saved %s", metadataFile)
+	log.Infof("Metadata file successfully saved %s", metadataFile)
 	return
 }
 
 // Get implementation for File Metadata Backend
-func (fmb *MetadataBackend) Get(ctx *common.PlikContext, id string) (upload *common.Upload, err error) {
-	defer ctx.Finalize(err)
+func (fmb *MetadataBackend) Get(ctx *juliet.Context, id string) (upload *common.Upload, err error) {
+	log := common.GetLogger(ctx)
+
+	if id == "" {
+		err = log.EWarning("Unable to get upload : Missing upload id")
+		return
+	}
 
 	// Get upload directory
 	directory, err := fmb.getDirectoryFromUploadID(id)
 	if err != nil {
-		ctx.Warningf("Unable to get upload directory : %s", err)
+		log.Warningf("Unable to get upload directory : %s", err)
 		return
 	}
 
@@ -132,14 +143,14 @@ func (fmb *MetadataBackend) Get(ctx *common.PlikContext, id string) (upload *com
 	var buffer []byte
 	buffer, err = ioutil.ReadFile(metadataFile)
 	if err != nil {
-		err = ctx.EWarningf("Unable read metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to read metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Unserialize metadata from json
 	upload = new(common.Upload)
 	if err = json.Unmarshal(buffer, upload); err != nil {
-		err = ctx.EWarningf("Unable to unserialize metadata from json \"%s\" : %s", string(buffer), err)
+		err = log.EWarningf("Unable to unserialize metadata from json \"%s\" : %s", string(buffer), err)
 		return
 	}
 
@@ -147,15 +158,25 @@ func (fmb *MetadataBackend) Get(ctx *common.PlikContext, id string) (upload *com
 }
 
 // AddOrUpdateFile implementation for File Metadata Backend
-func (fmb *MetadataBackend) AddOrUpdateFile(ctx *common.PlikContext, upload *common.Upload, file *common.File) (err error) {
-	defer ctx.Finalize(err)
+func (fmb *MetadataBackend) AddOrUpdateFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to add file : Missing upload")
+		return
+	}
+
+	if file == nil {
+		err = log.EWarning("Unable to add file : Missing file")
+		return
+	}
 
 	// avoid race condition
 	lock(upload.ID)
 	defer unlock(upload.ID)
 
 	// The first thing to do is to reload the file from disk
-	upload, err = fmb.Get(ctx.Fork("reload metadata"), upload.ID)
+	upload, err = fmb.Get(ctx, upload.ID)
 
 	// Add file metadata to upload metadata
 	upload.Files[file.ID] = file
@@ -163,14 +184,14 @@ func (fmb *MetadataBackend) AddOrUpdateFile(ctx *common.PlikContext, upload *com
 	// Serialize metadata to json
 	b, err := json.MarshalIndent(upload, "", " ")
 	if err != nil {
-		err = ctx.EWarningf("Unable to serialize metadata to json : %s", err)
+		err = log.EWarningf("Unable to serialize metadata to json : %s", err)
 		return
 	}
 
 	// Get upload directory
 	directory, err := fmb.getDirectoryFromUploadID(upload.ID)
 	if err != nil {
-		ctx.Warningf("Unable to get upload directory : %s", err)
+		log.Warningf("Unable to get upload directory : %s", err)
 		return
 	}
 
@@ -180,47 +201,57 @@ func (fmb *MetadataBackend) AddOrUpdateFile(ctx *common.PlikContext, upload *com
 	// Create directory if needed
 	if _, err = os.Stat(directory); err != nil {
 		if err = os.MkdirAll(directory, 0777); err != nil {
-			err = ctx.EWarningf("Unable to create upload directory %s : %s", directory, err)
+			err = log.EWarningf("Unable to create upload directory %s : %s", directory, err)
 			return
 		}
-		ctx.Infof("Upload directory %s successfully created", directory)
+		log.Infof("Upload directory %s successfully created", directory)
 	}
 
 	// Override metadata file
 	f, err := os.OpenFile(metadataFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(0666))
 	if err != nil {
-		err = ctx.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Print content
 	_, err = f.Write(b)
 	if err != nil {
-		err = ctx.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Sync on disk
 	err = f.Sync()
 	if err != nil {
-		err = ctx.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
 		return
 	}
 
-	ctx.Infof("Metadata file successfully updated %s", metadataFile)
+	log.Infof("Metadata file successfully updated %s", metadataFile)
 	return
 }
 
 // RemoveFile implementation for File Metadata Backend
-func (fmb *MetadataBackend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, file *common.File) (err error) {
-	defer ctx.Finalize(err)
+func (fmb *MetadataBackend) RemoveFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to remove file : Missing upload")
+		return
+	}
+
+	if file == nil {
+		err = log.EWarning("Unable to remove file : Missing file")
+		return
+	}
 
 	// avoid race condition
 	lock(upload.ID)
 	defer unlock(upload.ID)
 
 	// The first thing to do is to reload the file from disk
-	upload, err = fmb.Get(ctx.Fork("reload metadata"), upload.ID)
+	upload, err = fmb.Get(ctx, upload.ID)
 
 	// Remove file metadata from upload metadata
 	delete(upload.Files, file.Name)
@@ -228,14 +259,14 @@ func (fmb *MetadataBackend) RemoveFile(ctx *common.PlikContext, upload *common.U
 	// Serialize metadata to json
 	b, err := json.MarshalIndent(upload, "", " ")
 	if err != nil {
-		err = ctx.EWarningf("Unable to serialize metadata to json : %s", err)
+		err = log.EWarningf("Unable to serialize metadata to json : %s", err)
 		return
 	}
 
 	// Get upload directory
 	directory, err := fmb.getDirectoryFromUploadID(upload.ID)
 	if err != nil {
-		ctx.Warningf("Unable to get upload directory : %s", err)
+		log.Warningf("Unable to get upload directory : %s", err)
 		return
 	}
 
@@ -245,35 +276,41 @@ func (fmb *MetadataBackend) RemoveFile(ctx *common.PlikContext, upload *common.U
 	// Override metadata file
 	f, err := os.OpenFile(metadataFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(0666))
 	if err != nil {
-		err = ctx.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to create metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Print content
 	_, err = f.Write(b)
 	if err != nil {
-		err = ctx.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to write metadata file %s : %s", metadataFile, err)
 		return
 	}
 
 	// Sync on disk
 	err = f.Sync()
 	if err != nil {
-		err = ctx.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to sync metadata file %s : %s", metadataFile, err)
 		return
 	}
 
-	ctx.Infof("Metadata file successfully updated %s", metadataFile)
+	log.Infof("Metadata file successfully updated %s", metadataFile)
 	return nil
 }
 
 // Remove implementation for File Metadata Backend
-func (fmb *MetadataBackend) Remove(ctx *common.PlikContext, upload *common.Upload) (err error) {
+func (fmb *MetadataBackend) Remove(ctx *juliet.Context, upload *common.Upload) (err error) {
+	log := common.GetLogger(ctx)
+
+	if upload == nil {
+		err = log.EWarning("Unable to remove upload : Missing upload")
+		return
+	}
 
 	// Get upload directory
 	directory, err := fmb.getDirectoryFromUploadID(upload.ID)
 	if err != nil {
-		ctx.Warningf("Unable to get upload directory : %s", err)
+		log.Warningf("Unable to get upload directory : %s", err)
 		return
 	}
 
@@ -283,22 +320,24 @@ func (fmb *MetadataBackend) Remove(ctx *common.PlikContext, upload *common.Uploa
 	// Test if file exist
 	_, err = os.Stat(metadataFile)
 	if err != nil {
-		ctx.Infof("Metadata file is already deleted")
+		log.Infof("Metadata file is already deleted")
 		return nil
 	}
 
 	// Remove all metadata at once
 	err = os.Remove(metadataFile)
 	if err != nil {
-		err = ctx.EWarningf("Unable to remove upload directory %s : %s", metadataFile, err)
+		err = log.EWarningf("Unable to remove metadata file %s : %s", metadataFile, err)
 		return
 	}
 
+	log.Infof("Metadata file successfully removed : %s", metadataFile)
 	return
 }
 
 // GetUploadsToRemove implementation for File Metadata Backend
-func (fmb *MetadataBackend) GetUploadsToRemove(ctx *common.PlikContext) (ids []string, err error) {
+func (fmb *MetadataBackend) GetUploadsToRemove(ctx *juliet.Context) (ids []string, err error) {
+	log := common.GetLogger(ctx)
common.GetLogger(ctx) // Init ids list ids = make([]string, 0) @@ -321,7 +360,7 @@ func (fmb *MetadataBackend) GetUploadsToRemove(ctx *common.PlikContext) (ids []s // Get upload metadata upload, err := fmb.Get(ctx, uploadDirectory.Name()) if err != nil { - ctx.EWarningf("Unable to get upload metadata %s : %s", uploadDirectory.Name(), err) + log.EWarningf("Unable to get upload metadata %s : %s", uploadDirectory.Name(), err) continue } @@ -369,3 +408,33 @@ func unlock(uploadID string) { delete(locks, uploadID) }() } + +/* !!! NOT IMPLEMENTED IN FILE METADATA BACKEND !!! */ + +// SaveUser implementation for File Metadata Backend +func (fmb *MetadataBackend) SaveUser(ctx *juliet.Context, user *common.User) (err error) { + log := common.GetLogger(ctx) + err = log.EWarningf("Unable to save user : Not implemented") + return +} + +// GetUser implementation for File Metadata Backend +func (fmb *MetadataBackend) GetUser(ctx *juliet.Context, id string, token string) (user *common.User, err error) { + log := common.GetLogger(ctx) + err = log.EWarningf("Unable to get user : Not implemented") + return +} + +// RemoveUser implementation for File Metadata Backend +func (fmb *MetadataBackend) RemoveUser(ctx *juliet.Context, user *common.User) (err error) { + log := common.GetLogger(ctx) + err = log.EWarningf("Unable to remove user : Not implemented") + return +} + +// GetUserUploads implementation for File Metadata Backend +func (fmb *MetadataBackend) GetUserUploads(ctx *juliet.Context, user *common.User, token *common.Token) (ids []string, err error) { + log := common.GetLogger(ctx) + err = log.EWarningf("Unable to get user uploads : Not implemented") + return +} diff --git a/server/metadataBackend/metadataBackend.go b/server/metadataBackend/metadataBackend.go index ee6fe2ed..b2eab02f 100644 --- a/server/metadataBackend/metadataBackend.go +++ b/server/metadataBackend/metadataBackend.go @@ -30,7 +30,9 @@ THE SOFTWARE. package metadataBackend import ( + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend/bolt" "github.com/root-gg/plik/server/metadataBackend/file" "github.com/root-gg/plik/server/metadataBackend/mongo" ) @@ -40,12 +42,18 @@ var metadataBackend MetadataBackend
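A note on the new calling convention before the interface definition just below: every metadata backend method now takes a request-scoped *juliet.Context instead of the old *common.PlikContext, and loggers are pulled out of that context. A minimal sketch of a consumer (fetchUpload is a hypothetical helper name; GetMetaDataBackend, Get and common.GetLogger are the calls used throughout this diff):

package example // hypothetical wrapper package, sketch only

import (
	"github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
	"github.com/root-gg/plik/server/common"
	"github.com/root-gg/plik/server/metadataBackend"
)

// fetchUpload shows the calling convention: the juliet.Context carries the
// request-scoped logger, and the backend singleton is resolved on each call.
func fetchUpload(ctx *juliet.Context, id string) (*common.Upload, error) {
	log := common.GetLogger(ctx)
	upload, err := metadataBackend.GetMetaDataBackend().Get(ctx, id)
	if err != nil {
		log.Warningf("Unable to get upload %s : %s", id, err)
		return nil, err
	}
	return upload, nil
}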
// MetadataBackend interface describes methods that metadata backends // must implement to be compatible with plik. type MetadataBackend interface { - Create(ctx *common.PlikContext, u *common.Upload) (err error) - Get(ctx *common.PlikContext, id string) (u *common.Upload, err error) - AddOrUpdateFile(ctx *common.PlikContext, u *common.Upload, file *common.File) (err error) - RemoveFile(ctx *common.PlikContext, u *common.Upload, file *common.File) (err error) - Remove(ctx *common.PlikContext, u *common.Upload) (err error) - GetUploadsToRemove(ctx *common.PlikContext) (ids []string, err error) + Create(ctx *juliet.Context, upload *common.Upload) (err error) + Get(ctx *juliet.Context, id string) (upload *common.Upload, err error) + AddOrUpdateFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) + RemoveFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) + Remove(ctx *juliet.Context, upload *common.Upload) (err error) + + SaveUser(ctx *juliet.Context, user *common.User) (err error) + GetUser(ctx *juliet.Context, id string, token string) (user *common.User, err error) + RemoveUser(ctx *juliet.Context, user *common.User) (err error) + + GetUserUploads(ctx *juliet.Context, user *common.User, token *common.Token) (ids []string, err error) + GetUploadsToRemove(ctx *juliet.Context) (ids []string, err error) } // GetMetaDataBackend is a singleton pattern. @@ -65,8 +73,10 @@ func Initialize() { metadataBackend = file.NewFileMetadataBackend(common.Config.MetadataBackendConfig) case "mongo": metadataBackend = mongo.NewMongoMetadataBackend(common.Config.MetadataBackendConfig) + case "bolt": + metadataBackend = bolt.NewBoltMetadataBackend(common.Config.MetadataBackendConfig) default: - common.Log().Fatalf("Invalid metadata backend %s", common.Config.DataBackend) + common.Logger().Fatalf("Invalid metadata backend %s", common.Config.MetadataBackend) } } } diff --git a/server/metadataBackend/mongo/config.go b/server/metadataBackend/mongo/config.go index b6906388..4b1f98f4 100644 --- a/server/metadataBackend/mongo/config.go +++ b/server/metadataBackend/mongo/config.go @@ -35,21 +35,23 @@ import ( // MetadataBackendConfig object type MetadataBackendConfig struct { - URL string - Database string - Collection string - Username string - Password string - Ssl bool + URL string + Database string + Collection string + UserCollection string + Username string + Password string + Ssl bool } // NewMongoMetadataBackendConfig configures the backend // from config passed as argument -func NewMongoMetadataBackendConfig(config map[string]interface{}) (mmb *MetadataBackendConfig) { - mmb = new(MetadataBackendConfig) - mmb.URL = "127.0.0.1:27017" - mmb.Database = "plik" - mmb.Collection = "meta" - utils.Assign(mmb, config) +func NewMongoMetadataBackendConfig(config map[string]interface{}) (mbc *MetadataBackendConfig) { + mbc = new(MetadataBackendConfig) + mbc.URL = "127.0.0.1:27017" + mbc.Database = "plik" + mbc.Collection = "meta" + mbc.UserCollection = "tokens" + utils.Assign(mbc, config) return } diff --git a/server/metadataBackend/mongo/mongo.go b/server/metadataBackend/mongo/mongo.go index f7609509..39d72ce6 100644 --- a/server/metadataBackend/mongo/mongo.go +++ b/server/metadataBackend/mongo/mongo.go @@ -35,6 +35,7 @@ import ( "strconv" "time" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" mgo "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2" "github.com/root-gg/plik/server/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" "github.com/root-gg/plik/server/common" @@ -73,7 +74,7 @@ func NewMongoMetadataBackend(config
map[string]interface{}) (mmb *MetadataBacken var err error mmb.session, err = mgo.DialWithInfo(dialInfo) if err != nil { - common.Log().Fatalf("Unable to contact mongodb at %s : %s", mmb.config.URL, err.Error()) + common.Logger().Fatalf("Unable to contact mongodb at %s : %s", mmb.config.URL, err.Error()) } // Ensure everything is persisted and replicated @@ -83,74 +84,224 @@ func NewMongoMetadataBackend(config map[string]interface{}) (mmb *MetadataBacken } // Create implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) Create(ctx *common.PlikContext, upload *common.Upload) (err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) Create(ctx *juliet.Context, upload *common.Upload) (err error) { + log := common.GetLogger(ctx) + + if upload == nil { + err = log.EWarning("Unable to save upload : Missing upload") + return + } + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) err = collection.Insert(&upload) if err != nil { - err = ctx.EWarningf("Unable to append metadata to mongodb : %s", err) + err = log.EWarningf("Unable to append metadata to mongodb : %s", err) } return } // Get implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) Get(ctx *common.PlikContext, id string) (u *common.Upload, err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) Get(ctx *juliet.Context, id string) (u *common.Upload, err error) { + log := common.GetLogger(ctx) + + if id == "" { + err = log.EWarning("Unable to get upload : Missing upload id") + return + } + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) u = &common.Upload{} err = collection.Find(bson.M{"id": id}).One(u) if err != nil { - err = ctx.EWarningf("Unable to get metadata from mongodb : %s", err) + err = log.EWarningf("Unable to get metadata from mongodb : %s", err) } return } // AddOrUpdateFile implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) AddOrUpdateFile(ctx *common.PlikContext, upload *common.Upload, file *common.File) (err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) AddOrUpdateFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) { + log := common.GetLogger(ctx) + + if upload == nil { + err = log.EWarning("Unable to add file : Missing upload") + return + } + + if file == nil { + err = log.EWarning("Unable to add file : Missing file") + return + } + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) err = collection.Update(bson.M{"id": upload.ID}, bson.M{"$set": bson.M{"files." 
+ file.ID: file}}) if err != nil { - err = ctx.EWarningf("Unable to get metadata from mongodb : %s", err) + err = log.EWarningf("Unable to get metadata from mongodb : %s", err) } return } // RemoveFile implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) RemoveFile(ctx *common.PlikContext, upload *common.Upload, file *common.File) (err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) RemoveFile(ctx *juliet.Context, upload *common.Upload, file *common.File) (err error) { + log := common.GetLogger(ctx) + + if upload == nil { + err = log.EWarning("Unable to remove file : Missing upload") + return + } + + if file == nil { + err = log.EWarning("Unable to remove file : Missing file") + return + } + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) err = collection.Update(bson.M{"id": upload.ID}, bson.M{"$unset": bson.M{"files." + file.Name: ""}}) if err != nil { - err = ctx.EWarningf("Unable to get remove file from mongodb : %s", err) + err = log.EWarningf("Unable to remove file from mongodb : %s", err) } return } // Remove implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) Remove(ctx *common.PlikContext, upload *common.Upload) (err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) Remove(ctx *juliet.Context, upload *common.Upload) (err error) { + log := common.GetLogger(ctx) + + if upload == nil { + err = log.EWarning("Unable to remove upload : Missing upload") + return + } + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) err = collection.Remove(bson.M{"id": upload.ID}) if err != nil { - err = ctx.EWarningf("Unable to get remove file from mongodb : %s", err) + err = log.EWarningf("Unable to remove upload from mongodb : %s", err) + } + return +} + +// SaveUser implementation from MongoDB Metadata Backend +func (mmb *MetadataBackend) SaveUser(ctx *juliet.Context, user *common.User) (err error) { + log := common.GetLogger(ctx) + + if user == nil { + err = log.EWarning("Unable to save user : Missing user") + return + } + + session := mmb.session.Copy() + defer session.Close() + collection := session.DB(mmb.config.Database).C(mmb.config.UserCollection) + + _, err = collection.Upsert(bson.M{"id": user.ID}, &user) + if err != nil { + err = log.EWarningf("Unable to save user to mongodb : %s", err) + } + return +} + +// GetUser implementation from MongoDB Metadata Backend +func (mmb *MetadataBackend) GetUser(ctx *juliet.Context, id string, token string) (user *common.User, err error) { + log := common.GetLogger(ctx) + + if id == "" && token == "" { + err = log.EWarning("Unable to get user : Missing user id or token") + return + } + + session := mmb.session.Copy() + defer session.Close() + collection := session.DB(mmb.config.Database).C(mmb.config.UserCollection) + + user = &common.User{} + if id != "" { + err = collection.Find(bson.M{"id": id}).One(user) + if err == mgo.ErrNotFound { + return nil, nil + } else if err != nil { + err = log.EWarningf("Unable to get user from mongodb : %s", err) + } + } else if token != "" { + err = collection.Find(bson.M{"tokens.token": token}).One(user) + if err == mgo.ErrNotFound { + return nil, nil + } else if err != nil { + err = log.EWarningf("Unable to get user from mongodb : %s", err) + } + } else { + err = log.EWarning("Unable to get user from mongodb : Missing user id or token") + } + + return +} + +// RemoveUser implementation from MongoDB 
Metadata Backend +func (mmb *MetadataBackend) RemoveUser(ctx *juliet.Context, user *common.User) (err error) { + log := common.GetLogger(ctx) + + if user == nil { + err = log.EWarning("Unable to remove user : Missing user") + return + } + + session := mmb.session.Copy() + defer session.Close() + collection := session.DB(mmb.config.Database).C(mmb.config.UserCollection) + + err = collection.Remove(bson.M{"id": user.ID}) + if err != nil { + err = log.EWarningf("Unable to remove user from mongodb : %s", err) + } + + return +} + +// GetUserUploads implementation from MongoDB Metadata Backend +func (mmb *MetadataBackend) GetUserUploads(ctx *juliet.Context, user *common.User, token *common.Token) (ids []string, err error) { + log := common.GetLogger(ctx) + + if user == nil { + err = log.EWarning("Unable to get user uploads : Missing user") + return + } + + session := mmb.session.Copy() + defer session.Close() + collection := session.DB(mmb.config.Database).C(mmb.config.Collection) + + b := bson.M{"user": user.ID} + if token != nil { + b["token"] = token.Token + } + + var uploads []*common.Upload + err = collection.Find(b).Select(bson.M{"id": 1}).Sort("-uploadDate").All(&uploads) + if err != nil { + err = log.EWarningf("Unable to get user uploads : %s", err) + return } + + // Get all ids + for _, upload := range uploads { + ids = append(ids, upload.ID) + } + return } // GetUploadsToRemove implementation from MongoDB Metadata Backend -func (mmb *MetadataBackend) GetUploadsToRemove(ctx *common.PlikContext) (ids []string, err error) { - defer ctx.Finalize(err) +func (mmb *MetadataBackend) GetUploadsToRemove(ctx *juliet.Context) (ids []string, err error) { + log := common.GetLogger(ctx) + session := mmb.session.Copy() defer session.Close() collection := session.DB(mmb.config.Database).C(mmb.config.Collection) @@ -159,13 +310,13 @@ func (mmb *MetadataBackend) GetUploadsToRemove(ctx *common.PlikContext) (ids []s var uploads []*common.Upload b := bson.M{"$where": "this.ttl > 0 && " + strconv.Itoa(int(time.Now().Unix())) + " > this.uploadDate + this.ttl"} - err = collection.Find(b).All(&uploads) + err = collection.Find(b).Select(bson.M{"id": 1}).All(&uploads) if err != nil { - err = ctx.EWarningf("Unable to get uploads to remove : %s", err) + err = log.EWarningf("Unable to get uploads to remove : %s", err) return } - // Append all ids to the toRemove list + // Get all ids for _, upload := range uploads { ids = append(ids, upload.ID) } diff --git a/server/middleware/authenticate.go b/server/middleware/authenticate.go new file mode 100644 index 00000000..9d54d8e7 --- /dev/null +++ b/server/middleware/authenticate.go @@ -0,0 +1,174 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> Copyright holders list can be found in AUTHORS file + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package middleware + +import ( + "net/http" + + "fmt" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" +) + +// Authenticate verifies that a request has either a whitelisted url or a valid auth token +func Authenticate(allowToken bool) juliet.ContextMiddleware { + return func(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("User middleware") + + if common.Config.Authentication { + if allowToken { + // Get user from token header + tokenHeader := req.Header.Get("X-PlikToken") + if tokenHeader != "" { + user, err := metadataBackend.GetMetaDataBackend().GetUser(ctx, "", tokenHeader) + if err != nil { + log.Warningf("Unable to get user from token %s : %s", tokenHeader, err) + common.Fail(ctx, req, resp, "Unable to get user", 500) + return + } + if user == nil { + log.Warningf("Unable to get user from token %s", tokenHeader) + common.Fail(ctx, req, resp, "Invalid token", 403) + return + } + + // Get token from user + var token *common.Token + for _, t := range user.Tokens { + if t.Token == tokenHeader { + token = t + break + } + } + if token == nil { + log.Warningf("Unable to get token %s from user %s", tokenHeader, user.ID) + common.Fail(ctx, req, resp, "Invalid token", 403) + return + } + + // Save user and token in the request context + ctx.Set("user", user) + ctx.Set("token", token) + } + } + + // Get user from session cookie + sessionCookie, err := req.Cookie("plik-session") + if err == nil && sessionCookie != nil { + + // Parse session cookie + session, err := jwt.Parse(sessionCookie.Value, func(t *jwt.Token) (interface{}, error) { + // Verify signing algorithm + if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("Unexpected signing method : %v", t.Header["alg"]) + } + + // Get authentication provider + provider, ok := t.Claims["provider"] + if !ok { + return nil, fmt.Errorf("Missing authentication provider") + } + + switch provider { + case "google": + if !common.Config.GoogleAuthentication { + return nil, fmt.Errorf("Missing Google API credentials") + } + return []byte(common.Config.GoogleAPISecret), nil + case "ovh": + if !common.Config.OvhAuthentication { + return nil, fmt.Errorf("Missing OVH API credentials") + } + return []byte(common.Config.OvhAPISecret), nil + default: + return nil, fmt.Errorf("Invalid authentication provider : %s", provider) + } + }) + if err != nil { + log.Warningf("Invalid session : %s", err) + common.Logout(resp) + common.Fail(ctx, req, resp, "Invalid session", 403) + return + } + + // Verify xsrf token + if req.Method != "GET" && req.Method != "HEAD" { + if xsrfCookie, ok := session.Claims["xsrf"]; ok { + xsrfHeader := req.Header.Get("X-XRSFToken") + if xsrfHeader == "" { + log.Warning("Missing xsrf header") + common.Logout(resp) +
common.Fail(ctx, req, resp, "Missing xsrf header", 403) + return + } + if xsrfCookie != xsrfHeader { + log.Warning("Invalid xsrf header") + common.Logout(resp) + common.Fail(ctx, req, resp, "Invalid xsrf header", 403) + return + } + } else { + log.Warning("Invalid session : missing xsrf token") + common.Logout(resp) + common.Fail(ctx, req, resp, "Invalid session : missing xsrf token", 500) + return + } + } + + // Get user from session + if userID, ok := session.Claims["uid"]; ok { + user, err := metadataBackend.GetMetaDataBackend().GetUser(ctx, userID.(string), "") + if err != nil { + log.Warningf("Unable to get user from session : %s", err) + common.Logout(resp) + common.Fail(ctx, req, resp, "Unable to get user", 500) + return + } + if user == nil { + log.Warningf("Invalid session : user does not exists") + common.Logout(resp) + common.Fail(ctx, req, resp, "Invalid session : User does not exists", 403) + return + } + + // Save user in the request context + ctx.Set("user", user) + } + } + } + + next.ServeHTTP(resp, req) + }) + } +} diff --git a/server/middleware/file.go b/server/middleware/file.go new file mode 100644 index 00000000..1e87d2a2 --- /dev/null +++ b/server/middleware/file.go @@ -0,0 +1,93 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package middleware + +import ( + "fmt" + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" +) + +// File retrieve the requested file metadata from the metadataBackend and save it in the request context. 
+// File retrieves the requested file metadata from the metadataBackend and saves it in the request context. +func File(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("File handler") + + // Get upload from context + upload := common.GetUpload(ctx) + if upload == nil { + // This should never happen + log.Critical("Missing upload in file handler") + common.Fail(ctx, req, resp, "Internal error", 500) + return + } + + // Get the file id from the url params + vars := mux.Vars(req) + fileID := vars["fileID"] + if fileID == "" { + log.Warning("Missing file id") + common.Fail(ctx, req, resp, "Missing file id", 400) + return + } + + // Get the file name from the url params + fileName := vars["filename"] + if fileName == "" { + log.Warning("Missing file name") + common.Fail(ctx, req, resp, "Missing file name", 400) + return + } + + // Get file object in upload metadata + file, ok := upload.Files[fileID] + if !ok { + log.Warningf("File %s not found", fileID) + common.Fail(ctx, req, resp, fmt.Sprintf("File %s not found", fileID), 404) + return + } + + // Compare url filename with upload filename + if file.Name != fileName { + log.Warningf("Invalid filename %s mismatch %s", fileName, file.Name) + common.Fail(ctx, req, resp, fmt.Sprintf("File %s not found", fileName), 404) + return + } + + // Save file in the request context + ctx.Set("file", file) + + next.ServeHTTP(resp, req) + }) +} diff --git a/server/middleware/log.go b/server/middleware/log.go new file mode 100644 index 00000000..503883b5 --- /dev/null +++ b/server/middleware/log.go @@ -0,0 +1,69 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
+**/ + +package middleware + +import ( + "net/http" + "net/http/httputil" + "strings" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/logger" + "github.com/root-gg/plik/server/common" +) + +// Log the http request +func Log(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("Log handler") + + if log.LogIf(logger.DEBUG) { + + // Don't dump request body for file upload + dumpBody := true + if strings.HasPrefix(req.URL.Path, "/file") && req.Method == "POST" { + dumpBody = false + } + + // Dump the full http request + dump, err := httputil.DumpRequest(req, dumpBody) + if err == nil { + log.Debug(string(dump)) + } else { + log.Warningf("Unable to dump HTTP request : %s", err) + } + } else { + log.Infof("%v %v", req.Method, req.RequestURI) + } + + next.ServeHTTP(resp, req) + }) +} diff --git a/server/middleware/logger.go b/server/middleware/logger.go new file mode 100644 index 00000000..f2686f95 --- /dev/null +++ b/server/middleware/logger.go @@ -0,0 +1,47 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+**/ + +package middleware + +import ( + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" +) + +// Logger creates a new Logger instance for this request and saves it to the request context +func Logger(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.Logger().Copy() + log.Debug("Logger handler") + ctx.Set("logger", log) + next.ServeHTTP(resp, req) + }) +} diff --git a/server/middleware/redirect.go b/server/middleware/redirect.go new file mode 100644 index 00000000..4ee7b360 --- /dev/null +++ b/server/middleware/redirect.go @@ -0,0 +1,49 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package middleware + +import ( + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" +) + +// RedirectOnFailure enables webapp http redirection instead of string error +func RedirectOnFailure(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("Redirect handler") + + ctx.Set("redirect", true) + + next.ServeHTTP(resp, req) + }) +} diff --git a/server/middleware/sourceIp.go b/server/middleware/sourceIp.go new file mode 100644 index 00000000..33cb02de --- /dev/null +++ b/server/middleware/sourceIp.go @@ -0,0 +1,78 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +**/ + +package middleware + +import ( + "fmt" + "net" + "net/http" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/common" +) + +// SourceIP extracts the source IP address from the request and saves it to the request context +func SourceIP(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("SourceIP handler") + + var sourceIPstr string + if common.Config.SourceIPHeader != "" { + // Get source ip from header if behind reverse proxy. + sourceIPstr = req.Header.Get(common.Config.SourceIPHeader) + } else { + var err error + sourceIPstr, _, err = net.SplitHostPort(req.RemoteAddr) + if err != nil { + common.Logger().Warningf("Unable to parse source IP address %s", req.RemoteAddr) + common.Fail(ctx, req, resp, "Unable to parse source IP address", 500) + return + } + } + + // Parse source IP address + sourceIP := net.ParseIP(sourceIPstr) + if sourceIP == nil { + common.Logger().Warningf("Unable to parse source IP address %s", sourceIPstr) + common.Fail(ctx, req, resp, "Unable to parse source IP address", 500) + return + } + + // Save source IP address in the context + ctx.Set("ip", sourceIP) + + // Update request logger prefix + prefix := fmt.Sprintf("%s[%s]", log.Prefix, sourceIP.String()) + log.SetPrefix(prefix) + + next.ServeHTTP(resp, req) + }) +} diff --git a/server/middleware/upload.go b/server/middleware/upload.go new file mode 100644 index 00000000..9baea501 --- /dev/null +++ b/server/middleware/upload.go @@ -0,0 +1,139 @@ +/** + + Plik upload server + +The MIT License (MIT) + +Copyright (c) <2015> + - Mathieu Bodjikian + - Charles-Antoine Mathieu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
+**/ + +package middleware + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" + "github.com/root-gg/plik/server/common" + "github.com/root-gg/plik/server/metadataBackend" +) + +// Upload retrieves the requested upload metadata from the metadataBackend and saves it to the request context. +func Upload(ctx *juliet.Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log := common.GetLogger(ctx) + log.Debug("Upload handler") + + // Get the upload id from the url params + vars := mux.Vars(req) + uploadID := vars["uploadID"] + if uploadID == "" { + log.Warning("Missing upload id") + common.Fail(ctx, req, resp, "Missing upload id", 400) + return + } + + // Get upload metadata + upload, err := metadataBackend.GetMetaDataBackend().Get(ctx, uploadID) + if err != nil { + log.Warningf("Upload not found : %s", err) + common.Fail(ctx, req, resp, fmt.Sprintf("Upload %s not found", uploadID), 404) + return + } + + // Update request logger prefix + prefix := fmt.Sprintf("%s[%s]", log.Prefix, uploadID) + log.SetPrefix(prefix) + + // Test if upload is not expired + if upload.IsExpired() { + log.Warningf("Upload is expired since %s", time.Since(time.Unix(upload.Creation, int64(0)).Add(time.Duration(upload.TTL)*time.Second)).String()) + common.Fail(ctx, req, resp, fmt.Sprintf("Upload %s has expired", uploadID), 404) + return + } + + // Save upload in the request context + ctx.Set("upload", upload) + + forbidden := func() { + resp.Header().Set("WWW-Authenticate", "Basic realm=\"plik\"") + common.Fail(ctx, req, resp, "Please provide valid credentials to access this upload", 401) + } + + // Handle basic auth if upload is password protected + if upload.ProtectedByPassword { + if req.Header.Get("Authorization") == "" { + log.Warning("Missing Authorization header") + forbidden() + return + } + + // Basic auth Authorization header must be set to + // "Basic base64("login:password")". Only the md5sum + // of the base64 string is saved in the upload metadata + auth := strings.Split(req.Header.Get("Authorization"), " ") + if len(auth) != 2 { + log.Warningf("Invalid Authorization header %s", req.Header.Get("Authorization")) + forbidden() + return + } + if auth[0] != "Basic" { + log.Warningf("Invalid http authorization scheme : %s", auth[0]) + forbidden() + return + } + var md5sum string + md5sum, err = utils.Md5sum(auth[1]) + if err != nil { + log.Warningf("Unable to hash credentials : %s", err) + forbidden() + return + } + if md5sum != upload.Password { + log.Warning("Invalid credentials") + forbidden() + return + } + } + + // Check upload token + uploadToken := req.Header.Get("X-UploadToken") + if uploadToken != "" && uploadToken == upload.UploadToken { + upload.IsAdmin = true + } else { + // Check if upload belongs to user + if common.Config.Authentication && upload.User != "" { + user := common.GetUser(ctx) + if user != nil && user.ID == upload.User { + upload.IsAdmin = true + } + } + } + + next.ServeHTTP(resp, req) + }) +} diff --git a/server/plik.go b/server/plik.go index cb0326f2..a9b49e91 100644 --- a/server/plik.go +++ b/server/plik.go @@ -25,37 +25,26 @@ THE SOFTWARE.
*/ package main import ( - "crypto/md5" "crypto/rand" "crypto/tls" - "encoding/base64" - "encoding/json" - "errors" "flag" "fmt" - "image/png" - "io" - "io/ioutil" "math/big" - "net" "net/http" - "net/url" "os" "runtime" "strconv" - "strings" "time" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/boombuler/barcode/qr" "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/facebookgo/httpdown" "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/gorilla/mux" + "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet" "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/logger" - "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils" - "github.com/root-gg/plik/server/common" "github.com/root-gg/plik/server/dataBackend" + "github.com/root-gg/plik/server/handlers" "github.com/root-gg/plik/server/metadataBackend" + "github.com/root-gg/plik/server/middleware" "github.com/root-gg/plik/server/shortenBackend" ) @@ -64,7 +53,7 @@ var log *logger.Logger func main() { runtime.GOMAXPROCS(runtime.NumCPU()) - log = common.Log() + log = common.Logger() var configFile = flag.String("config", "plikd.cfg", "Configuration file (default: plikd.cfg") var version = flag.Bool("version", false, "Show version of plikd") @@ -94,22 +83,47 @@ func main() { KillTimeout: 1 * time.Second, } + // Initialize middleware chain + stdChain := juliet.NewChain(middleware.Logger, middleware.SourceIP, middleware.Log) + + // Get user from session cookie + authChain := stdChain.Append(middleware.Authenticate(false)) + + // Get user from session cookie or X-PlikToken header + tokenChain := stdChain.Append(middleware.Authenticate(true)) + + // Redirect on error for webapp + stdChainWithRedirect := juliet.NewChain(middleware.RedirectOnFailure).AppendChain(stdChain) + authChainWithRedirect := juliet.NewChain(middleware.RedirectOnFailure).AppendChain(authChain) + // HTTP Api routes configuration r := mux.NewRouter() - r.HandleFunc("/config", getConfigurationHandler).Methods("GET") - r.HandleFunc("/version", getVersionHandler).Methods("GET") - r.HandleFunc("/upload", createUploadHandler).Methods("POST") - r.HandleFunc("/upload/{uploadID}", getUploadHandler).Methods("GET") - r.HandleFunc("/file/{uploadID}", addFileHandler).Methods("POST") - r.HandleFunc("/file/{uploadID}/{fileID}/{filename}", addFileHandler).Methods("POST") - r.HandleFunc("/file/{uploadID}/{fileID}/{filename}", removeFileHandler).Methods("DELETE") - r.HandleFunc("/file/{uploadID}/{fileID}/{filename}", getFileHandler).Methods("HEAD", "GET") - r.HandleFunc("/file/{uploadID}/{fileID}/{filename}/yubikey/{yubikey}", getFileHandler).Methods("GET") - r.HandleFunc("/stream/{uploadID}/{fileID}/{filename}", addFileHandler).Methods("POST") - r.HandleFunc("/stream/{uploadID}/{fileID}/{filename}", removeFileHandler).Methods("DELETE") - r.HandleFunc("/stream/{uploadID}/{fileID}/{filename}", getFileHandler).Methods("HEAD", "GET") - r.HandleFunc("/stream/{uploadID}/{fileID}/{filename}/yubikey/{yubikey}", getFileHandler).Methods("GET") - r.HandleFunc("/qrcode", getQrCodeHandler).Methods("GET") + r.Handle("/config", stdChain.Then(handlers.GetConfiguration)).Methods("GET") + r.Handle("/version", stdChain.Then(handlers.GetVersion)).Methods("GET") + r.Handle("/upload", tokenChain.Then(handlers.CreateUpload)).Methods("POST") + r.Handle("/upload/{uploadID}", 
authChain.Append(middleware.Upload).Then(handlers.GetUpload)).Methods("GET") + r.Handle("/upload/{uploadID}", authChain.Append(middleware.Upload).Then(handlers.RemoveUpload)).Methods("DELETE") + r.Handle("/file/{uploadID}", tokenChain.Append(middleware.Upload).Then(handlers.AddFile)).Methods("POST") + r.Handle("/file/{uploadID}/{fileID}/{filename}", tokenChain.Append(middleware.Upload, middleware.File).Then(handlers.AddFile)).Methods("POST") + r.Handle("/file/{uploadID}/{fileID}/{filename}", authChain.Append(middleware.Upload, middleware.File).Then(handlers.RemoveFile)).Methods("DELETE") + r.Handle("/file/{uploadID}/{fileID}/{filename}", authChainWithRedirect.Append(middleware.Upload, middleware.File).Then(handlers.GetFile)).Methods("HEAD", "GET") + r.Handle("/file/{uploadID}/{fileID}/{filename}/yubikey/{yubikey}", authChainWithRedirect.Then(handlers.GetFile)).Methods("GET") + r.Handle("/stream/{uploadID}/{fileID}/{filename}", tokenChain.Append(middleware.Upload, middleware.File).Then(handlers.AddFile)).Methods("POST") + r.Handle("/stream/{uploadID}/{fileID}/{filename}", authChain.Append(middleware.Upload, middleware.File).Then(handlers.RemoveFile)).Methods("DELETE") + r.Handle("/stream/{uploadID}/{fileID}/{filename}", authChainWithRedirect.Append(middleware.Upload, middleware.File).Then(handlers.GetFile)).Methods("HEAD", "GET") + r.Handle("/stream/{uploadID}/{fileID}/{filename}/yubikey/{yubikey}", authChainWithRedirect.Then(handlers.GetFile)).Methods("GET") + r.Handle("/auth/google/login", authChain.Then(handlers.GoogleLogin)).Methods("GET") + r.Handle("/auth/google/callback", stdChainWithRedirect.Then(handlers.GoogleCallback)).Methods("GET") + r.Handle("/auth/ovh/login", authChain.Then(handlers.OvhLogin)).Methods("GET") + r.Handle("/auth/ovh/callback", stdChainWithRedirect.Then(handlers.OvhCallback)).Methods("GET") + r.Handle("/auth/logout", authChain.Then(handlers.Logout)).Methods("GET") + r.Handle("/me", authChain.Then(handlers.UserInfo)).Methods("GET") + r.Handle("/me", authChain.Then(handlers.DeleteAccount)).Methods("DELETE") + r.Handle("/me/token", authChain.Then(handlers.CreateToken)).Methods("POST") + r.Handle("/me/token/{token}", authChain.Then(handlers.RevokeToken)).Methods("DELETE") + r.Handle("/me/uploads", authChain.Then(handlers.GetUserUploads)).Methods("GET") + r.Handle("/me/uploads", authChain.Then(handlers.RemoveUserUploads)).Methods("DELETE") + r.Handle("/qrcode", stdChain.Then(handlers.GetQrCode)).Methods("GET") r.PathPrefix("/clients/").Handler(http.StripPrefix("/clients/", http.FileServer(http.Dir("../clients")))) r.PathPrefix("/").Handler(http.FileServer(http.Dir("./public/"))) http.Handle("/", r) @@ -143,886 +157,13 @@ func main() { } -/* - * HTTP HANDLERS - */ - -func getQrCodeHandler(resp http.ResponseWriter, req *http.Request) { - var err error - ctx := common.NewPlikContext("get qrcode handler", req) - defer ctx.Finalize(err) - - // Check that source IP address is valid and whitelisted - code, err := checkSourceIP(ctx, true) - if err != nil { - http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), code) - return - } - - // Check params - urlParam := req.FormValue("url") - sizeParam := req.FormValue("size") - - // Parse int on size - sizeInt, err := strconv.Atoi(sizeParam) - if err != nil { - sizeInt = 250 - } - if sizeInt > 1000 { - http.Error(resp, common.NewResult("QRCode size must be lower than 1000", nil).ToJSONString(), 403) - return - } - - // Generate QRCode png from url - qrcode, err := qr.Encode(urlParam, qr.H, qr.Auto) - if err != nil { - 
http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), 500) - return - } - - // Scale QRCode png size - qrcode, err = barcode.Scale(qrcode, sizeInt, sizeInt) - if err != nil { - http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), 500) - return - } - - resp.Header().Add("Content-Type", "image/png") - png.Encode(resp, qrcode) -} - -func createUploadHandler(resp http.ResponseWriter, req *http.Request) { - var err error - ctx := common.NewPlikContext("create upload handler", req) - defer ctx.Finalize(err) - - // Check that source IP address is valid and whitelisted - code, err := checkSourceIP(ctx, true) - if err != nil { - http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), code) - return - } - - upload := common.NewUpload() - ctx.SetUpload(upload.ID) - - // Read request body - defer req.Body.Close() - req.Body = http.MaxBytesReader(resp, req.Body, 1048576) - body, err := ioutil.ReadAll(req.Body) - if err != nil { - ctx.Warningf("Unable to read request body : %s", err) - http.Error(resp, common.NewResult("Unable to read request body", nil).ToJSONString(), 500) - return - } - - // Deserialize json body - if len(body) > 0 { - err = json.Unmarshal(body, upload) - if err != nil { - ctx.Warningf("Unable to deserialize request body : %s", err) - http.Error(resp, common.NewResult("Unable to deserialize json request body", nil).ToJSONString(), 500) - return - } - } - - // Set upload id, creation date, upload token, ... - upload.Create() - ctx.SetUpload(upload.ID) - upload.RemoteIP = req.RemoteAddr - uploadToken := upload.UploadToken - - if upload.Stream { - if !common.Config.StreamMode { - ctx.Warning("Stream mode is not enabled") - http.Error(resp, common.NewResult("Stream mode is not enabled", nil).ToJSONString(), 400) - return - } - upload.OneShot = true - } - - // TTL = Time in second before the upload expiration - // 0 -> No ttl specified : default value from configuration - // -1 -> No expiration : checking with configuration if that's ok - switch upload.TTL { - case 0: - upload.TTL = common.Config.DefaultTTL - case -1: - if common.Config.MaxTTL != -1 { - ctx.Warningf("Cannot set infinite ttl (maximum allowed is : %d)", common.Config.MaxTTL) - http.Error(resp, common.NewResult(fmt.Sprintf("Cannot set infinite ttl (maximum allowed is : %d)", common.Config.MaxTTL), nil).ToJSONString(), 400) - return - } - default: - if upload.TTL <= 0 { - ctx.Warningf("Invalid value for ttl : %d", upload.TTL) - http.Error(resp, common.NewResult(fmt.Sprintf("Invalid value for ttl : %d", upload.TTL), nil).ToJSONString(), 400) - return - } - if common.Config.MaxTTL > 0 && upload.TTL > common.Config.MaxTTL { - ctx.Warningf("Cannot set ttl to %d (maximum allowed is : %d)", upload.TTL, common.Config.MaxTTL) - http.Error(resp, common.NewResult(fmt.Sprintf("Cannot set ttl to %d (maximum allowed is : %d)", upload.TTL, common.Config.MaxTTL), nil).ToJSONString(), 400) - return - } - } - - // Protect upload with HTTP basic auth - // Add Authorization header to the response for convenience - // So clients can just copy this header into the next request - if upload.Password != "" { - upload.ProtectedByPassword = true - if upload.Login == "" { - upload.Login = "plik" - } - - // The Authorization header will contain the base64 version of "login:password" - // Save only the md5sum of this string to authenticate further requests - b64str := base64.StdEncoding.EncodeToString([]byte(upload.Login + ":" + upload.Password)) - upload.Password, err = utils.Md5sum(b64str) - if err != nil { - 
ctx.Warningf("Unable to generate password hash : %s", err) - http.Error(resp, common.NewResult("Unable to generate password hash", nil).ToJSONString(), 500) - return - } - resp.Header().Add("Authorization", "Basic "+b64str) - } - - // Check the token validity with api.yubico.com - // Only the Yubikey id part of the token is stored - // The yubikey id is the 12 first characters of the token - // The 32 lasts characters are the actual OTP - if upload.Yubikey != "" { - upload.ProtectedByYubikey = true - - if !common.Config.YubikeyEnabled { - ctx.Warningf("Got a Yubikey upload but Yubikey backend is disabled") - http.Error(resp, common.NewResult("Yubikey are disabled on this server", nil).ToJSONString(), 500) - return - } - - _, ok, err := common.Config.YubiAuth.Verify(upload.Yubikey) - if err != nil { - ctx.Warningf("Unable to validate yubikey token : %s", err) - http.Error(resp, common.NewResult("Unable to validate yubikey token", nil).ToJSONString(), 500) - return - } - - if !ok { - ctx.Warningf("Invalid yubikey token") - http.Error(resp, common.NewResult("Invalid yubikey token", nil).ToJSONString(), 401) - return - } - - upload.Yubikey = upload.Yubikey[:12] - } - - // A short url is created for each upload if a shorten backend is specified in the configuration. - // Referer header is used to get the url of incoming request, clients have to set it in order - // to get this feature working - if shortenBackend.GetShortenBackend() != nil { - if req.Header.Get("Referer") != "" { - u, err := url.Parse(req.Header.Get("Referer")) - if err != nil { - ctx.Warningf("Unable to parse referer url : %s", err) - } - longURL := u.Scheme + "://" + u.Host + "#/?id=" + upload.ID - shortURL, err := shortenBackend.GetShortenBackend().Shorten(ctx.Fork("shorten url"), longURL) - if err == nil { - upload.ShortURL = shortURL - } else { - ctx.Warningf("Unable to shorten url %s : %s", longURL, err) - } - } - } - - // Create files - for i, file := range upload.Files { - - // Check file name length - if len(file.Name) > 1024 { - http.Error(resp, common.NewResult("File name is too long. Maximum length is 1024 characters", nil).ToJSONString(), 401) - return - } - - file.GenerateID() - file.Status = "missing" - delete(upload.Files, i) - upload.Files[file.ID] = file - } - - // Save the metadata - err = metadataBackend.GetMetaDataBackend().Create(ctx.Fork("create metadata"), upload) - if err != nil { - ctx.Warningf("Create new upload error : %s", err) - http.Error(resp, common.NewResult("Unable to create new upload", nil).ToJSONString(), 500) - return - } - - // Remove all private informations (ip, data backend details, ...) before - // sending metadata back to the client - upload.Sanitize() - - // Show upload token since its an upload creation - upload.UploadToken = uploadToken - - // Print upload metadata in the json response. 
- var json []byte - if json, err = utils.ToJson(upload); err != nil { - ctx.Warningf("Unable to serialize response body : %s", err) - http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500) - } - - resp.Write(json) -} - -func getUploadHandler(resp http.ResponseWriter, req *http.Request) { - var err error - ctx := common.NewPlikContext("get upload handler", req) - defer ctx.Finalize(err) - - // Check that source IP address is valid - code, err := checkSourceIP(ctx, false) - if err != nil { - http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), code) - return - } - - // Get the upload id and file id from the url params - vars := mux.Vars(req) - uploadID := vars["uploadID"] - ctx.SetUpload(uploadID) - - // Retrieve upload metadata - upload, err := metadataBackend.GetMetaDataBackend().Get(ctx.Fork("get metadata"), uploadID) - if err != nil { - ctx.Warningf("Upload %s not found : %s", uploadID, err) - http.Error(resp, common.NewResult(fmt.Sprintf("Upload %s not found", uploadID), nil).ToJSONString(), 404) - return - } - - ctx.Infof("Got upload from metadata backend") - - // Handle basic auth if upload is password protected - err = httpBasicAuth(req, resp, upload) - if err != nil { - ctx.Warningf("Unauthorized %s : %s", upload.ID, err) - return - } - - // Remove all private informations (ip, data backend details, ...) before - // sending metadata back to the client - upload.Sanitize() - - // Print upload metadata in the json response. - var json []byte - if json, err = utils.ToJson(upload); err != nil { - ctx.Warningf("Unable to serialize response body : %s", err) - http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500) - } - resp.Write(json) -} - -func getFileHandler(resp http.ResponseWriter, req *http.Request) { - var err error - ctx := common.NewPlikContext("get file handler", req) - defer ctx.Finalize(err) - - // Check that source IP address is valid - code, err := checkSourceIP(ctx, false) - if err != nil { - redirect(req, resp, err, code) - return - } - - // Get the upload id and file id from the url params - vars := mux.Vars(req) - uploadID := vars["uploadID"] - fileID := vars["fileID"] - fileName := vars["filename"] - if uploadID == "" { - ctx.Warning("Missing upload id") - redirect(req, resp, errors.New("Missing upload id"), 404) - return - } - if fileID == "" { - ctx.Warning("Missing file id") - redirect(req, resp, errors.New("Missing file id"), 404) - return - } - ctx.SetUpload(uploadID) - - // Get the upload informations from the metadata backend - upload, err := metadataBackend.GetMetaDataBackend().Get(ctx.Fork("get metadata"), uploadID) - if err != nil { - ctx.Warningf("Upload %s not found : %s", uploadID, err) - redirect(req, resp, fmt.Errorf("Upload %s not found", uploadID), 404) - return - } - - // Handle basic auth if upload is password protected - err = httpBasicAuth(req, resp, upload) - if err != nil { - ctx.Warningf("Unauthorized : %s", err) - return - } - - // Test if upload is not expired - if upload.TTL > 0 { - if time.Now().Unix() >= (upload.Creation + int64(upload.TTL)) { - ctx.Warningf("Upload is expired since %s", time.Since(time.Unix(upload.Creation, int64(0)).Add(time.Duration(upload.TTL)*time.Second)).String()) - redirect(req, resp, fmt.Errorf("Upload %s has expired", upload.ID), 404) - return - } - } - - // Retrieve file using data backend - if _, ok := upload.Files[fileID]; !ok { - ctx.Warningf("File %s not found", fileID) - redirect(req, resp, fmt.Errorf("File %s 
not found", fileID), 404) - return - } - - file := upload.Files[fileID] - ctx.SetFile(file.Name) - - // Compare url filename with upload filename - if file.Name != fileName { - ctx.Warningf("Invalid filename %s mismatch %s", fileName, file.Name) - redirect(req, resp, fmt.Errorf("File %s not found", fileName), 404) - return - } - - // If upload has OneShot option, test if file has not been already downloaded once - if upload.OneShot && file.Status == "downloaded" { - ctx.Warningf("File %s has already been downloaded in upload %s", file.Name, upload.ID) - redirect(req, resp, fmt.Errorf("File %s has already been downloaded", file.Name), 404) - return - } - - // If the file is marked as deleted by a previous call, we abort request - if upload.Removable && file.Status == "removed" { - ctx.Warningf("File %s has been removed", file.Name) - redirect(req, resp, fmt.Errorf("File %s has been removed", file.Name), 404) - return - } - - // Check yubikey - // If upload is yubikey protected, user must send an OTP when he wants to get a file. - if upload.Yubikey != "" { - token := vars["yubikey"] - if token == "" { - ctx.Warningf("Missing yubikey token") - redirect(req, resp, errors.New("Invalid yubikey token"), 401) - return - } - if len(token) != 44 { - ctx.Warningf("Invalid yubikey token : %s", token) - redirect(req, resp, errors.New("Invalid yubikey token"), 401) - return - } - if token[:12] != upload.Yubikey { - ctx.Warningf("Invalid yubikey device : %s", token) - redirect(req, resp, errors.New("Invalid yubikey token"), 401) - return - } - - // Error if yubikey is disabled on server, and enabled on upload - if !common.Config.YubikeyEnabled { - ctx.Warningf("Got a Yubikey upload but Yubikey backend is disabled") - redirect(req, resp, errors.New("Yubikey are disabled on this server"), 500) - return - } - - _, isValid, err := common.Config.YubiAuth.Verify(token) - if err != nil { - ctx.Warningf("Failed to validate yubikey token : %s", err) - redirect(req, resp, errors.New("Invalid yubikey token"), 401) - return - } - if !isValid { - ctx.Warningf("Invalid yubikey token : %s", token) - redirect(req, resp, errors.New("Invalid yubikey token"), 401) - return - } - } - - // Set content type and print file - resp.Header().Set("Content-Type", file.Type) - if file.CurrentSize > 0 { - resp.Header().Set("Content-Length", strconv.Itoa(int(file.CurrentSize))) - } - - // If "dl" GET params is set - // -> Set Content-Disposition header - // -> The client should download file instead of displaying it - dl := req.URL.Query().Get("dl") - if dl != "" { - resp.Header().Set("Content-Disposition", fmt.Sprintf(`attachement; filename="%s"`, file.Name)) - } else { - resp.Header().Set("Content-Disposition", fmt.Sprintf(`filename="%s"`, file.Name)) - } - - // HEAD Request => Do not print file, user just wants http headers - // GET Request => Print file content - ctx.Infof("Got a %s request", req.Method) - - if req.Method == "GET" { - // Get file in data backend - var backend dataBackend.DataBackend - if upload.Stream { - backend = dataBackend.GetStreamBackend() - } else { - backend = dataBackend.GetDataBackend() - } - fileReader, err := backend.GetFile(ctx.Fork("get file"), upload, file.ID) - if err != nil { - ctx.Warningf("Failed to get file %s in upload %s : %s", file.Name, upload.ID, err) - redirect(req, resp, fmt.Errorf("Failed to read file %s", file.Name), 404) - return - } - defer fileReader.Close() - - // Update metadata if oneShot option is set - if upload.OneShot { - file.Status = "downloaded" - err = 
-        // Update metadata if the OneShot option is set
-        if upload.OneShot {
-            file.Status = "downloaded"
-            err = metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx.Fork("update metadata"), upload, file)
-            if err != nil {
-                ctx.Warningf("Error while updating file %s metadata in upload %s : %s", file.Name, upload.ID, err)
-            }
-        }
-
-        // File is piped directly to the http response body without buffering
-        _, err = io.Copy(resp, fileReader)
-        if err != nil {
-            ctx.Warningf("Error while copying file to response : %s", err)
-        }
-
-        // Remove file from the data backend if the OneShot option is set
-        if upload.OneShot {
-            err = backend.RemoveFile(ctx.Fork("remove file"), upload, file.ID)
-            if err != nil {
-                ctx.Warningf("Error while deleting file %s from upload %s : %s", file.Name, upload.ID, err)
-                return
-            }
-        }
-
-        // Remove upload if no files are available
-        err = RemoveUploadIfNoFileAvailable(ctx, upload)
-        if err != nil {
-            ctx.Warningf("Error while checking if upload can be removed : %s", err)
-        }
-    }
-}
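A note on the Yubikey check in the handler above: a Yubikey OTP is a fixed 44-character ModHex string whose first 12 characters are the public ID of the emitting device, which is why the handler compares token[:12] against the device ID stored on the upload. A minimal standalone sketch of that prefix check (the OTP value below is made up for illustration):

package main

import "fmt"

// otpDevicePrefix returns the 12-character public device ID of a Yubikey OTP,
// or an error if the token does not have the expected 44-character length.
func otpDevicePrefix(otp string) (string, error) {
    if len(otp) != 44 {
        return "", fmt.Errorf("invalid OTP length %d", len(otp))
    }
    return otp[:12], nil
}

func main() {
    // Hypothetical OTP: "ccccccdefghi" would be the enrolled device ID.
    otp := "ccccccdefghidbnhjuktceilhjiklnbdtugelkvvrclt"
    prefix, err := otpDevicePrefix(otp)
    fmt.Println(prefix, err) // ccccccdefghi <nil>
}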
-
-func addFileHandler(resp http.ResponseWriter, req *http.Request) {
-    var err error
-    ctx := common.NewPlikContext("add file handler", req)
-    defer ctx.Finalize(err)
-
-    // Check that source IP address is valid
-    code, err := checkSourceIP(ctx, false)
-    if err != nil {
-        http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), code)
-        return
-    }
-
-    // Get the upload id from the url params
-    vars := mux.Vars(req)
-    uploadID := vars["uploadID"]
-    fileID := vars["fileID"]
-    ctx.SetUpload(uploadID)
-
-    // Get upload metadata
-    upload, err := metadataBackend.GetMetaDataBackend().Get(ctx.Fork("get metadata"), uploadID)
-    if err != nil {
-        ctx.Warningf("Upload metadata not found")
-        http.Error(resp, common.NewResult(fmt.Sprintf("Upload %s not found", uploadID), nil).ToJSONString(), 404)
-        return
-    }
-
-    // Handle basic auth if upload is password protected
-    err = httpBasicAuth(req, resp, upload)
-    if err != nil {
-        ctx.Warningf("Unauthorized : %s", err)
-        return
-    }
-
-    // Check upload token
-    if req.Header.Get("X-UploadToken") != upload.UploadToken {
-        ctx.Warningf("Invalid upload token %s", req.Header.Get("X-UploadToken"))
-        http.Error(resp, common.NewResult("Invalid upload token in X-UploadToken header", nil).ToJSONString(), 404)
-        return
-    }
-
-    // Create a new file object
-    var newFile *common.File
-    if fileID != "" {
-        if _, ok := upload.Files[fileID]; ok {
-            newFile = upload.Files[fileID]
-        } else {
-            ctx.Warningf("Invalid file id %s", fileID)
-            http.Error(resp, common.NewResult("Invalid file id", nil).ToJSONString(), 404)
-            return
-        }
-    } else {
-        newFile = common.NewFile()
-        newFile.Type = "application/octet-stream"
-    }
-    ctx.SetFile(newFile.ID)
-
-    // Get file handle from multipart request
-    var file io.Reader
-    multiPartReader, err := req.MultipartReader()
-    if err != nil {
-        ctx.Warningf("Failed to get file from multipart request : %s", err)
-        http.Error(resp, common.NewResult("Failed to get file from multipart request", nil).ToJSONString(), 500)
-        return
-    }
-
-    // Read multipart body until the "file" part
-    for {
-        part, errPart := multiPartReader.NextPart()
-        if errPart == io.EOF {
-            break
-        }
-        if part.FormName() == "file" {
-            file = part
-
-            // Check file name length
-            if len(part.FileName()) > 1024 {
-                http.Error(resp, common.NewResult("File name is too long. Maximum length is 1024 characters", nil).ToJSONString(), 401)
-                return
-            }
-
-            newFile.Name = part.FileName()
-            break
-        }
-    }
-    if file == nil {
-        ctx.Warning("Missing file from multipart request")
-        http.Error(resp, common.NewResult("Missing file from multipart request", nil).ToJSONString(), 400)
-        return
-    }
-    if newFile.Name == "" {
-        ctx.Warning("Missing file name from multipart request")
-        http.Error(resp, common.NewResult("Missing file name from multipart request", nil).ToJSONString(), 400)
-    }
-    ctx.SetFile(newFile.Name)
-
-    // Pipe file data from the request body to a preprocessing goroutine
-    //  - Guess content type
-    //  - Compute md5sum
-    //  - Limit upload size
-    preprocessReader, preprocessWriter := io.Pipe()
-    md5Hash := md5.New()
-    totalBytes := 0
-    go func() {
-        for {
-            buf := make([]byte, 1024)
-            bytesRead, err := file.Read(buf)
-            if err != nil {
-                if err != io.EOF {
-                    ctx.Warningf("Unable to read data from request body : %s", err)
-                }
-
-                preprocessWriter.Close()
-                return
-            }
-
-            // Detect the content-type using the first 512 bytes
-            if totalBytes == 0 {
-                newFile.Type = http.DetectContentType(buf)
-                ctx.Infof("Got Content-Type : %s", newFile.Type)
-            }
-
-            // Increment size
-            totalBytes += bytesRead
-
-            // Compute md5sum
-            md5Hash.Write(buf[:bytesRead])
-
-            // Check upload max size limit
-            if int64(totalBytes) > common.Config.MaxFileSize {
-                err = ctx.EWarningf("File too big (limit is set to %d bytes)", common.Config.MaxFileSize)
-                preprocessWriter.CloseWithError(err)
-                return
-            }
-
-            // Pass file data to the data backend
-            preprocessWriter.Write(buf[:bytesRead])
-        }
-    }()
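The content-type sniffing in the goroutine above works because http.DetectContentType implements the standard MIME sniffing algorithm and considers at most the first 512 bytes of its input, so handing it the first 1024-byte buffer read from the request is sufficient. A standalone sketch:

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // A PNG signature followed by zero padding; DetectContentType never
    // looks past the first 512 bytes of the slice it is given.
    data := append([]byte("\x89PNG\r\n\x1a\n"), make([]byte, 1024)...)
    fmt.Println(http.DetectContentType(data)) // image/png
}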
-
-    // Save file in the data backend
-    var backend dataBackend.DataBackend
-    if upload.Stream {
-        backend = dataBackend.GetStreamBackend()
-    } else {
-        backend = dataBackend.GetDataBackend()
-    }
-    backendDetails, err := backend.AddFile(ctx.Fork("save file"), upload, newFile, preprocessReader)
-    if err != nil {
-        ctx.Warningf("Unable to save file : %s", err)
-        http.Error(resp, common.NewResult(fmt.Sprintf("Error saving file %s in upload %s : %s", newFile.Name, upload.ID, err), nil).ToJSONString(), 500)
-        return
-    }
-
-    // Fill in file information
-    newFile.CurrentSize = int64(totalBytes)
-    if upload.Stream {
-        newFile.Status = "downloaded"
-    } else {
-        newFile.Status = "uploaded"
-    }
-    newFile.Md5 = fmt.Sprintf("%x", md5Hash.Sum(nil))
-    newFile.UploadDate = time.Now().Unix()
-    newFile.BackendDetails = backendDetails
-
-    // Update upload metadata
-    upload.Files[newFile.ID] = newFile
-    err = metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx.Fork("update metadata"), upload, newFile)
-    if err != nil {
-        ctx.Warningf("Unable to update metadata : %s", err)
-        http.Error(resp, common.NewResult(fmt.Sprintf("Error adding file %s to upload %s metadata : %s", newFile.Name, upload.ID, err), nil).ToJSONString(), 500)
-        return
-    }
-
-    // Remove all private information (ip, data backend details, ...) before
-    // sending metadata back to the client
-    newFile.Sanitize()
-
-    // Print file metadata in the json response.
-    var json []byte
-    if json, err = utils.ToJson(newFile); err == nil {
-        resp.Write(json)
-    } else {
-        http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500)
-    }
-}
-
-func removeFileHandler(resp http.ResponseWriter, req *http.Request) {
-    var err error
-    ctx := common.NewPlikContext("remove file handler", req)
-    defer ctx.Finalize(err)
-
-    // Check that source IP address is valid
-    code, err := checkSourceIP(ctx, false)
-    if err != nil {
-        http.Error(resp, common.NewResult(err.Error(), nil).ToJSONString(), code)
-        return
-    }
-
-    // Get the upload id and file id from the url params
-    vars := mux.Vars(req)
-    uploadID := vars["uploadID"]
-    fileID := vars["fileID"]
-    if uploadID == "" {
-        ctx.Warning("Missing upload id")
-        http.Error(resp, common.NewResult(fmt.Sprintf("Upload %s not found", uploadID), nil).ToJSONString(), 404)
-        return
-    }
-    if fileID == "" {
-        ctx.Warning("Missing file id")
-        http.Error(resp, common.NewResult(fmt.Sprintf("File %s not found", fileID), nil).ToJSONString(), 404)
-        return
-    }
-    ctx.SetUpload(uploadID)
-
-    // Retrieve Upload
-    upload, err := metadataBackend.GetMetaDataBackend().Get(ctx.Fork("get metadata"), uploadID)
-    if err != nil {
-        ctx.Warning("Upload not found")
-        http.Error(resp, common.NewResult(fmt.Sprintf("Upload %s not found", uploadID), nil).ToJSONString(), 404)
-        return
-    }
-
-    // Handle basic auth if upload is password protected
-    err = httpBasicAuth(req, resp, upload)
-    if err != nil {
-        ctx.Warningf("Unauthorized : %s", err)
-        return
-    }
-
-    // Test if upload is removable
-    if !upload.Removable {
-        ctx.Warningf("User tried to remove file %s of a non-removable upload", fileID)
-        http.Error(resp, common.NewResult("Can't remove files on this upload", nil).ToJSONString(), 401)
-        return
-    }
-
-    // Check upload token
-    if req.Header.Get("X-UploadToken") != upload.UploadToken {
-        ctx.Warningf("Invalid upload token %s", req.Header.Get("X-UploadToken"))
-        http.Error(resp, common.NewResult("Invalid upload token in X-UploadToken header", nil).ToJSONString(), 403)
-        return
-    }
-
-    // Retrieve file information in upload
-    file, ok := upload.Files[fileID]
-    if !ok {
-        ctx.Warningf("File not found")
-        http.Error(resp, common.NewResult(fmt.Sprintf("File %s not found in upload %s", fileID, upload.ID), nil).ToJSONString(), 404)
-        return
-    }
-
-    // Set status to removed, and save metadata
-    file.Status = "removed"
-    if err := metadataBackend.GetMetaDataBackend().AddOrUpdateFile(ctx.Fork("update metadata"), upload, file); err != nil {
-        ctx.Warningf("Error while updating file metadata : %s", err)
-        http.Error(resp, common.NewResult(fmt.Sprintf("Error while updating file %s metadata in upload %s", file.Name, upload.ID), nil).ToJSONString(), 500)
-        return
-    }
-
-    // Remove file from the data backend
-    var backend dataBackend.DataBackend
-    if upload.Stream {
-        backend = dataBackend.GetStreamBackend()
-    } else {
-        backend = dataBackend.GetDataBackend()
-    }
-    if err := backend.RemoveFile(ctx.Fork("remove file"), upload, file.ID); err != nil {
-        ctx.Warningf("Error while deleting file : %s", err)
-        http.Error(resp, common.NewResult(fmt.Sprintf("Error while deleting file %s in upload %s", file.Name, upload.ID), nil).ToJSONString(), 500)
-        return
-    }
-
-    // Remove upload if no more files are available
-    err = RemoveUploadIfNoFileAvailable(ctx, upload)
-    if err != nil {
-        ctx.Warningf("Error occurred when checking if upload can be removed : %s", err)
-    }
-
-    // Print upload metadata in the json response.
-    var json []byte
-    if json, err = utils.ToJson(upload); err != nil {
-        ctx.Warningf("Unable to serialize response body : %s", err)
-        http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500)
-    }
-    resp.Write(json)
-}
-
-func getConfigurationHandler(resp http.ResponseWriter, req *http.Request) {
-    var err error
-    ctx := common.NewPlikContext("get configuration handler", req)
-    defer ctx.Finalize(err)
-
-    // Print configuration in the json response.
-    var json []byte
-    if json, err = utils.ToJson(common.Config); err != nil {
-        ctx.Warningf("Unable to serialize response body : %s", err)
-        http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500)
-    }
-    resp.Write(json)
-}
-
-func getVersionHandler(resp http.ResponseWriter, req *http.Request) {
-    var err error
-    ctx := common.NewPlikContext("get version handler", req)
-    defer ctx.Finalize(err)
-
-    // Print version and build information in the json response.
-    var json []byte
-    if json, err = utils.ToJson(common.GetBuildInfo()); err != nil {
-        ctx.Warningf("Unable to serialize response body : %s", err)
-        http.Error(resp, common.NewResult("Unable to serialize response body", nil).ToJSONString(), 500)
-    }
-    resp.Write(json)
-}
-
 //
 //// Misc functions
 //
-// Check if source IP address is valid and whitelisted
-func checkSourceIP(ctx *common.PlikContext, whitelist bool) (code int, err error) {
-    // Get source IP address from context
-    sourceIPstr, ok := ctx.Get("RemoteIP")
-    if !ok || sourceIPstr.(string) == "" {
-        ctx.Warning("Unable to get source IP address from context")
-        err = errors.New("Unable to get source IP address")
-        code = 401
-        return
-    }
-
-    // Parse source IP address
-    sourceIP := net.ParseIP(sourceIPstr.(string))
-    if sourceIP == nil {
-        ctx.Warningf("Unable to parse source IP address %s", sourceIPstr)
-        err = errors.New("Unable to parse source IP address")
-        code = 401
-        return
-    }
-
-    // If needed check that source IP address is in the whitelist
-    if whitelist && len(common.UploadWhitelist) > 0 {
-        for _, net := range common.UploadWhitelist {
-            if net.Contains(sourceIP) {
-                return
-            }
-        }
-        ctx.Warningf("Unauthorized source IP address %s", sourceIPstr)
-        err = errors.New("Unauthorized source IP address")
-        code = 403
-    }
-    return
-}
-
-func httpBasicAuth(req *http.Request, resp http.ResponseWriter, upload *common.Upload) (err error) {
-    if upload.ProtectedByPassword {
-        if req.Header.Get("Authorization") == "" {
-            err = errors.New("Missing Authorization header")
-        } else {
-            // Basic auth Authorization header must be set to
-            // "Basic base64("login:password")". Only the md5sum
-            // of the base64 string is saved in the upload metadata
-            auth := strings.Split(req.Header.Get("Authorization"), " ")
-            if len(auth) != 2 {
-                err = fmt.Errorf("Invalid Authorization header %s", req.Header.Get("Authorization"))
-            }
-            if auth[0] != "Basic" {
-                err = fmt.Errorf("Invalid http authorization scheme : %s", auth[0])
-            }
-            var md5sum string
-            md5sum, err = utils.Md5sum(auth[1])
-            if err != nil {
-                err = fmt.Errorf("Unable to hash credentials : %s", err)
-            }
-            if md5sum != upload.Password {
-                err = errors.New("Invalid credentials")
-            }
-        }
-        if err != nil {
-            // The WWW-Authenticate header tells the client to retry the request
-            // with valid http basic credentials set in the Authorization header.
- resp.Header().Set("WWW-Authenticate", "Basic realm=\"plik\"") - http.Error(resp, "Please provide valid credentials to download this file", 401) - } - } - return -} - -var userAgents = []string{"wget", "curl", "python-urllib", "libwwww-perl", "php", "pycurl"} - -func redirect(req *http.Request, resp http.ResponseWriter, err error, status int) { - // The web client uses http redirect to get errors - // from http redirect and display a nice HTML error message - // But cli clients needs a clean string response - userAgent := strings.ToLower(req.UserAgent()) - for _, ua := range userAgents { - if strings.HasPrefix(userAgent, ua) { - http.Error(resp, err.Error(), status) - return - } - } - http.Redirect(resp, req, fmt.Sprintf("/#/?err=%s&errcode=%d&uri=%s", err.Error(), status, req.RequestURI), 301) - return -} - // UploadsCleaningRoutine periodicaly remove expired uploads func UploadsCleaningRoutine() { - ctx := common.RootContext().Fork("clean expired uploads") + ctx := juliet.NewContext() for { // Sleep between 2 hours and 3 hours @@ -1041,30 +182,23 @@ func UploadsCleaningRoutine() { } else { // Remove them for _, uploadID := range uploadIds { - ctx.SetUpload(uploadID) log.Infof("Removing expired upload %s", uploadID) // Get upload metadata - childCtx := ctx.Fork("get metadata") - childCtx.AutoDetach() - upload, err := metadataBackend.GetMetaDataBackend().Get(childCtx, uploadID) + upload, err := metadataBackend.GetMetaDataBackend().Get(ctx, uploadID) if err != nil { log.Warningf("Unable to get infos for upload: %s", err) continue } // Remove from data backend - childCtx = ctx.Fork("remove upload data") - childCtx.AutoDetach() - err = dataBackend.GetDataBackend().RemoveUpload(childCtx, upload) + err = dataBackend.GetDataBackend().RemoveUpload(ctx, upload) if err != nil { log.Warningf("Unable to remove upload data : %s", err) continue } // Remove from metadata backend - childCtx = ctx.Fork("remove upload metadata") - childCtx.AutoDetach() - err = metadataBackend.GetMetaDataBackend().Remove(childCtx, upload) + err = metadataBackend.GetMetaDataBackend().Remove(ctx, upload) if err != nil { log.Warningf("Unable to remove upload metadata : %s", err) } @@ -1072,33 +206,3 @@ func UploadsCleaningRoutine() { } } } - -// RemoveUploadIfNoFileAvailable iterates on upload files and remove upload files -// and metadata if all the files have been downloaded (usefull for OneShot uploads) -func RemoveUploadIfNoFileAvailable(ctx *common.PlikContext, upload *common.Upload) (err error) { - // Test if there are remaining files - filesInUpload := len(upload.Files) - for _, f := range upload.Files { - if f.Status == "downloaded" { - filesInUpload-- - } - } - - if filesInUpload == 0 { - - ctx.Debugf("No more files in upload. 
Removing all informations.") - - if !upload.Stream { - err = dataBackend.GetDataBackend().RemoveUpload(ctx, upload) - if err != nil { - return - } - } - err = metadataBackend.GetMetaDataBackend().Remove(ctx, upload) - if err != nil { - return - } - } - - return -} diff --git a/server/plik_test.go b/server/plik_test.go index 029dad00..b32ba2f5 100644 --- a/server/plik_test.go +++ b/server/plik_test.go @@ -135,20 +135,27 @@ func TestOneShot(t *testing.T) { test("getFile", upload, file, 404, t) } -func TestRemovable(t *testing.T) { +func TestRemoveUpload(t *testing.T) { upload := createUpload(&common.Upload{}, t) - uploadRemovable := createUpload(&common.Upload{Removable: true}, t) - file := uploadFile(upload, "test", "", readerForUpload, t) - fileRemovable := uploadFile(uploadRemovable, "test", "", readerForUpload, t) + // Remove upload + test("removeUpload", upload, nil, 200, t) + + // Get removed upload + test("getUpload", upload, nil, 404, t) +} - // Should fail on classic upload - test("removeFile", upload, file, 401, t) +func TestRemoveFile(t *testing.T) { + upload := createUpload(&common.Upload{}, t) + uploadRemovable := createUpload(&common.Upload{}, t) - // Should work on removable upload + uploadFile(upload, "test", "", readerForUpload, t) + fileRemovable := uploadFile(uploadRemovable, "test", "", readerForUpload, t) + + // Remove file test("removeFile", uploadRemovable, fileRemovable, 200, t) - // Test if it worked on removable + // Test if file has been removed test("getFile", uploadRemovable, fileRemovable, 404, t) } @@ -378,6 +385,31 @@ func getUpload(uploadID string) (httpCode int, upload *common.Upload, err error) return } +func removeUpload(upload *common.Upload) (httpCode int, err error) { + var URL *url.URL + URL, err = url.Parse(plikURL + "/upload/" + upload.ID) + if err != nil { + return + } + + var req *http.Request + req, err = http.NewRequest("DELETE", URL.String(), nil) + if err != nil { + return + } + + req.Header.Set("X-UploadToken", upload.UploadToken) + req.Header.Set("User-Agent", "curl") + + resp, err := client.Do(req) + if err != nil { + return + } + httpCode = resp.StatusCode + + return +} + func getFile(upload *common.Upload, file *common.File) (httpCode int, content string, err error) { var URL *url.URL @@ -460,6 +492,22 @@ func test(action string, upload *common.Upload, file *common.File, expectedHTTPC t.Logf(" -> Got a %d. Good", code) } + case "removeUpload": + + t.Logf("Try to %s on upload %s. We should get a %d : ", action, upload.ID, expectedHTTPCode) + + code, err := removeUpload(upload) + if err != nil { + t.Fatalf("Failed to remove upload : %s", err) + } + + // Test code + if code != expectedHTTPCode { + t.Fatalf("We got http code %d on action %s on upload %s. We expected %d", code, action, upload.ID, expectedHTTPCode) + } else { + t.Logf(" -> Got a %d. Good", code) + } + case "getFile": t.Logf("Try to %s file %s on upload %s. 
diff --git a/server/plik_test.go b/server/plik_test.go
index 029dad00..b32ba2f5 100644
--- a/server/plik_test.go
+++ b/server/plik_test.go
@@ -135,20 +135,27 @@ func TestOneShot(t *testing.T) {
     test("getFile", upload, file, 404, t)
 }
 
-func TestRemovable(t *testing.T) {
+func TestRemoveUpload(t *testing.T) {
     upload := createUpload(&common.Upload{}, t)
-    uploadRemovable := createUpload(&common.Upload{Removable: true}, t)
-    file := uploadFile(upload, "test", "", readerForUpload, t)
-    fileRemovable := uploadFile(uploadRemovable, "test", "", readerForUpload, t)
+    // Remove upload
+    test("removeUpload", upload, nil, 200, t)
+
+    // Get removed upload
+    test("getUpload", upload, nil, 404, t)
+}
 
-    // Should fail on classic upload
-    test("removeFile", upload, file, 401, t)
+func TestRemoveFile(t *testing.T) {
+    upload := createUpload(&common.Upload{}, t)
+    uploadRemovable := createUpload(&common.Upload{}, t)
 
-    // Should work on removable upload
+    uploadFile(upload, "test", "", readerForUpload, t)
+    fileRemovable := uploadFile(uploadRemovable, "test", "", readerForUpload, t)
+
+    // Remove file
     test("removeFile", uploadRemovable, fileRemovable, 200, t)
 
-    // Test if it worked on removable
+    // Test if file has been removed
     test("getFile", uploadRemovable, fileRemovable, 404, t)
 }
 
@@ -378,6 +385,31 @@ func getUpload(uploadID string) (httpCode int, upload *common.Upload, err error)
     return
 }
 
+func removeUpload(upload *common.Upload) (httpCode int, err error) {
+    var URL *url.URL
+    URL, err = url.Parse(plikURL + "/upload/" + upload.ID)
+    if err != nil {
+        return
+    }
+
+    var req *http.Request
+    req, err = http.NewRequest("DELETE", URL.String(), nil)
+    if err != nil {
+        return
+    }
+
+    req.Header.Set("X-UploadToken", upload.UploadToken)
+    req.Header.Set("User-Agent", "curl")
+
+    resp, err := client.Do(req)
+    if err != nil {
+        return
+    }
+    httpCode = resp.StatusCode
+
+    return
+}
+
 func getFile(upload *common.Upload, file *common.File) (httpCode int, content string, err error) {
 
     var URL *url.URL
@@ -460,6 +492,22 @@ func test(action string, upload *common.Upload, file *common.File, expectedHTTPC
             t.Logf(" -> Got a %d. Good", code)
         }
 
+    case "removeUpload":
+
+        t.Logf("Try to %s on upload %s. We should get a %d : ", action, upload.ID, expectedHTTPCode)
+
+        code, err := removeUpload(upload)
+        if err != nil {
+            t.Fatalf("Failed to remove upload : %s", err)
+        }
+
+        // Test code
+        if code != expectedHTTPCode {
+            t.Fatalf("We got http code %d on action %s on upload %s. We expected %d", code, action, upload.ID, expectedHTTPCode)
+        } else {
+            t.Logf(" -> Got a %d. Good", code)
+        }
+
     case "getFile":
 
         t.Logf("Try to %s file %s on upload %s. We should get a %d : ", action, file.Name, upload.ID, expectedHTTPCode)
 
diff --git a/server/plikd.cfg b/server/plikd.cfg
index 681d613f..e0cce907 100644
--- a/server/plikd.cfg
+++ b/server/plikd.cfg
@@ -7,7 +7,7 @@
 # Global params
 #
 
-LogLevel = "INFO" # Other levels : DEBUG, WARNING, CRITICAL, FATAL
+LogLevel = "DEBUG" # Other levels : INFO, WARNING, CRITICAL, FATAL
 ListenPort = 8080
 ListenAddress = "0.0.0.0"
 MaxFileSize = 10737418240 # 10GB
@@ -26,11 +26,17 @@
 YubikeyAPISecret = "" # Yubikey API Token
 SourceIpHeader = "" # If behind a reverse proxy (e.g. X-FORWARDED-FOR)
 UploadWhitelist = [] # Restrict uploads to one or more ip ranges
 
+Authentication = false # Enable authentication
+GoogleApiClientID = "" # Google api client ID
+GoogleApiSecret = "" # Google api client secret
+OvhApiKey = "" # OVH api application key
+OvhApiSecret = "" # OVH api application secret
+
 #
 # Backend choices
 #
 
-MetadataBackend = "file" # Available : file, mongo
+MetadataBackend = "bolt" # Available : bolt, mongo, file (deprecated)
 DataBackend = "file" # Available : file, swift, weedfs
 ShortenBackend = "" # Available : is.gd, w000t.me
 StreamMode = true # Enable stream mode
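With MetadataBackend now defaulting to "bolt", existing instances on the deprecated file backend can be migrated with the file2bolt utility added at the end of this changeset. The backend itself is built from a plain config map; a minimal sketch mirroring the calls made in utils/file2bolt.go below (the "plik.db" path is an assumption, matching the new default):

// Sketch only: constructing the Bolt metadata backend from a config map,
// exactly as utils/file2bolt.go does further down in this changeset.
import "github.com/root-gg/plik/server/metadataBackend/bolt"

func newBoltBackend() {
    bmb := bolt.NewBoltMetadataBackend(map[string]interface{}{"Path": "plik.db"})
    _ = bmb // ready for Get/Create calls, as in the migration utility
}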
diff --git a/server/public/css/style.css b/server/public/css/style.css
index 87020e12..226de54c 100644
--- a/server/public/css/style.css
+++ b/server/public/css/style.css
@@ -71,11 +71,16 @@ body {
     background-size: cover;
 }
 
-.col-centered{
+.col-centered {
     float: none;
     margin: 0 auto;
 }
 
+.auth-btn {
+    display:inline-block;
+    padding:5px;
+}
+
 /* Background */
 
 .wallpaper {
@@ -267,4 +272,10 @@ header a:hover, header a:visited, header a:link, header a:active{
 
 .client-max-width {
     max-width:550px;
+}
+
+/* Tokens */
+.token-speach {
+    /* font-size: 12px; */
+    font-family: sans-serif;
 }
\ No newline at end of file
diff --git a/server/public/css/water_drop.css b/server/public/css/water_drop.css
index d122beb6..20c84c07 100644
--- a/server/public/css/water_drop.css
+++ b/server/public/css/water_drop.css
@@ -1,3 +1,29 @@
+/*
+The MIT License (MIT)
+
+Copyright (c) <2015>
+- Mathieu Bodjikian
+- Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
 .pulse1 {
     position: fixed;
     width: 100px;
diff --git a/server/public/index.html b/server/public/index.html
index 5f6d90f9..6dfcac56 100644
--- a/server/public/index.html
+++ b/server/public/index.html
@@ -45,19 +45,27 @@
@@ -169,7 +169,7 @@
@@ -230,7 +230,7 @@
[The markup bodies of these three index.html hunks were lost in extraction; only the hunk headers survive.]
diff --git a/server/public/partials/token.html b/server/public/partials/token.html
new file mode 100644
index 00000000..7809d2bb
--- /dev/null
+++ b/server/public/partials/token.html
@@ -0,0 +1,127 @@
+[The HTML markup of this new 127-line partial was lost in extraction; the recoverable text content follows.]
+Plik tokens allow you to upload files without source IP restriction.
+  • Tokens can only be generated from a valid source IP.
+  • You can save a token to the local storage of your web browser by clicking the remember button.
+  • If you are using the command line client you can use a token by adding a Token = "xxxx" line to your ~/.plikrc file.
+  • You can list all uploads owned by a token with the browse button.
+Per-upload details rendered by the partial :
+  {{ upload.id }}
+  uploaded : {{ upload.uploadDate * 1000 | date:'medium' }}
+  expire : {{ upload.ttl == -1 ? 'never expire' : (upload.uploadDate + upload.ttl) * 1000 | date:'medium' }}
+  {{ file.fileName }} {{ humanReadableSize(file.fileSize) }}
\ No newline at end of file
diff --git a/server/shortenBackend/isgd/isgd.go b/server/shortenBackend/isgd/isgd.go
index b1851710..544fdde0 100644
--- a/server/shortenBackend/isgd/isgd.go
+++ b/server/shortenBackend/isgd/isgd.go
@@ -40,6 +40,7 @@ import (
     "strings"
     "time"
 
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
     "github.com/root-gg/plik/server/common"
 )
 
@@ -63,13 +64,13 @@ func NewIsGdShortenBackend(_ map[string]interface{}) *ShortenBackendIsGd {
 }
 
 // Shorten implementation for is.gd shorten backend
-func (sb *ShortenBackendIsGd) Shorten(ctx *common.PlikContext, longURL string) (shortURL string, err error) {
-    defer ctx.Finalize(err)
+func (sb *ShortenBackendIsGd) Shorten(ctx *juliet.Context, longURL string) (shortURL string, err error) {
+    log := common.GetLogger(ctx)
 
     // Request short url
     resp, err := client.Get(sb.URL + "&url=" + url.QueryEscape(longURL))
     if err != nil {
-        err = ctx.EWarningf("Unable to request short url from is.gd : %s", err)
+        err = log.EWarningf("Unable to request short url from is.gd : %s", err)
         return
     }
     defer resp.Body.Close()
 
@@ -77,16 +78,16 @@ func (sb *ShortenBackendIsGd) Shorten(ctx *common.PlikContext, longURL string) (
     // Read response body
     respBody, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        err = ctx.EWarningf("Unable to read response from is.gd : %s", err)
+        err = log.EWarningf("Unable to read response from is.gd : %s", err)
         return
     }
 
     // Got url ? :)
     if !strings.HasPrefix(string(respBody), "http") {
-        err = ctx.EWarningf("Invalid response from is.gd")
+        err = log.EWarningf("Invalid response from is.gd")
         return
     }
 
-    ctx.Infof("Shortlink successfully created : %s", string(respBody))
+    log.Infof("Shortlink successfully created : %s", string(respBody))
     return string(respBody), nil
 }
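The Shorten signature now takes the juliet context and derives its logger from it instead of finalizing a PlikContext. A hedged sketch of how a caller would drive the refactored interface (it assumes shortenBackend.GetShortenBackend() returns the configured singleton, per the accessor named in shortenBackend.go below):

func shortenExample(ctx *juliet.Context) {
    // common.GetLogger(ctx) is the pattern used throughout this changeset.
    log := common.GetLogger(ctx)

    shortURL, err := shortenBackend.GetShortenBackend().Shorten(ctx, "https://example.com/some/long/url")
    if err != nil {
        log.Warningf("Unable to shorten url : %s", err)
        return
    }
    log.Infof("Short url : %s", shortURL)
}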
diff --git a/server/shortenBackend/shortenBackend.go b/server/shortenBackend/shortenBackend.go
index 4ab21994..78f0686a 100644
--- a/server/shortenBackend/shortenBackend.go
+++ b/server/shortenBackend/shortenBackend.go
@@ -30,6 +30,7 @@ THE SOFTWARE.
 package shortenBackend
 
 import (
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
     "github.com/root-gg/plik/server/common"
     "github.com/root-gg/plik/server/shortenBackend/isgd"
     "github.com/root-gg/plik/server/shortenBackend/w000t"
@@ -40,7 +41,7 @@ var shortenBackend ShortenBackend
 
 // ShortenBackend interface describes methods that shorten backends
 // must implement to be compatible with plik.
 type ShortenBackend interface {
-    Shorten(ctx *common.PlikContext, longURL string) (string, error)
+    Shorten(ctx *juliet.Context, longURL string) (string, error)
 }
 
 // GetShortenBackend is a singleton pattern.
@@ -62,7 +63,7 @@ func Initialize() {
         case "is.gd":
             shortenBackend = isgd.NewIsGdShortenBackend(common.Config.ShortenBackendConfig)
         default:
-            common.Log().Fatalf("Invalid shorten backend %s", common.Config.DataBackend)
+            common.Logger().Fatalf("Invalid shorten backend %s", common.Config.ShortenBackend)
         }
     }
 }
diff --git a/server/shortenBackend/w000t/w000t.go b/server/shortenBackend/w000t/w000t.go
index e05a6d58..450f3cd5 100644
--- a/server/shortenBackend/w000t/w000t.go
+++ b/server/shortenBackend/w000t/w000t.go
@@ -39,6 +39,7 @@ import (
     "strings"
     "time"
 
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
     "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/utils"
     "github.com/root-gg/plik/server/common"
 )
@@ -65,15 +66,15 @@ func NewW000tMeShortenBackend(config map[string]interface{}) *ShortenBackendW000
 }
 
 // Shorten implementation for w000t.me shorten backend
-func (sb *ShortenBackendW000t) Shorten(ctx *common.PlikContext, longURL string) (shortURL string, err error) {
-    defer ctx.Finalize(err)
+func (sb *ShortenBackendW000t) Shorten(ctx *juliet.Context, longURL string) (shortURL string, err error) {
+    log := common.GetLogger(ctx)
 
     // Request short url
     str := `{"w000t":{"long_url":"` + longURL + `", "status":"hidden"}, "token":"` + sb.Token + `" }`
     b := strings.NewReader(str)
     resp, err := client.Post(sb.URL, "application/json", b)
     if err != nil {
-        err = ctx.EWarningf("Unable to request short url from w000t.me : %s", err)
+        err = log.EWarningf("Unable to request short url from w000t.me : %s", err)
         return
     }
     defer resp.Body.Close()
 
@@ -81,16 +82,16 @@ func (sb *ShortenBackendW000t) Shorten(ctx *common.PlikContext, longURL string)
     // Read response body
     respBody, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        err = ctx.EWarningf("Unable to read response from w000t.me : %s", err)
+        err = log.EWarningf("Unable to read response from w000t.me : %s", err)
         return
     }
 
     // Got url ? :)
     if !strings.HasPrefix(string(respBody), "http") {
-        err = ctx.EWarningf("Invalid response from w000t.me")
+        err = log.EWarningf("Invalid response from w000t.me")
        return
     }
 
-    ctx.Infof("Shortlink successfully created : %s", string(respBody))
+    log.Infof("Shortlink successfully created : %s", string(respBody))
     return string(respBody), nil
 }
diff --git a/utils/file2bolt.go b/utils/file2bolt.go
new file mode 100644
index 00000000..c37d6b72
--- /dev/null
+++ b/utils/file2bolt.go
@@ -0,0 +1,113 @@
+/**
+
+    Plik upload server
+
+The MIT License (MIT)
+
+Copyright (c) <2015>
+ - Mathieu Bodjikian
+ - Charles-Antoine Mathieu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+**/
+
+package main
+
+import (
+    "flag"
+    "fmt"
+    "io/ioutil"
+    "os"
+
+    "github.com/root-gg/plik/server/Godeps/_workspace/src/github.com/root-gg/juliet"
+    "github.com/root-gg/plik/server/metadataBackend/bolt"
+    "github.com/root-gg/plik/server/metadataBackend/file"
+)
+
+// This script migrates upload metadata from the file backend to the Bolt backend
+//
+//  go run file2bolt.go --directory ../files --db ../plik.db
+//  [02/01/2016 22:00:48][INFO][bolt.go:164 Create] Upload metadata successfully saved
+//  [02/01/2016 22:00:48][INFO][bolt.go:164 Create] Upload metadata successfully saved
+//  2 uploads imported
+//
+// Some .config "no such file or directory" errors are normal if you already switched to the Bolt metadata backend
+// while using the file data backend, as it will create upload directories but not .config files.
+
+func main() {
+    // Parse command line arguments
+    var directoryPath = flag.String("directory", "../files", "File metadata backend base path")
+    var dbPath = flag.String("db", "../plik.db", "Bolt db path")
+    flag.Parse()
+
+    if *directoryPath == "" || *dbPath == "" {
+        fmt.Println("usage : file2bolt --directory path --db path")
+        os.Exit(1)
+    }
+
+    // Initialize File metadata backend
+    fileConfig := map[string]interface{}{"Directory": *directoryPath}
+    fmb := file.NewFileMetadataBackend(fileConfig)
+
+    // Initialize Bolt metadata backend
+    boltConfig := map[string]interface{}{"Path": *dbPath}
+    bmb := bolt.NewBoltMetadataBackend(boltConfig)
+
+    counter := 0
+
+    // upload ids are the names of the second level of directories of the file metadata backend
+    dirs1, err := ioutil.ReadDir(*directoryPath)
+    if err != nil {
+        fmt.Printf("Unable to open directory %s : %s\n", *directoryPath, err)
+        os.Exit(1)
+    }
+    for _, dir1 := range dirs1 {
+        if dir1.IsDir() {
+            path := *directoryPath + "/" + dir1.Name()
+            dirs2, err := ioutil.ReadDir(path)
+            if err != nil {
+                fmt.Printf("Unable to open directory %s : %s\n", path, err)
+                os.Exit(1)
+            }
+            for _, dir2 := range dirs2 {
+                if dir2.IsDir() {
+                    uploadID := dir2.Name()
+
+                    // Load upload from the file metadata backend
+                    upload, err := fmb.Get(juliet.NewContext(), uploadID)
+                    if err != nil {
+                        fmt.Printf("Unable to load upload %s : %s\n", uploadID, err)
+                        continue
+                    }
+
+                    // Save upload to the bolt metadata backend
+                    err = bmb.Create(juliet.NewContext(), upload)
+                    if err != nil {
+                        fmt.Printf("Unable to save upload %s : %s\n", uploadID, err)
+                        continue
+                    }
+
+                    counter++
+                }
+            }
+        }
+    }
+
+    fmt.Printf("%d uploads imported\n", counter)
+}
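After a migration run, a quick way to spot-check the result is to read one of the printed upload IDs back through the Bolt backend. A sketch, under the assumption that the Bolt backend exposes the same Get method the file backend does above ("someUploadID" is a placeholder):

// Sketch: verify one migrated upload by reading it back from the Bolt db.
upload, err := bmb.Get(juliet.NewContext(), "someUploadID")
if err != nil {
    fmt.Printf("Upload not found in bolt db : %s\n", err)
} else {
    fmt.Printf("Upload %s migrated with %d file(s)\n", upload.ID, len(upload.Files))
}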
diff --git a/server/public/js/app.js b/server/public/js/app.js
index 54e09679..11d7bc97 100644
--- a/server/public/js/app.js
+++ b/server/public/js/app.js
@@ -1,4 +1,5 @@
-/* The MIT License (MIT)
+/*
+ The MIT License (MIT)
 
 Copyright (c) <2015>
 - Mathieu Bodjikian
 - Charles-Antoine Mathieu
@@ -20,7 +21,8 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE. */
+ THE SOFTWARE.
+ */
 
 // Editable file name directive
 angular.module('contentEditable', []).
@@ -127,14 +129,15 @@ angular.module('api', ['ngFileUpload']).
     var api = {base: ''};
 
     // Make the actual HTTP call and return a promise
-    api.call = function (url, method, params, uploadToken) {
+    api.call = function (url, method, params, data, uploadToken) {
         var promise = $q.defer();
         var headers = {};
         if (uploadToken) headers['X-UploadToken'] = uploadToken;
         $http({
             url: url,
             method: method,
-            data: params,
+            params: params,
+            data: data,
             headers: headers
         })
             .success(function (data) {
@@ -151,8 +154,8 @@
     api.upload = function (url, file, params, progress_cb, basicAuth, uploadToken) {
         var promise = $q.defer();
         var headers = {};
-        if (basicAuth) headers['Authorization'] = "Basic " + basicAuth;
         if (uploadToken) headers['X-UploadToken'] = uploadToken;
+        if (basicAuth) headers['Authorization'] = "Basic " + basicAuth;
         Upload
             .upload({
                 url: url,
@@ -175,56 +178,149 @@
     };
 
     // Get upload metadata
-    api.getUpload = function (uploadId) {
+    api.getUpload = function(uploadId, uploadToken) {
         var url = api.base + '/upload/' + uploadId;
-        return api.call(url, 'GET', {});
+        return api.call(url, 'GET', {}, {}, uploadToken);
     };
 
     // Create an upload with current settings
-    api.createUpload = function (upload) {
+    api.createUpload = function(upload) {
         var url = api.base + '/upload';
-        return api.call(url, 'POST', upload);
+        return api.call(url, 'POST', {}, upload);
+    };
+
+    // Remove an upload
+    api.removeUpload = function(upload) {
+        var url = api.base + '/upload/' + upload.id;
+        return api.call(url, 'DELETE', {}, {}, upload.uploadToken);
     };
 
     // Upload a file
-    api.uploadFile = function (upload, file, progres_cb, basicAuth) {
+    api.uploadFile = function(upload, file, progres_cb, basicAuth) {
         var mode = upload.stream ? "stream" : "file";
         var url = api.base + '/' + mode + '/' + upload.id + '/' + file.metadata.id + '/' + file.metadata.fileName;
         return api.upload(url, file, null, progres_cb, basicAuth, upload.uploadToken);
     };
 
     // Remove a file
-    api.removeFile = function (upload, file) {
+    api.removeFile = function(upload, file) {
"stream" : "file"; var url = api.base + '/' + mode + '/' + upload.id + '/' + file.metadata.id + '/' + file.metadata.fileName; - return api.call(url, 'DELETE', {}, upload.uploadToken); + return api.call(url, 'DELETE', {}, {}, upload.uploadToken); + }; + + // Log in + api.login = function(provider) { + var url = api.base + '/auth/'+ provider + '/login' ; + return api.call(url, 'GET'); + }; + + // Log out + api.logout = function() { + var url = api.base + '/auth/logout' ; + return api.call(url, 'GET'); + }; + + // Get user info + api.getUser = function() { + var url = api.base + '/me'; + return api.call(url, 'GET'); + }; + + // Get upload metadata + api.getUploads = function(token, size, offset) { + var url = api.base + '/me/uploads'; + return api.call(url, 'GET', { token : token, size : size, offset : offset }); + }; + + // Remove uploads + api.deleteUploads = function(token) { + var url = api.base + '/me/uploads'; + return api.call(url, 'DELETE', { token : token }); + }; + + // Delete account + api.deleteAccount = function() { + var url = api.base + '/me'; + return api.call(url, 'DELETE'); + }; + + // Create a new upload token + api.createToken = function(comment) { + var url = api.base + '/me/token'; + return api.call(url, 'POST', {}, { comment : comment }); + }; + + // Revoke an upload token + api.revokeToken = function(token) { + var url = api.base + '/me/token/' + token; + return api.call(url, 'DELETE'); }; // Get server version api.getVersion = function() { var url = api.base + '/version'; - return api.call(url, 'GET', {}); + return api.call(url, 'GET'); }; // Get server config api.getConfig = function() { var url = api.base + '/config'; - return api.call(url, 'GET', {}); + return api.call(url, 'GET'); }; return api; }); +// Config Service +angular.module('config', ['api']). 
+    factory('$config', function ($rootScope, $api) {
+        var module = {
+            config : $api.getConfig(),
+            user : $api.getUser()
+        };
+
+        // Return config promise
+        module.getConfig = function(){
+            return module.config;
+        };
+
+        // Refresh config promise and notify listeners (top menu)
+        module.refreshConfig = function(){
+            module.config = $api.getConfig();
+            $rootScope.$broadcast('config_refreshed', module.config);
+            return module.config;
+        };
+
+        // Return user promise
+        module.getUser = function(){
+            return module.user;
+        };
+
+        // Refresh user promise and notify listeners (top menu)
+        module.refreshUser = function(){
+            module.user = $api.getUser();
+            $rootScope.$broadcast('user_refreshed', module.user);
+            return module.user;
+        };
+
+        return module;
+    });
+
 // Plik app bootstrap and global configuration
-angular.module('plik', ['ngRoute', 'api', 'dialog', 'contentEditable', 'btford.markdown'])
+angular.module('plik', ['ngRoute', 'api', 'config', 'dialog', 'contentEditable', 'btford.markdown'])
     .config(function ($routeProvider) {
         $routeProvider
             .when('/', {controller: MainCtrl, templateUrl: 'partials/main.html', reloadOnSearch: false})
             .when('/clients', {controller: ClientListCtrl, templateUrl: 'partials/clients.html'})
+            .when('/login', {controller: LoginCtrl, templateUrl: 'partials/login.html'})
+            .when('/home', {controller: HomeCtrl, templateUrl: 'partials/home.html'})
             .otherwise({redirectTo: '/'});
     })
     .config(['$httpProvider', function ($httpProvider) {
         $httpProvider.defaults.headers.common['X-ClientApp'] = 'web_client';
+        $httpProvider.defaults.xsrfCookieName = 'plik-xsrf';
+        $httpProvider.defaults.xsrfHeaderName = 'X-XRSFToken';
     }])
     .filter('collapseClass', function () {
         return function (opened) {
@@ -233,8 +329,44 @@
         }
     });
 
+function MenuCtrl($rootScope, $scope, $config){
+    // Get server config
+    $config.getConfig()
+        .then(function (config) {
+            $scope.config = config;
+        });
+
+    // Refresh config
+    $rootScope.$on("config_refreshed", function(event, config){
+        config
+            .then(function (c) {
+                $scope.config = c;
+            })
+            .then(null, function () {
+                $scope.config = null;
+            });
+    });
+
+    // Get user from session
+    $config.getUser()
+        .then(function (user) {
+            $scope.user = user;
+        });
+
+    // Refresh user
+    $rootScope.$on("user_refreshed", function(event, user){
+        user
+            .then(function (u) {
+                $scope.user = u;
+            })
+            .then(null, function () {
+                $scope.user = null;
+            });
+    });
+}
+
 // Main controller
-function MainCtrl($scope, $dialog, $route, $location, $api) {
+function MainCtrl($scope, $api, $config, $route, $location, $dialog) {
     $scope.sortField = 'metadata.fileName';
     $scope.sortOrder = false;
 
@@ -244,7 +376,7 @@
     $scope.password = false;
 
     // Get server config
-    $api.getConfig()
+    $config.getConfig()
         .then(function (config) {
             $scope.config = config;
             $scope.setDefaultTTL();
@@ -280,7 +412,6 @@
         } else {
             // Load current upload id
             $scope.load($location.search().id);
-            $scope.upload.uploadToken = $location.search().uploadToken;
         }
     };
 
@@ -288,7 +419,7 @@
     $scope.load = function (id) {
         if (!id) return;
         $scope.upload.id = id;
-        $api.getUpload($scope.upload.id)
+        $api.getUpload($scope.upload.id, $location.search().uploadToken)
            .then(function (upload) {
                _.extend($scope.upload, upload);
                $scope.files = _.map($scope.upload.files, function (file) {
@@ -487,9 +618,24 @@
             });
     };
 
+    // Remove the whole upload
+    $scope.removeUpload = function () {
+        if (!$scope.upload.removable && !$scope.upload.admin) return;
+        $api.removeUpload($scope.upload)
+            .then(function () {
+                // Redirect to main page
+                $location.search('id', null);
+                $route.reload();
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
     // Remove a file from the servers
-    $scope.delete = function (file) {
-        if (!$scope.upload.uploadToken) return;
+    $scope.deleteFile = function (file) {
+        if (!$scope.upload.removable && !$scope.upload.admin) return;
         $api.removeFile($scope.upload, file)
             .then(function () {
                 $scope.files = _.reject($scope.files, function (f) {
@@ -522,7 +668,7 @@
             url += "?dl=1";
         }
 
-        return url;
+        return encodeURI(url);
     };
 
     // Return QR Code image url
@@ -738,7 +884,6 @@
             var d = new Date(($scope.upload.ttl + $scope.upload.uploadDate) * 1000);
             return "expire the " + d.toLocaleDateString() + " at " + d.toLocaleTimeString();
         }
-
     };
 
     // Add upload token in url so one can add/remove files later
@@ -767,20 +912,258 @@
         });
 }
 
+// Login controller
+function LoginCtrl($scope, $api, $config, $location, $dialog){
+    // Get server config
+    $config.getConfig()
+        .then(function (config) {
+            $scope.config = config;
+            // Check if token authentication is enabled server side
+            if (!config.authentication) {
+                $location.path('/');
+            }
+        })
+        .then(null, function (error) {
+            if (error.status != 401 && error.status != 403) {
+                $dialog.alert(error);
+            }
+        });
+
+    // Get user from session
+    $config.getUser()
+        .then(function () {
+            $location.path('/home');
+        })
+        .then(null, function (error) {
+            if (error.status != 401 && error.status != 403) {
+                $dialog.alert(error);
+            }
+        });
+
+    // Google authentication
+    $scope.google = function(){
+        $api.login("google")
+            .then(function (url) {
+                // Redirect to Google user consent dialog
+                window.location.replace(url);
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // OVH authentication
+    $scope.ovh = function(){
+        $api.login("ovh")
+            .then(function (url) {
+                // Redirect to OVH user consent dialog
+                window.location.replace(url);
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+}
+
+// Home controller (user uploads & tokens)
+function HomeCtrl($scope, $api, $config, $dialog, $location) {
+
+    $scope.display = 'uploads';
+    $scope.displayUploads = function(token){
+        $scope.uploads = [];
+        $scope.token = token;
+        $scope.display = 'uploads';
+        $scope.refreshUser();
+    };
+
+    $scope.displayTokens = function(){
+        $scope.display = 'tokens';
+        $scope.refreshUser();
+    };
+
+    // Get server config
+    $config.config
+        .then(function (config) {
+            // Check if token authentication is enabled server side
+            if (!config.authentication) {
+                $location.path('/');
+            }
+        })
+        .then(null, function (error) {
+            $dialog.alert(error);
+        });
+
+    // Handle user promise
+    var loadUser = function(promise) {
+        promise.then(function (user) {
+            $scope.user = user;
+            $scope.getUploads();
+        })
+        .then(null, function (error) {
+            if (error.status == 401 || error.status == 403) {
+                $location.path('/login');
+            } else {
+                $dialog.alert(error);
+            }
+        });
+    };
+
+    // Refresh user
+    $scope.refreshUser = function(){
+        loadUser($config.refreshUser());
+    };
+
+    // Get user upload list
+    $scope.getUploads = function(more){
+        if (!more) {
+            $scope.uploads = [];
+        }
+
+        $scope.size = 50;
+        $scope.offset = $scope.uploads.length;
+        $scope.more = false;
+
+        // Get user uploads
+        $api.getUploads($scope.token, $scope.size, $scope.offset)
+            .then(function (uploads) {
+                $scope.uploads = $scope.uploads.concat(uploads);
+                $scope.more = uploads.length == $scope.size;
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // Remove an upload
+    $scope.deleteUpload = function(upload){
+        $api.removeUpload(upload)
+            .then(function(){
+                $scope.uploads = _.reject($scope.uploads, function(u){
+                    return u.id == upload.id;
+                });
+            })
+            .then(null, function(error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // Delete all user uploads
+    $scope.deleteUploads = function(){
+        $api.deleteUploads($scope.token)
+            .then(function (result) {
+                $scope.uploads = [];
+                $scope.getUploads();
+                $dialog.alert(result);
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // Generate a new token
+    $scope.createToken = function(comment){
+        $api.createToken(comment)
+            .then(function () {
+                $scope.refreshUser();
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // Revoke a token
+    $scope.revokeToken = function(token){
+        $dialog.alert({
+            title : "Really ?",
+            message : "Revoking a token will not delete associated uploads.",
+            confirm : true,
+            callback : function(result){
+                if (result) {
+                    $api.revokeToken(token.token)
+                        .then(function () {
+                            $scope.refreshUser();
+                        })
+                        .then(null, function (error) {
+                            $dialog.alert(error);
+                        });
+                }
+            }
+        });
+    };
+
+    // Log out
+    $scope.logout = function(){
+        $api.logout()
+            .then(function () {
+                $config.refreshUser();
+                $location.path('/');
+            })
+            .then(null, function (error) {
+                $dialog.alert(error);
+            });
+    };
+
+    // Delete account
+    $scope.deleteAccount = function(){
+        $dialog.alert({
+            title : "Really ?",
+            message : "Deleting your account will not delete your uploads.",
+            confirm : true,
+            callback : function(result){
+                if (result) {
+                    $api.deleteAccount()
+                        .then(function () {
+                            $config.refreshUser();
+                            $location.path('/');
+                        })
+                        .then(null, function (error) {
+                            $dialog.alert(error);
+                        });
+                }
+            }
+        });
+    };
+
+    // Get upload url
+    $scope.getUploadUrl = function(upload){
+        return location.origin + '/#/?id=' + upload.id;
+    };
+
+    // Get file url
+    $scope.getFileUrl = function(upload, file){
+        return location.origin + '/file/' + upload.id + '/' + file.id + '/' + file.fileName;
+    };
+
+    // Compute human readable size
+    $scope.humanReadableSize = function (size) {
+        if (_.isUndefined(size)) return;
+        return filesize(size, {base: 2});
+    };
+
+    loadUser($config.getUser());
+}
+
 // Alert modal dialog controller
 function AlertDialogController($rootScope, $scope, $modalInstance, args) {
     $rootScope.registerDialog($scope);
 
-    $scope.title = 'Success !';
-    if (args.data.status != 100) $scope.title = 'Oops !';
+    _.extend($scope, args.data);
 
-    $scope.data = args.data;
+    if (!$scope.title) {
+        if ($scope.status) {
+            if ($scope.status == 100) {
+                $scope.title = 'Success !';
+            } else {
+                $scope.title = 'Oops ! (' + $scope.status + ')';
+            }
+        }
+    }
 
     $scope.close = function (result) {
         $rootScope.dismissDialog($scope);
         $modalInstance.close(result);
-        if (args.callback) {
-            args.callback(result);
+        if ($scope.callback) {
+            $scope.callback(result);
         }
     };
 }
diff --git a/server/public/partials/alert.html b/server/public/partials/alert.html
index ad8422df..9552568d 100644
--- a/server/public/partials/alert.html
+++ b/server/public/partials/alert.html
@@ -24,14 +24,18 @@ THE SOFTWARE.
 -->
@@ -104,7 +104,7 @@
@@ -122,13 +122,13 @@
[The markup bodies of these alert.html hunks were lost in extraction; only the hunk headers survive.]